diff --git "a/2223.jsonl" "b/2223.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2223.jsonl"
@@ -0,0 +1,418 @@
+{"seq_id":"74753593155","text":"#!/usr/bin/env python3 Line 1\n# -*- coding: utf-8 -*- Line 2\n#----------------------------------------------------------------------------\n# Created By : davoc\n# Created: Clase 3 ciclo 1 mision tic 2022. Parte 5 , Tarea para el 4 Mayo 2022\n# version ='2.0'\n\n#Se trata de escribir el algoritmo que permita calcular el valor a pagar para una compra de un articulo determinado \n#del que se adquieren una o varias unidades. El iva a aplicar es del 19% y si el precio bruto(precio de venta mas IVA)\n#es mayor de 500.000 COP, se aplicará un descuento del 6.5% sobre el total.\n#Se debe pedir al usuario que ingrese el valor del articulo y la cantidad.\n\nnumero_producto = int(input(\"Cuantos productos compró?: \")) #solo admite enteros de la cantidad total de articulos. #!!restringir a enteros positivos\nprecio_articulo = float(input(\"Cuanto costó el producto?: \")) #funciona con float usando . pero no con comas\n#Como puedo hacer para que reconozca el formato del usuario, ya que en unos paises\n#el punto es usado como separador decimal, mientras que la coma como separador de miles, y viceversa?\n\ntotal_sin_iva = precio_articulo * numero_producto\nprecio_bruto = precio_articulo * numero_producto * 0.19 + precio_articulo * numero_producto\ndiferencia_promo = 500000 - total_sin_iva\n\n#el precio bruto antes del + solo calcularia el iva de esos producto, entonces le sumo de nuevo las var\n#podria almacenar el total del iva en una var nueva, y en la sumarla a precio sin iva\nif precio_bruto >= 500000:\n precio_promo = precio_articulo * numero_producto * 0.065 + precio_articulo * numero_producto\n ahorro_compra = precio_bruto - precio_promo\n print(f\"El total a pagar con la promo es de: {precio_promo} , y su ahorro en esta compra es de {ahorro_compra}\")\nelse:\n print(f\"Lo sentimos, te faltó : {diferencia_promo} para aplicar la promo\")\n #print(f\"Lo sentimos, te faltó {total_sin_iva}-{tope_promo} para aplicar la promo}\") Se pueden hacer operaciones de resta en un print? \n\nprint(f\"El precio total sin iva es de : {total_sin_iva}\")\nprint(f\"El total a pagar con iva del 19% sin la promo es de : {precio_bruto}\")\n","repo_name":"labsigco/Mision51_2022","sub_path":"Ciclo1/Unidad1/Scripts/Clase3_5EjercicioMañana.py","file_name":"Clase3_5EjercicioMañana.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14226018233","text":"#entering the file name to open the file\n#file name =searching from file.txt\n\n#we are going to count word from file and put them into dictionary so\n#that i count the occurence of word in file\nfname = input(\"Enter file name: \")\n#opening the file usinf open functin\nfh = open(fname)\n\n# empty dictionary\nd={}\nfor i in fh:\n #split word on the basis of spaces\n wordsList=i.split()\n #counting the element of list and addedto dictionary \n for j in wordsList:\n d[j]=d.get(j,0)+1\nprint(d)\n\n","repo_name":"harshittaneja090/mywork.github.io","sub_path":"python/file handling in python examples/beggining codes of file handling/code 13 couting word from file.py","file_name":"code 13 couting word from file.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21260489282","text":"import os\nimport numpy as np\n\nnumimages = 869\nsizeimages = 265 * 91\ni = 0\nmatrix = np.zeros([numimages, sizeimages * 8])\n\nwith os.scandir('C:/Users/Ext1306/Desktop/00') as entries:\n for entry in entries:\n img = np.load(entry)\n matrix[i,:] = img.flatten()\n i += 1\nprint(np.shape(matrix))\nmatrix = matrix.T\nnp.save('pca_matrix', matrix)","repo_name":"Eortvald/Foss-autoencoder","sub_path":"preprocess/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"16524026051","text":"import sys\nfrom cpyutils.parameters import CmdLineParser, Flag, Parameter, Argument, Operation\nimport cpyutils.eventloop as eventloop\nimport cpyutils.db as db\nimport cpyutils.log\nimport cpyutils.xmlrpcutils as xmlrpcutils\nimport logging\nimport version\n\n_LOGGER = cpyutils.log.Log(\"IPFLOATER\")\n\ndef main_function():\n logging.basicConfig(filename=None,level=logging.DEBUG)\n eventloop.create_eventloop(True)\n\n class IPFloaterCmdLine(CmdLineParser):\n def preops(self, result, error):\n SERVER=result.values['--server-ip'][0]\n PORT=result.values['--server-port'][0]\n self._XMLRPC_SERVER = xmlrpcutils.ServerProxy(\"http://%s:%d\" % (SERVER, PORT))\n\n def ippool(self, parse_result, error):\n try:\n _, ips = self._XMLRPC_SERVER.get_public_ips()\n return True, \"IP Pool:\\n%s\\n%s\" % (\"-\"*40, \", \".join(ips))\n except:\n return False, \"Could not contact the server\"\n\n def getip(self, parse_result, error):\n result, ep = self._XMLRPC_SERVER.create_public_redirection(\"\", -1, parse_result.values['private ip'][0], 0)\n if result:\n return True, \"Public IP obtained: %s\" % str(ep)\n else:\n return False, \"Could not obtain a redirection (server responded: %s)\" % ep\n\n def redirect(self, parse_result, error):\n result, ep = self._XMLRPC_SERVER.create_public_redirection(parse_result.values['public ip'][0], 0, parse_result.values['private ip'][0], 0)\n if result:\n return True, \"Public IP obtained: %s\" % str(ep)\n else:\n return False, \"Could not obtain a redirection (server responded: %s)\" % ep\n \n def releaseip(self, parse_result, error):\n ip = parse_result.values['public ip'][0]\n result, ep = self._XMLRPC_SERVER.clean_public_ip(ip)\n if result:\n return True, \"Released the redirection from IP %s\" % (ip)\n else:\n return False, \"Could not release the redirection from IP %s (server responded: %s)\" % (ip, ep)\n \n def status(self, result, error):\n try:\n return True, \"Table of redirections:\\n%s\\n%s\" % (\"-\"*40, self._XMLRPC_SERVER.get_redirections())\n except:\n return False, \"Could not contact the server\"\n\n def version(self, result, error):\n try:\n server_version = self._XMLRPC_SERVER.get_version()\n return True, \"Client version: %s\\nServer version: %s\" % (version.get(), server_version)\n except:\n return True, \"Client version: %s\\nCould not contact server\" % version.get()\n def arp(self, parse_result, error):\n mac = parse_result.values['mac'][0]\n result, ip = self._XMLRPC_SERVER.arp(mac)\n if result:\n return True, \"%s\" % (ip)\n else:\n return False, \"Failed to get the ip address for %s (server responded: %s)\" % (mac, ip)\n \n ap = IPFloaterCmdLine(\"ipfloater\", \"This the client for ipfloaterd, which is a server that deals with iptables to enable floating IPs in private networks\", [\n Parameter(\"--server-ip\", \"-i\", \"The ip adress in which ipfloater listens\", 1, False, [\"127.0.0.1\"]),\n Parameter(\"--server-port\", \"-p\", \"The ip port in which ipfloater listens\", 1, False, [7000]),\n Operation(\"getip\", desc = \"Requests a floating IP for a private IP\", arguments = [\n Argument(\"private ip\", \"private ip address to which is requested the floating ip\", mandatory = True, count = 1),\n ]),\n Operation(\"redirect\", desc = \"Redirects a floating IP to a private IP\", arguments = [\n Argument(\"public ip\", \"floating ip address\", mandatory = True, count = 1),\n Argument(\"private ip\", \"private ip address to which is requested the floating ip\", mandatory = True, count = 1),\n ]),\n 
Operation(\"releaseip\", desc = \"Releases a floating IP\", arguments = [\n Argument(\"public ip\", \"public ip address (the floating ip)\", mandatory = True, count = 1),\n ]),\n Operation(\"status\", desc = \"Gets the status of the redirections\"),\n Operation(\"version\", desc = \"Gets the version of the client and the server\"),\n Operation(\"ippool\", desc = \"Gets the public ip addresses in the pool\"),\n Operation(\"arp\", desc = \"Requests the IP for a MAC address\", arguments = [\n Argument(\"mac\", \"the mac address for which is requested the ip\", mandatory = True, count = 1),\n ]),\n ])\n \n ap.self_service(True)\n \nif __name__ == '__main__':\n main_function()","repo_name":"grycap/ipfloater","sub_path":"ipfloater.py","file_name":"ipfloater.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"}
+{"seq_id":"37301548398","text":"import urllib\nimport urllib2\nimport json\n\nimport requests\nimport collections\n\n\nGET_ACCESS_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token'\n\nCREATE_MENU_URL = 'https://api.weixin.qq.com/cgi-bin/menu/create'\n\nDELETE_MENU_URL = 'https://api.weixin.qq.com/cgi-bin/menu/delete'\n\nGET_MENU_URL = 'https://api.weixin.qq.com/cgi-bin/menu/get'\n\nCREATE_CHN_QRCODE_URL = 'https://api.weixin.qq.com/cgi-bin/qrcode/create'\n\n\ndef post_json(url, data_map):\n req = urllib2.Request(url)\n req.add_header('Content-Type', 'application/json')\n resp = urllib2.urlopen(req, json.dumps(data_map, ensure_ascii=False))\n return resp.read()\n\n\ndef get(url, data=None, user_agent=None):\n if data:\n encode_params = urllib.urlencode(data)\n url = url + \"?\" + encode_params\n\n request = urllib2.Request(url)\n if user_agent:\n request.add_header('User-Agent', user_agent)\n\n resp = urllib2.urlopen(request)\n return resp.read()\n\n\ndef get_access_token(app_id, app_secret):\n \"\"\"\n 获取凭证\n @see http://mp.weixin.qq.com/wiki/index.php?title=%E9%80%9A%E7%94%A8%E6%8E%A5%E5%8F%A3%E6%96%87%E6%A1%A3\n \"\"\"\n data = {\n 'grant_type': 'client_credential',\n 'appid': app_id,\n 'secret': app_secret,\n }\n resp = get(GET_ACCESS_TOKEN_URL, data)\n return resp\n\n\ndef create_menu(menu_map, access_token):\n \"\"\"\n 创建菜单\n \"\"\"\n\n url = CREATE_MENU_URL + '?access_token=%s' % access_token\n # resp = urllib2.urlopen(url, menu_json_str.encode('utf-8'))\n resp = post_json(url, menu_map)\n return resp\n\n\ndef delete_menu(access_token):\n \"\"\"\n 删除菜单,建议现考虑是否直接关闭开发者模式\n \"\"\"\n url = DELETE_MENU_URL + '?access_token=%s' % access_token\n resp = post_json(url, None)\n return resp\n\n\ndef get_current_menu(access_token):\n \"\"\"\n 获取当前菜单配置\n \"\"\"\n data = {\n 'access_token': access_token,\n }\n resp = get(GET_MENU_URL, data)\n return resp\n\n\ndef create_chn_qrcode(scene_str, access_token):\n \"\"\"\n 创建渠道永久二维码, 需要订阅号才能使用\n scene_str: 渠道标识字符串\n @see http://mp.weixin.qq.com/wiki/18/28fc21e7ed87bec960651f0ce873ef8a.html\n \"\"\"\n url = CREATE_CHN_QRCODE_URL + '?access_token=%s' % access_token\n data = {\n \"action_name\": \"QR_LIMIT_STR_SCENE\",\n \"action_info\": {\n \"scene\": {\n \"scene_str\": scene_str\n }\n }\n }\n resp = post_json(url, data)\n return resp\n\n\ndef auth_url(appid, redirect_uri, state):\n url = \"https://open.weixin.qq.com/connect/oauth2/authorize\"\n data = collections.OrderedDict()\n data['appid'] = appid\n data['redirect_uri'] = redirect_uri\n data['response_type'] = 'code'\n data['scope'] = 'snsapi_userinfo'\n data['state'] = state\n ans = \"%s?%s%s\" % (url, urllib.urlencode(data), \"#wechat_redirect\")\n return ans\n\ndef get_access_token_by_code(appid, secret, code):\n url = \"https://api.weixin.qq.com/sns/oauth2/access_token\"\n params = {\n 'appid': appid,\n 'secret': secret,\n 'code': code,\n 'grant_type': 'authorization_code'\n }\n try:\n res = requests.get(url, params=params)\n res.raise_for_status()\n return res.json()\n except requests.RequestException as e:\n return {'e': e.message}\n\n\ndef get_userinfo(access_token, openid):\n url = \"https://api.weixin.qq.com/sns/userinfo\"\n params = {\n 'access_token': access_token,\n 'openid': openid,\n 'lang': 'zh_CN',\n }\n\n try:\n res = requests.get(url, params=params)\n res.raise_for_status()\n return res.json()\n except requests.RequestException as e:\n return {'e': 
e.message}\n","repo_name":"xym2010/wechat_lottery","sub_path":"app/wechat/wechat_api.py","file_name":"wechat_api.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1345569242","text":"\nimport cv2\nimport mediapipe as mp\n\ncam = cv2.VideoCapture(0) # não reconheceu minha camera\nmodulo = mp.solutions.face_detection\nface_detection = modulo.FaceDetection()\nshow = mp.solutions.drawing_utils\n\nwhile cam.isOpened():\n x, frame = cam.read()\n if not x:\n print(\"fafa\")\n break\n\n faces = face_detection.process(frame)\n\n if faces.detections:\n for rosto in faces.detections:\n show.draw_detection(frame, rosto)\n\n cv2.imshow(\"Rostos na sua webcam\", frame)\n\n if cv2.waitKey(5) == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()","repo_name":"Marcos-VM-1708/cam_detention_","sub_path":"face_track.py","file_name":"face_track.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42262613171","text":"import json\nimport numpy\nfrom numpy import *\nimport scipy\nimport scipy.stats\nimport pymultinest\nimport argparse\n\nfrom interp_atm_pdf import Initialize_Atmospheric_PDFs\nfrom interp_astro_pdf import Initialize_Interpolator_Astrophysical_PDF\nfrom full_likelihood import Log_Likelihood\n\n# Recommended run:\n# python likelihood_analysis_parser.py --verbose=1 --n_live_points=200 --evidence_tolerance=0.01\n\n# After MultiNest finishes, run this to analyse the results:\n# multinest_marginals.py out/likelihood/\n\nparser = argparse.ArgumentParser(description='Likelihood analysis')\n\nparser.add_argument(\"--n_live_points\", help=\"Default: 100\",\n\ttype=int, default=100)\n\nparser.add_argument(\"--evidence_tolerance\", help=\"Default: 0.1\",\n\ttype=float, default=0.1)\n\nparser.add_argument(\"--resume\", help=\"Resume MultiNest run [default: False]\",\n\taction=\"store_true\")\n\nparser.add_argument(\"--verbose\", help=\"Default: 0\",\n\ttype=int, default=0)\n\nargs = parser.parse_args()\n\nn_live_points = args.n_live_points\nevidence_tolerance = args.evidence_tolerance\nresume = args.resume\nverbose = args.verbose\n\n\ndef Prior(cube, ndim, nparams):\n\n\t# Spectral index. Uniform prior between 1.8 and 3.\n\tcube[0] = 2.0+cube[0] #1.8+cube[0]*1.2\n\n\t# Log10 of mass of mediator [GeV]. Log uniform prior between -3.0 and -1.0.\n\tcube[1] = -3.0+2.0*cube[1]\n\n\t# Log10 of coupling constant. Log uniform prior between -3.0 and -1.0\n\tcube[2] = -3.0+2.0*cube[2]\n\n\t# Expected number of astrophysical neutrinos. Uniform distribution between 0 and 80.\n\tcube[3] = cube[3]*80\n\t# cube[1] = cube[1]*80\n\t# cube[3] = 10.**(cube[3]*(1.9084+3.0)-3.0)\n\n\t# Expected number of conv. atm. neutrinos. Uniform distribution between 0 and 80.\n\tcube[4] = cube[4]*80\n\t# cube[4] = 10.**(cube[4]*(1.9084-3.0)+3.0)\n\n\t# Expected number of prompt atm. neutrinos. Uniform distribution between 0 and 80.\n\tcube[5] = cube[5]*1\n\t# cube[5] = 10.**(cube[5]*(1.9084-3.0)+3.0)\n\n\t# Expected number of atm. muons. 
Uniform distribution between 0 and 80.\n\tcube[6] = cube[6]*80\n\t# cube[6] = 10.**(cube[6]*(1.9084-3.0)+3.0)\n\n\treturn 0\n\n\ndef Log_Likelihood_MultiNest(cube, ndim, nparams):\n\n\tgamma = cube[0]\n\tlog10_g = cube[1]\n\tlog10_M = cube[2]\n\tN_a = cube[3]\n\tN_conv = cube[4]\n\tN_pr = cube[5]\n\tN_mu = cube[6]\n\n\tll = Log_Likelihood(gamma, log10_g, log10_M, N_a, N_conv, N_pr, N_mu,\n interp_astro_pdf_sh, pdf_atm_conv_sh, pdf_atm_pr_sh,\n interp_astro_pdf_tr, pdf_atm_conv_tr, pdf_atm_pr_tr,\n pdf_atm_muon_tr, num_ic_sh=58, num_ic_tr=22, verbose=verbose)\n\n\treturn ll\n\n\n# Initialize the atmospheric PDFs for all of the IceCube events\npdf_atm_conv_sh, pdf_atm_pr_sh, pdf_atm_pr_sh, pdf_atm_muon_sh, \\\n pdf_atm_conv_tr, pdf_atm_pr_tr, pdf_atm_muon_tr = \\\n Initialize_Atmospheric_PDFs(verbose=verbose)\n\n# Initialize the astrophysical PDF interpolators for all of the IceCube events\ninterp_astro_pdf_sh, interp_astro_pdf_tr = \\\n Initialize_Interpolator_Astrophysical_PDF(verbose=verbose)\n\n\nparameters = [\"gamma\", \"log10_g\", \"log10_M\", \"N_a\", \"N_conv\", \"N_pr\", \"N_mu\"]\nn_params = len(parameters)\n\n# Run MultiNest\npymultinest.run(Log_Likelihood_MultiNest, Prior, n_params,\n\t outputfiles_basename='out/likelihood/',\n\t\t\t\tresume=resume, verbose=verbose, n_live_points=n_live_points,\n\t\t\t\tseed=-1, evidence_tolerance=evidence_tolerance,\n\t\t\t\tsampling_efficiency=0.8,\n\t\t\t\timportance_nested_sampling=True,\n\t\t\t\tconst_efficiency_mode=False)\n\t\t\t\t#, log_zero=-300.0)\n# const_efficiency_mode=True, sampling_efficiency=1)\n\njson.dump(parameters, open('out/likelihood/params.json', 'w')) # Save parameter names\n\n\n","repo_name":"mbustama/secret-nu-int","sub_path":"dev-likelihood-sl/likelihood_analysis_parser.py","file_name":"likelihood_analysis_parser.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15445567267","text":"class Shape:\n def __init__(self, l, b, h, r):\n # self.l = None\n self.length = l\n self.breath = b\n self.height = h\n self.radios = r\n\n # def getData(self):\n # self.l = float(input(\"Enter The length: \"))\n # self.b =\n #\n","repo_name":"myproject2022/MyPythonProject","sub_path":"takeopython/ClassInherit.py","file_name":"ClassInherit.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39501239281","text":"class Solution:\n # @param A, a list of integers\n # @return a boolean\n\n # dp[i]: if you are able to reach the last index with i elements\n # dp[i] = (or dp[k]) for all k which A[k] + k >= i\n # initial: dp[0] = True\n # ans: dp[n-1]\n # LTE!\n def canJump(self, A):\n # write your code here\n if not A:\n return False\n dp = [False for i in xrange(len(A))]\n dp[0] = True\n for i in xrange(1, len(A)):\n for j in xrange(i):\n if dp[j] and j + A[j] >= i:\n dp[i] = True\n break\n return dp[len(A) - 1]\n\n # Greedy\n def canJump(self, A):\n if not A:\n return False\n farthest = A[0]\n for i in xrange(1, len(A)):\n # If i can be reached from beginning and\n # we can reach farther from i\n if i <= farthest and i + A[i] > farthest:\n farthest = A[i] + i\n return farthest >= len(A) - 1\n\n","repo_name":"jwyx3/practices","sub_path":"python/jump-game.py","file_name":"jump-game.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"86341367462","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('../Brazil.csv').drop(['COUNTRY', 'PERWT'], axis=1)\ndf['constant'] = 1\ndf['female'] = (df['SEX'] == 'Female').astype(float)\ndf.drop('SEX', axis=1, inplace=True)\n\ndummies = pd.get_dummies(df[['MARST', 'NATIVITY', 'EDATTAIN', 'EMPSTAT', 'OCCISCO', 'INDGEN']], drop_first=True).astype(np.int8)\ndf = pd.concat([df[['INCTOT']], dummies, df[['female', 'AGE', 'constant']]], axis=1)\ndf.to_csv('brazil_dummies.csv', index=False)","repo_name":"chrisdfong/Gender-Pay-Gap-Analysis","sub_path":"py_files/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"21297665023","text":"import pandas as pd\nimport pyodbc\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nimport requests as rs\nimport sqltdb as sqdb\nimport sitecount as st\nimport omfn as fn\nimport urllib3\nimport urllib.parse\n\n\ntday = date.today()\ntmdlta = datetime.now() + timedelta(minutes=1)\ntmnw = datetime.now() - timedelta(minutes=1)\nqryst = tmnw.strftime('%Y-%m-%d %H:%M:%S')\nqryend = tmdlta.strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef handdler(ussd,msg,msisdn):\n nw = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n rval = \"\"\n if msg !=\"\":\n ms = msisdn[-10:len(msisdn)]\n ms4sms = msisdn[-11:len(msisdn)]\n code = fn.sitecode_pick(msg)\n if \"ALL\" in msg or '2G' in msg or \"3G\" in msg or \"SC\" in msg or \"4G\" in msg:\n xx = st.siteinfo(msg)\n print(nw, xx)\n yy = st.sms(ms4sms,xx)\n rval = \"S\"\n elif \"PGSTART\" in msg and code != 'NA':\n xx = st.roc(ussd,code,ms,'PGSTART')\n print(nw,xx)\n if 'PGON_DONE' in xx:\n rval = \"S\"\n else:\n rval = \"F\"\n elif \"PGSTOP\" in msg and code != 'NA':\n xx = st.roc(ussd,code,ms,'PGSTOP')\n print(nw,xx)\n if 'PGOFF_DONE' in xx:\n rval = \"S\"\n else:\n rval = \"F\"\n else:\n rval = \"Not Related Query\"\n return rval\n\ndef main():\n nww = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n df = st.smscheck()\n if df.shape[0] != 0:\n for i in range(len(df)):\n msg1 = df.loc[i,\"MESSAGE\"]\n if isinstance(msg1, str):\n ussd = df.loc[i,\"USDLogId\"]\n msg = msg1.upper()\n msisdn = df.loc[i,\"DESTADDR\"]\n sqret = sqdb.queryussd(ussd)\n if sqret == 0:\n st.general_qry()\n rval = handdler(ussd,msg,msisdn)\n st.general_qry()\n if rval == 'S':\n rv2 = sqdb.insertussd(ussd)\n if rv2 == \"S\":\n print('Cycle Complete for::::: ', nww, ussd, msg, msisdn)\n else:\n print(\"Cycle failed:::\", nww, ussd, msg, msisdn)\n else:\n print(rval)\n else:\n print('already served::', nww, ussd, msg, msisdn)\n else:\n print('no sms')\n return \"done at \" + nww","repo_name":"FuckBrains/omEngin","sub_path":"Z_ALL_FILE/Py1/10262020-221-XAQ-main.py","file_name":"10262020-221-XAQ-main.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72287262915","text":"#!/usr/bin/env python3\nimport tweepy\nfrom sense_hat import SenseHat\nfrom datetime import datetime\nsense = SenseHat()\nclient = tweepy.Client(consumer_key='YOUR API KEY',\n consumer_secret='YOUR API KEY SECRET',\n access_token='YOUR ACCESS TOKEN',\n access_token_secret='YOUR ACCESS TOKEN SECRET')\n\ntemp = round(sense.get_temperature(),1)\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M\")\nfunny = [\"Right now the temperature at my desk is \", \"Oh my word it is cold at my desk, it is only \", \"Feeling hot, hot, hot, the temperature is \"]\nif temp < 22.0:\n msg = current_time+\" \"+[1]+str(temp)+\" Celsius\"\nelif temp > 23.0:\n msg = current_time+\" \"+funny[2]+str(temp)+\" Celsius\"\nelse:\n msg = current_time+\" \"+funny[0]+str(temp)+\" Celsius\"\n\nresponse = client.create_tweet(text=msg)\nprint(response)\nprint(msg)\nsense.show_message(str(temp)+\"C\")\n","repo_name":"lesp/LXF-Twitter-Sense","sub_path":"send_tweet.py","file_name":"send_tweet.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23641012851","text":"#!/usr/bin/env python\nimport sys\nimport re\nimport urllib\nimport math\nimport os\nfrom timeit import Timer\n\n#def subset = lambda x: [[y for j, y in enumerate(set(x)) if (i >> j) & 1] for i in range(2**len(set(x)))]\n\ndef output(case, answer):\n\treturn \"Case #%d: %d\\n\" % (case, answer)\n\ndef func(n):\n\tif n == 0: return 0\n\tif n == 1 or n == 2: return 1\n\tif n % 3 == 0: return n/3\n\telse: return int(math.floor(float(n)/3)) + 1\n\ndef determine(l,s,p):\n\ttotal = 0\n\tmaxnorm = [func(val) for val in l]\n\tcandidates = len([val for index, val in enumerate(maxnorm) if val == p-1 and l[index] % 3 != 1 and l[index] > 1])\n\twhile candidates and s:\n\t\ttotal += 1\n\t\tcandidates -= 1\n\t\ts -= 1\n\ttotal += len([val for val in maxnorm if val >= p])\n\treturn total\n\t\ndef main(filein, fileout):\n\tcase = 0\n\tf = open(filein, 'r')\n\to = open(fileout, 'w')\n\ttimes = int(f.readline())\n\twhile case < times:\n\t\tanswer = 0\n\t\tvalues = f.readline().split(\" \")\n\t\tnumPlayers = int(values[0])\n\t\tsurprising = int(values[1])\n\t\tlimit = int(values[2])\n\t\tvalues = [int(number) for number in values[-numPlayers:]]\n\t\tanswer = determine(values,surprising,limit)\n\t\tcase += 1\n\t\to.write(output(case, answer))\n\tf.close()\n\to.close()\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1], sys.argv[1][:-2]+'out')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/596.py","file_name":"596.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1373607119","text":"import pygame # importing pygame library\nimport random\nfrom sys import exit # Importing 'sys' important for various functions\n\npygame.init() \n\nscreen = pygame.display.set_mode((400, 400)) #showing the screen\nstarted = False #declaring a variable to show the state of game\n\nclass Player: \n speed = 0.5 #speed of the spaceship on press of button\n #initial x and y position of the player\n player_x = 170 #170 to keep it in center of screen as our spaceship is 60px wide\n player_y = 350 #setting y coordinate of spaceship\n player_x_change = 0\n\n #adding the spaceship image in code\n spaceship = pygame.image.load(\n 'C:\\Projects\\Space Invaders\\space-invaders\\Elements\\player.png')\n \n def player(x, y):\n screen.blit(Player.spaceship, (x, y)) #showing the image on screen\n\n def player_movement():\n Player.player_x += Player.player_x_change #this will increase or decrease (depending when the function is called) the x coordinate of position by continuosly adding 0.5 (speed variable) to it.\n\n def player_boundary():\n if Player.player_x < 0: # adding boundary to the game\n Player.player_x = 0 # adding boundary to the game\n elif Player.player_x >= 340: # adding boundary to the game\n Player.player_x = 340 # adding boundary to the game\n \n def line():\n rect1 = ((0 ,330 , 400, 200)) #making a rectangle the endline of the screen (0,330) : coordinates of top left corner of rectangle\n #(400,200) size of rectangle\n pygame.draw.rect(screen, (40,40,40), rect1) #dsiplaying the rectangle on 'screen' and color is (40,40,40)\n\nclass Enemy:\n\n enemy_speed = 0.2 #speed at which enemy moves\n enemy_fasten = 0.2 #speed at which enemy moves when it hit the wall\n\n enemy_red = [] #list to store enemy_images as there are multiple\n\n enemy_x = [] #list to store the x coordinates of enemy_image corresponding to the image stored in enemy_red list\n enemy_y = [] #list to store the y coordinates of enemy_image corresponding to the image stored in enemy_red list\n\n enemy_x_change = [] #list to store the change in x coordinates of enemy_image corresponding to the image stored in enemy_red list\n enemy_y_change = [] #list to store the change in y coordinates of enemy_image corresponding to the image stored in enemy_red list\n\n number_of_enemies = 4 #number of enemies\n\n #making/adding the enemies in game\n for enemies in range(number_of_enemies):\n #adding the enemies to the list enemy_red\n enemy_red.append(pygame.image.load(\n 'C:\\Projects\\Space Invaders\\space-invaders\\Elements\\enemy_red.png')) \n # locates the enemy on a random point between 0, 360\n enemy_x.append(random.randint(0, 360))\n # locates the enemy on a random point between 32, 64\n enemy_y.append(random.randint(32, 64))\n #adding enemy to the corresponding list so that each sprite moves individually at its own speed\n enemy_x_change.append(enemy_speed)\n enemy_y_change.append(32) #change in y coordinate, it is 32 because the heigth of enemy is 32px. 
As we want the enemy to move to next row on hitting wall, we kept the change to 32\n\n\n def enemy1(x,y,i):\n #showing enemies on screen\n #here x and y are the variable coordinates\n #and i is a variable which will be used in a future loop \n screen.blit(Enemy.enemy_red[i], (x,y))\n\nclass Laser:\n laser_x = Player.player_x + 30 #spaceship initial posn + 30\n laser_y = Player.player_y + 5 #spaceship initial posn + 5\n laser_x_change = 0\n laser_y_change = 1\n laser_state = 'rest'\n #addiing and resizing laser image \n laser = pygame.image.load(\n 'C:\\Projects\\Space Invaders\\space-invaders\\Elements\\laser_bullet.jpg')\n laser = pygame.transform.scale(laser, (4, 20))\n\n def laser_fire(x, y):\n global laser_state #this is same laser state shown in laser.py\n laser_state = 'fired' #changing the state, will change the value to 'fired' when this function is called\n screen.blit(Laser.laser, (x, y)) #displaying image on screen when function is called\n\n def laserstate():\n #when this function is called\n if Laser.laser_state == 'fired' : #it will check the state of laser, if it is fired then\n #it will display the laser and change its coordinate by 'laser_y_change' value, so that it appears that laser is moving upwards. \n Laser.laser_fire(Laser.laser_x + 26, Laser.laser_y) \n Laser.laser_y -= Laser.laser_y_change\n\n def laser_boundary():\n #this function will reset the state of the laser once it is out of window, so that we can shoot the laser again\n if Laser.laser_y <= 0 : #if the coordinate of laser goes out of screen then\n Laser.laser_y = Player.player_y #reset the y coordinate of the laser to the y coordinate of player (spaceship)\n Laser.laser_state = 'rest' #and change the laser state to rest so that we can fire it again\n\nclass Controls:\n def player_control():\n #this function has the controls of player\n if pygame.key.get_pressed()[pygame.K_LEFT] or pygame.key.get_pressed()[pygame.K_a]:\n Player.player_x_change -= Player.speed #will decrease the x coordinate by 'speed' (0.5px) so that it can move to left\n if pygame.key.get_pressed()[pygame.K_RIGHT] or pygame.key.get_pressed()[pygame.K_d]:\n Player.player_x_change += Player.speed #will increase the x coordinate by 'speed' (0.5px) so that it can move to right\n #laser will fire when upper arrow is pressed\n if pygame.key.get_pressed()[pygame.K_UP] or pygame.key.get_pressed()[pygame.K_w]:\n Laser.laser_x = Player.player_x\n if Laser.laser_state == 'rest': #checking if laser state is in rest state \n #this condition above is important becase, without it if we accidentally clicked the up_arrow, the position of laser will reset to spaceship position\n #and it will refire from the spaceship position\n Laser.laser_state = 'fired' #so that it can be changes to fire state\n Laser.laser_fire(Laser.laser_x, Laser.laser_y) #and can be fired from the position of the spaceship (laser_x and laser_y are equals to spaceship position)\n\nclass Score:\n score = 0 #keeping the track of score which will increase upon collision\n #declaring the font used. 
\n font = pygame.font.Font('C:\\Projects\\Space Invaders\\space-invaders\\Fonts\\VCR_OSD_MONO_1.001.ttf', 16)\n\n def show_score(x, y): #to display the score count using blit()\n score_count = Score.font.render('Score : ' + str(Score.score), True, (255,255,255))\n screen.blit(score_count, (x,y))\n\n def show_title(x, y): #to show 'space invaders title' using (blit)\n score_count = Score.font.render('Space Invaders', True, (255,255,255))\n screen.blit(score_count, (x,y)) \n\nclass StartScreen:\n start = True #variable to store the state of start window, if its true that means start window is displayed on screen. If false, then its not.\n font = pygame.font.Font(r'Fonts\\upheavtt.ttf', 56) #declaring the font\n\n def start_screen(): \n #adding and displaying (blit function) the start image on screen.\n start_image = pygame.image.load(\"C:\\Projects\\Space Invaders\\space-invaders\\Elements\\Space Invaders Start.png\")\n start_image = pygame.transform.scale(start_image, (400,400)) #resizing the screen so that it fits the window\n\n screen.blit(start_image, (0,0))\n \n def show_start():\n while StartScreen.start: #while the start variable in StartScreen class is True (it becomes true when the function is called)\n for event in pygame.event.get(): \n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN :#checks if SPACE key is pressed\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n StartScreen.start = False #sets the start variable to false, hence start screen is closed \n \n StartScreen.start_screen() #calling the start_screen function to display the intro image\n pygame.display.update()\n\nclass Collision:\n def collision():\n laser_rect = Laser.laser.get_rect(center=(Laser.laser_x, Laser.laser_y)) #getting rectangle of size and width of laser image, and mapping it to position of laser\n enemy_rect = [] #making an empty list to store the rectangles we will get of the enemy images stored in enemy_rect list\n\n for i in range(Enemy.number_of_enemies): #iterating over enemies\n Enemy.enemy_x[i] += Enemy.enemy_x_change[i] #this will constantly change the enemy's (at index 'i' in the enemy_rect list) x coordinate by enemy_x_change\n Enemy.enemy_y[i] = Enemy.enemy_y_change[i] #this will constantly change the enemy's (at index 'i' in the enemy_rect list) y coordinate by enemy_x_change\n \n #this is enemy boundary, if enemy at say index 'i' hits the wall ie goes outside the window it will be reset to a new coordinate as per the code\n if Enemy.enemy_x[i] < 0: #if enemy at index 'i' hits the left wall\n Enemy.enemy_x_change[i] = Enemy.enemy_fasten #change the speed of enemy and hence its coordinates by logic mentioned earlier\n Enemy.enemy_y_change[i] += 32 #to change the y position of alien, to make it come one row down if it touches walls\n if Enemy.enemy_x[i] >= 340: #if enemy at index 'i' hits the right wall\n Enemy.enemy_x_change[i] = -Enemy.enemy_fasten #change the speed of enemy and hence its coordinates by logic mentioned earlier\n Enemy.enemy_y_change[i] += 32 #to change the y position of alien, to make it come one row down if it touches walls\n \n if Enemy.enemy_y[i] >= 300 : #if player hits the y boundary that is 300 \n GameOver.show_game_over() #game over screen will be displayed\n break\n \n Enemy.enemy1(Enemy.enemy_x[i], Enemy.enemy_y[i], i) #showing the enemies on screen\n\n #now we will add the rectangles of enemy at index i to the enemy_rect list.\n enemy_rect.append(Enemy.enemy_red[i].get_rect(center=(Enemy.enemy_x[i], Enemy.enemy_y[i]))) \n\n if 
laser_rect.colliderect(enemy_rect[i]): #colliderect is an inbuilt function which will return True if the two given rectangles collide each other\n print(Score.score)\n Enemy.enemy_x[i] = 10 #set the enemy's x coordinate to 10\n Enemy.enemy_y[i] = 60 #set the enemy's y coordinate to 60\n Enemy.enemy_y_change[i] = random.randint(32, 150) #set the enemy_y_change to any random int between 32 and 150 so that the y coordinate of that specific enemy is changed.\n Score.score += 1 #increase the score by 1 \n Laser.laser_state = 'rest' #change the state of laser to rest so that it can be fired again\n Laser.laser_y = Player.player_y #change the y coordinate of laser to player's y coordinate. \n \n Laser.laser_fire(Player.player_x + 300, Player.player_y + 300) #make the laser dissapear once it hits the spaceship\n\nclass GameScreen :\n def gameplay():\n\n Player.player_movement() #adding the movement logic of player\n Player.player_boundary() #adding the boundary by calling the Boundary() function from Player Class\n\n Laser.laserstate() #checking the state of the laser by calling the laserstate() function from Laser Class\n Laser.laser_boundary() #adding the condition to check if laser is outside the window or not by calling laser_boundary() function from Laser Class\n \n Collision.collision() #checking for collision of enemy and bullet calling collision() function from Collision class\n\n Player.line() #displaying the end line by calling line function from Player class\n Player.player(Player.player_x, Player.player_y) #displaying spaceship by calling player function from Player class\n\n Score.show_score(10,10) #displaying the score by calling show_score function from Score class. (10,10) is the position.\n Score.show_title(260,10) #displaying the title by calling show_title func from Score Class. 
(260, 10) is the position.\n\nclass Paused :\n pause = True\n\n def paused():\n paused = GameOver.font.render('PAUSED', True, (255,255,255))\n paused_rect = paused.get_rect(center=(400/2, 400/2))\n end_score = Score.font.render('score : '+ str(Score.score), True, (255,255,255))\n rect1 = pygame.Rect((0 ,0 , 400, 400))\n pygame.draw.rect(screen, (40,40,40), rect1)\n screen.blit(paused, paused_rect)\n screen.blit(end_score, (150,230))\n \n while Paused.pause:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN: # whem key is pressed\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n Paused.pause = False\n GameScreen.gameplay()\n\n pygame.display.update()\n\n def pause_control():\n\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n Paused.pause = True\n Paused.paused()\n\nclass GameOver:\n font = pygame.font.Font('C:\\Projects\\Space Invaders\\space-invaders\\Fonts\\VCR_OSD_MONO_1.001.ttf', 40)\n stopped = True\n\n def gameover():\n game_over = GameOver.font.render('GAME OVER', True, (255,255,255))\n game_over_rect = game_over.get_rect(center=(400/2, 400/2))\n end_score = Score.font.render('score : '+ str(Score.score), True, (255,255,255))\n rect1 = pygame.Rect((0 ,0 , 400, 400))\n pygame.draw.rect(screen, (40,40,40), rect1)\n screen.blit(game_over,game_over_rect)\n screen.blit(end_score, (150,230))\n \n def show_game_over():\n while GameOver.stopped:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n quit() \n \n GameOver.gameover()\n pygame.display.update()\n\ndef small_text(text) :\n Score.font.render(text, True, (255,255,255))\n\ndef main():\n clock = pygame.time.Clock()\n while True:\n \n screen.fill((30, 30, 30)) # Set the Backgroud Color\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit() \n if event.type == pygame.KEYDOWN: # whem key is pressed\n Controls.player_control()\n Paused.pause_control() \n if event.type == pygame.KEYUP:\n Player.player_x_change = 0\n\n \n StartScreen.show_start()\n GameScreen.gameplay()\n clock.tick(3000)\n pygame.display.update()\n\nmain()","repo_name":"ayushxpatne/space-invaders","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34321504525","text":"import dht\nimport network\nimport time\nfrom config import *\nfrom machine import Pin\nfrom micropyserver import MicroPyServer\ngc.collect()\n\n\nrelay1 = Pin(0, Pin.OUT)\nrelay2 = Pin(13, Pin.OUT)\nrelay3 = Pin(12, Pin.OUT)\nrelay4 = Pin(4, Pin.OUT)\n\nrelay1.on()\nrelay2.on()\nrelay3.on()\nrelay4.on()\n\nblueled = Pin(2, Pin.OUT)\nblueled.on()\n\nadc = machine.ADC(0)\nm_vin = Pin(5, Pin.OUT)\nm_vin.off()\n\nwlan_id = ssid\nwlan_pass = password\nwlan = network.WLAN(network.STA_IF)\nwlan.active(True)\nserver = MicroPyServer()\n\n# if wlan.isconnected() == False:\n # wlan.connect(wlan_id, wlan_pass)\n # while wlan.isconnected() == False:\n # time.sleep(1)\n \nwhile wlan.isconnected() == False:\n print(\"trying hui\")\n wlan.connect(ssid2, password2)\n time.sleep(15)\n if wlan.isconnected() == False:\n print(\"trying random\")\n wlan.connect(ssid, password)\n time.sleep(15)\n\nprint('IP by DHCP:', wlan.ifconfig()[0])\n\nvarVolt = 4.1339\nvarProcess = 0.05\nPc = 0.0\nG = 0.0\nP = 1.0\nXp = 0.0\nZp = 0.0\nXe = 0.0\n\ndef relay_state(n):\n if n == 1:\n if relay1.value() == 1:\n rez = 0\n elif relay1.value() == 0:\n rez = 1\n elif n == 2:\n if relay2.value() == 1:\n rez = 0\n elif relay2.value() == 0:\n rez = 1\n elif n == 3:\n if relay3.value() == 1:\n rez = 0\n elif relay3.value() == 0:\n rez = 1\n elif n == 4:\n if relay4.value() == 1:\n rez = 0\n elif relay4.value() == 0:\n rez = 1\n return rez\n\ndef kalman(var):\n global varVolt\n global varProcess\n global Pc\n global G\n global P\n global Xp\n global Zp\n global Xe\n Pc = P + varProcess\n G = Pc / (Pc + varVolt)\n P = (1 - G) * Pc\n Xp = Xe\n Zp = Xp\n Xe = G * (var - Zp) + Xp # \"фильтрованное\" значение\n return Xe\n\ndef show_data(request):\n blueled.on()\n d = dht.DHT11(Pin(14))\n d.measure()\n hum = round(kalman(d.humidity()))\n server.send(str(d.temperature()) + \",\" + str(hum))\n blueled.off()\n\ndef show_moisture(request):\n blueled.on()\n m_vin.on()\n s = str(adc.read())\n time.sleep(1)\n m_vin.off()\n server.send(s);\n blueled.off()\n\ndef reboot(request):\n machine.reset()\n\ndef relay1_on(request):\n relay1.off()\n relay1_status(request)\n\ndef relay1_off(request):\n relay1.on()\n relay1_status(request)\n\ndef relay2_on(request):\n relay2.off()\n relay2_status(request)\n\ndef relay2_off(request):\n relay2.on()\n relay2_status(request)\n\ndef relay3_on(request):\n relay3.off()\n relay3_status(request)\n\ndef relay3_off(request):\n relay3.on()\n relay3_status(request)\n\ndef relay4_on(request):\n relay4.off()\n relay4_status(request)\n\ndef relay4_off(request):\n relay4.on()\n relay4_status(request)\n\ndef relay1_status(request):\n server.send(str(relay_state(1)))\n\ndef relay2_status(request):\n server.send(str(relay_state(2)))\n\ndef relay3_status(request):\n server.send(str(relay_state(3)))\n\ndef relay4_status(request):\n server.send(str(relay_state(4)))\n\n''' add request handler '''\nserver.add_route(\"/data\", show_data)\nserver.add_route(\"/moisture\", show_moisture)\nserver.add_route(\"/reboot\", reboot)\nserver.add_route(\"/relay1_on\", relay1_on)\nserver.add_route(\"/relay1_off\", relay1_off)\nserver.add_route(\"/relay2_on\", relay2_on)\nserver.add_route(\"/relay2_off\", relay2_off)\nserver.add_route(\"/relay3_on\", relay3_on)\nserver.add_route(\"/relay3_off\", relay3_off)\nserver.add_route(\"/relay4_on\", relay4_on)\nserver.add_route(\"/relay4_off\", relay4_off)\nserver.add_route(\"/relay1_status\", relay1_status)\nserver.add_route(\"/relay2_status\", 
relay2_status)\nserver.add_route(\"/relay3_status\", relay3_status)\nserver.add_route(\"/relay4_status\", relay4_status)\n\nprint (\"starting http server\")\n''' start server '''\nserver.start()\n\nblueled.off()\n","repo_name":"makeinstall77/strawberry_monitoring","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23502432121","text":"class PancakeStack:\n def __init__(self, stack = []):\n s = []\n for p in stack:\n if p == '+':\n s.append(1)\n else:\n s.append(-1)\n self.stack = s\n def __str__(self):\n return str(self.stack)\n def flip(self, n):\n self.stack[:n] = [p*-1 for p in self.stack[:n]][::-1]\n def is_good(self):\n for p in self.stack:\n if p == -1:\n return False\n return True\n def solve(self, acc):\n if self.is_good():\n return acc\n if self.stack == []:\n return acc\n if self.stack[-1] == 1:\n s = PancakeStack()\n s.stack = self.stack[:-1]\n return s.solve(acc)\n elif self.stack[0] == -1:\n self.flip(len(self.stack))\n return self.solve(acc + 1)\n else:\n count = 0\n for p in self.stack:\n if p == 1:\n count += 1\n else:\n break\n self.flip(count)\n return self.solve(acc + 1)\n\ndef read_data(filename):\n with open(filename) as f:\n num_test_cases = int(f.readline())\n test_cases = []\n for _ in range(num_test_cases):\n test_case = PancakeStack(f.readline().strip())\n test_cases.append(test_case)\n return num_test_cases, test_cases\n\nif __name__ == \"__main__\":\n num_test_cases, test_cases = read_data(\"input.in\")\n for it in range(num_test_cases):\n test_case = test_cases[it]\n print(\"Case #{}:\".format(it + 1), test_case.solve(0))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/795.py","file_name":"795.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23564842811","text":"def last_number(x):\n i = 0\n while i + 1 < len(x):\n if x[i] > x[i + 1]:\n x[i] -= 1\n for j in range(i + 1, len(x)):\n x[j] = 9\n return x\n elif x[i] == x[i + 1]:\n j = i + 1\n fl = False\n while j + 1 < len(x):\n if x[j] < x[j + 1]:\n i = j + 1\n fl = True\n break\n elif x[j] > x[j + 1]:\n x[i] -= 1\n for k in range(i + 1, len(x)):\n x[k] = 9\n return x\n else:\n j += 1\n if fl == False:\n return x\n else:\n i += 1\n return x\n\n\ndef str2list(x):\n return [int(xx) for xx in x if xx != '\\n']\n\ndef list2str(x):\n strconv = ''.join([str(xx) for xx in x])\n if strconv[0] == '0' and len(strconv) > 1:\n return strconv[1:]\n else:\n return strconv\n\n\nif __name__ == '__main__':\n responses = []\n inf = 'tidy2.in'\n outf = 'tidy2.out'\n with open(inf, 'r') as f:\n cases = int(f.readline())\n for i in range(cases):\n numb = f.readline()\n x = str2list(numb)\n res = last_number(x)\n responses.append(list2str(res))\n with open(outf, 'w') as f:\n for i, r in enumerate(responses):\n f.write('Case #{}: {}\\n'.format(i + 1, r))\n\n #\n # x = '20'\n # x = str2list(x)\n # x = last_number(x)\n # print list2str(x)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/601.py","file_name":"601.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43009241375","text":"import os\nimport hashlib\nimport subprocess\nimport requests\nimport threading\nimport time\nimport signal\n\nfrom watchfiles import watch, Change\n\nimport psycopg2\nimport psycopg2.extras\n\ndef log(msg: str):\n print(msg, flush=True)\n\nclass ImmichDatabase:\n def __init__(self, host: str, database: str, user: str, password: str, port: int):\n self.conn = psycopg2.connect(host=host, database=database, user=user, password=password, port=port)\n self.conn.set_client_encoding('UTF8')\n\n def last_removed_asset(self, user_id: str) -> list[psycopg2.extras.RealDictRow]:\n with self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n cur.execute(\"\"\"\n SELECT\n assets_filesync_lookup.asset_path,\n assets_delete_audits.asset_id\n FROM assets_delete_audits\n INNER JOIN assets_filesync_lookup\n ON assets_delete_audits.checksum = assets_filesync_lookup.checksum\n AND assets_delete_audits.user_id = assets_filesync_lookup.user_id\n WHERE assets_filesync_lookup.user_id = %s\n AND assets_delete_audits.file_removed = 'false'\n ORDER BY changed_on desc\n LIMIT 1\n \"\"\", (user_id,))\n\n return cur.fetchall()\n\n def set_asset_removed(self, asset_id: str) -> None:\n with self.conn.cursor() as cur:\n cur.execute(\"\"\"\n UPDATE assets_delete_audits\n SET file_removed = 'true'\n WHERE asset_id = %s\n \"\"\", (asset_id,))\n self.conn.commit()\n\n def save_hash(self, user_id: str, asset_path: str, checksum: bytes) -> None:\n with self.conn.cursor() as cur:\n cur.execute(\"\"\"\n INSERT INTO\n assets_filesync_lookup(user_id, asset_path, checksum)\n VALUES(%s, %s, %s)\n ON CONFLICT (user_id, asset_path) DO\n UPDATE SET checksum = %s\n WHERE assets_filesync_lookup.asset_path = %s\n AND assets_filesync_lookup.user_id = %s;\n \"\"\",\n (user_id, asset_path, checksum,\n checksum, asset_path, user_id))\n self.conn.commit()\n\n def get_asset_id_by_path(self, user_id: str, asset_path: str) -> psycopg2.extras.RealDictRow | None:\n with self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n cur.execute(\"\"\"\n SELECT assets.id\n FROM assets\n INNER JOIN assets_filesync_lookup\n ON assets.checksum = assets_filesync_lookup.checksum\n WHERE assets_filesync_lookup.asset_path = %s\n AND assets_filesync_lookup.user_id = %s\n \"\"\", (asset_path, user_id))\n return cur.fetchone()\n\n def close(self):\n self.conn.commit()\n self.conn.close()\n\nclass ImmichAPI:\n def __init__(self, host: str, api_key: str):\n self.host = host\n self.headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"x-api-key\": api_key\n }\n\n def get_user_id(self) -> str:\n r = requests.get(f\"{self.host}/user/me\", headers=self.headers)\n return r.json()[\"id\"]\n \n def delete_asset(self, asset_id: str) -> dict:\n data = { \"ids\": [ asset_id ] }\n r = requests.delete(f\"{self.host}/asset\", headers=self.headers, json=data)\n return r.json()\n\ndef hash_file(path: str) -> bytes:\n file_hash = hashlib.sha1()\n with open(path, \"rb\") as f:\n fb = f.read(2048)\n while len(fb) > 0:\n file_hash.update(fb)\n fb = f.read(2048)\n return file_hash.digest()\n\ndef ignored_paths(path: str) -> bool:\n if os.path.basename(path).startswith(\".\"):\n return True\n\n if os.path.isdir(path):\n return True\n \n return False\n\ndef hash_all_files(db: ImmichDatabase, user_id: str, path: str) -> None:\n for root, _, files in os.walk(path):\n for file in files:\n if ignored_paths(file):\n continue\n\n file_path = os.path.join(root, file)\n relative_path = 
os.path.relpath(file_path, path)\n db.save_hash(user_id, relative_path, hash_file(file_path))\n log(f\"Hash {file_path} and store in database\")\n\ndef import_asset(db: ImmichDatabase, api: ImmichAPI, key: str, base_path: str, asset_path: str) -> None:\n snap_path = os.getenv(\"SNAP\")\n relative_path = os.path.relpath(asset_path, base_path)\n import_command = [\n f\"{snap_path}/bin/immich-cli\", \"upload\",\n \"--server\", os.getenv(\"IMMICH_SERVER_ADDRESS\"),\n \"--key\", key,\n \"--yes\",\n asset_path\n ]\n\n if snap_path:\n result = subprocess.run(import_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n result = subprocess.CompletedProcess([], 0)\n log(f\"MOC: {import_command}\")\n\n if result and result.returncode != 0:\n log(f\"Error: Failed to import {asset_path}\")\n log(f\"CLI (stdout): {result.stdout.decode('utf-8')}\")\n log(f\"CLI (stderr): {result.stderr.decode('utf-8')}\")\n else:\n checksum = hash_file(asset_path)\n user_id = api.get_user_id()\n db.save_hash(user_id, relative_path, checksum)\n log(f\"Hash {relative_path} and store in database for user {user_id})\")\n\ndef delete_asset(db: ImmichDatabase, api: ImmichAPI, asset_path: str, base_path: str) -> None:\n relative_path = os.path.relpath(asset_path, base_path)\n user_id = api.get_user_id()\n asset = db.get_asset_id_by_path(user_id, relative_path)\n if asset:\n log(f\"Asset {asset['id']} removed from database\")\n api.delete_asset(asset[\"id\"])\n else:\n log(f\"Asset {relative_path} not found in database\")\n\ndef file_watcher(event: threading.Event, db: ImmichDatabase, api: ImmichAPI, api_key: str, user_path: str) -> None:\n log(\"File watcher thread running...\")\n for changes in watch(user_path, recursive=True, stop_event=event):\n for c_type, c_path in changes:\n\n if ignored_paths(c_path):\n continue\n\n if c_type == Change.added:\n log(f\"{c_path} added, import asset to Immich\")\n import_asset(db, api, api_key, user_path, c_path)\n elif c_type == Change.modified:\n log(f\"{c_path} modified, re-import asset to Immich\")\n import_asset(db, api, api_key, user_path, c_path)\n elif c_type == Change.deleted:\n log(f\"{c_path} deleted, mark asset as removed\")\n delete_asset(db, api, c_path, user_path)\n\ndef database_watcher(event: threading.Event, db: ImmichDatabase, api: ImmichAPI, user_path: str) -> None:\n log(\"Database watcher thread running...\")\n user_id = api.get_user_id()\n while not event.is_set():\n for record in db.last_removed_asset(user_id):\n asset_id = record['asset_id']\n asset_path = record['asset_path']\n full_path = f\"{user_path}/{asset_path}\"\n if os.path.exists(full_path):\n log(f\"Remove asset {asset_id} user {user_id} path {asset_path}\")\n os.remove(full_path)\n else:\n log(f\"Asset {asset_id} user {user_id} path {asset_path} already removed\")\n log(f\"Mark asset {asset_id} as removed\")\n db.set_asset_removed(asset_id)\n time.sleep(5)\n\ndef main():\n db = ImmichDatabase(\n host=os.environ[\"DB_HOSTNAME\"],\n database=os.environ[\"DB_DATABASE_NAME\"],\n user=os.environ[\"DB_USERNAME\"],\n password=os.environ[\"DB_PASSWORD\"],\n port=5432\n )\n\n api_key = os.environ[\"IMMICH_API_KEY\"]\n immich = ImmichAPI(os.environ[\"IMMICH_SERVER_URL\"], api_key)\n snap_common = os.environ[\"SNAP_COMMON\"]\n user_id = immich.get_user_id()\n user_path = f\"{snap_common}/sync/{user_id}\"\n\n log(f\"Starting sync for user {user_id} at {user_path}\")\n\n log(f\"Initial file hash import of all files in {user_path}\")\n hash_all_files(db, user_id, user_path)\n\n stop_event = 
threading.Event()\n\n watch_thread = threading.Thread(\n target=file_watcher,\n args=(stop_event, db, immich, api_key, user_path)\n )\n\n database_thread = threading.Thread(\n target=database_watcher,\n args=(stop_event, db, immich, user_path)\n )\n\n watch_thread.start()\n database_thread.start()\n\n signal.signal(signal.SIGTERM, lambda signum, frame: stop_event.set())\n\n while True:\n if not watch_thread.is_alive():\n log(\"Critical: Thread watch is not alive\")\n if not database_thread.is_alive():\n log(\"Critical: Thread database is not alive\")\n time.sleep(10)\n\nif __name__ == '__main__':\n main()\n","repo_name":"nsg/immich-distribution","sub_path":"src/bin/sync-service.py","file_name":"sync-service.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"39904203962","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nArxiv Telegram Bot - Base Program\n\nProgram is used to start up the telegram bot\n\"\"\"\n\nimport logging\nimport os\n\nimport dotenv\n\nfrom telegram.ext import (\n Updater,\n MessageHandler,\n CommandHandler,\n Dispatcher,\n PicklePersistence,\n)\n\nfrom arxiv_telegram_bot.functions.handlers import (\n start,\n fetch,\n preference_conversation_handler,\n error,\n schedule,\n unschedule,\n)\n\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\ndotenv.load_dotenv()\nPORT = int(os.environ.get(\"PORT\", 8443))\nTOKEN = os.environ.get(\"TOKEN\")\nHEROKU_URL = os.environ.get(\"HEROKU_URL\")\n\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n\n # Create the Updater and pass it your bot's token.\n persistence = PicklePersistence(filename=\"/tmp/arxivTelegramBot\")\n updater = Updater(TOKEN, use_context=True, persistence=persistence)\n dispatcher: Dispatcher = updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"test\", start))\n dispatcher.add_handler(CommandHandler(\"latest\", fetch))\n dispatcher.add_handler(preference_conversation_handler())\n dispatcher.add_handler(CommandHandler(\"schedule\", schedule))\n dispatcher.add_handler(CommandHandler(\"unschedule\", unschedule))\n dispatcher.add_error_handler(error)\n\n if os.environ.get(\"ENV\") == \"HEROKU\":\n updater.start_webhook(\n listen=\"0.0.0.0\",\n port=int(PORT),\n url_path=TOKEN,\n webhook_url=f\"{HEROKU_URL}/{TOKEN}\",\n )\n else:\n updater.start_polling()\n\n updater.idle()\n","repo_name":"sonaalPradeep/arxiv-telegram-bot","sub_path":"arxiv_telegram_bot/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72237813633","text":"\"\"\"empty message\n\nRevision ID: 7f30c93eb9e2\nRevises: 7b6ade3a0ce9\nCreate Date: 2023-05-10 20:46:02.842541\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7f30c93eb9e2'\ndown_revision = '7b6ade3a0ce9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('group', schema=None) as batch_op:\n batch_op.add_column(sa.Column('start_time', sa.Time(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('group', schema=None) as batch_op:\n batch_op.drop_column('start_time')\n\n # ### end Alembic commands ###\n","repo_name":"Cpierswim/SwimTeamManager","sub_path":"backend/migrations/versions/7f30c93eb9e2_.py","file_name":"7f30c93eb9e2_.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41352690959","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.optimizers import RMSprop\nimport argparse\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport kml_utils\n\nSLASH = 0.2 # percentage of test(validation) data\n\n# parsing arguments\ndef parse_args():\n parser = argparse.ArgumentParser(description='image classifier')\n parser.add_argument('--data', dest='data_dir', default='data')\n parser.add_argument('--list', dest='list_dir', default='list')\n args = parser.parse_args()\n return args\n\nargs = parse_args()\nif kml_utils.exist_list(args.list_dir):\n print('Lists already exist in ./{0}. Use these lists.'.format(args.list_dir))\n classes, train_list, test_list = kml_utils.load_lists(args.list_dir)\nelse:\n print('Lists do not exist. Create list from ./{0}.'.format(args.data_dir))\n classes, train_list, test_list = kml_utils.create_list(args.data_dir, args.list_dir, SLASH)\n\ntrain_image, train_label = kml_utils.load_images(classes, train_list)\ntest_image, test_label = kml_utils.load_images(classes, test_list)\n\n# convert to numpy.array\nx_train = np.asarray(train_image)\ny_train = np.asarray(train_label)\nx_test = np.asarray(test_image)\ny_test = np.asarray(test_label)\n\nprint('train samples: ', len(x_train))\nprint('test samples: ', len(x_test))\n\nNUM_CLASSES = len(classes)\nBATCH_SIZE = 32\nEPOCH = 100\n\n# building the model\nprint('building the model ...')\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(32, 3, 3, border_mode='valid',\n input_shape=x_train.shape[1:]))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(32, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Convolution2D(64, 3, 3, border_mode='valid'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(256))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(NUM_CLASSES))\nmodel.add(Activation('softmax'))\n\nrmsplop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\nmodel.compile(loss='categorical_crossentropy', optimizer=rmsplop, metrics=['accuracy'])\n\n# training\nhist = model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n verbose=1,\n nb_epoch=EPOCH,\n validation_data=(x_test, y_test)) \n\n# save model\ndate_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\nmodel.save('kml_' + date_str + '.model')\n\n# plot loss\nprint(hist.history.keys())\nloss = hist.history['loss']\nval_loss = hist.history['val_loss']\nacc = hist.history['acc']\nval_acc = hist.history['val_acc']\n\nnb_epoch = len(loss)\nfig, ax1 = plt.subplots()\nax1.plot(range(nb_epoch), loss, label='loss', color='b')\nax1.plot(range(nb_epoch), val_loss, label='val_loss', color='g')\nleg = plt.legend(loc='upper left', fontsize=10)\nleg.get_frame().set_alpha(0.5)\nax2 = ax1.twinx()\nax2.plot(range(nb_epoch), acc, label='acc', color='r')\nax2.plot(range(nb_epoch), val_acc, label='val_acc', color='m')\nleg = plt.legend(loc='upper right', fontsize=10)\nleg.get_frame().set_alpha(0.5)\nplt.grid()\nplt.xlabel('epoch')\nplt.savefig('graph_' + date_str + 
'.png')\nplt.show()\n","repo_name":"domkade/kill_me_learning","sub_path":"kml_train.py","file_name":"kml_train.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
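# A hedged sketch (not from the repository above) of the same convolutional
# stack in the Keras 2.x API, which replaces Convolution2D(32, 3, 3,
# border_mode=...) with Conv2D(32, (3, 3), padding=...) and fit(nb_epoch=...)
# with fit(epochs=...); the input shape and class count are placeholders.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

def build_model(input_shape=(32, 32, 3), num_classes=10):
    model = Sequential([
        Conv2D(32, (3, 3), padding='valid', activation='relu', input_shape=input_shape),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ])
    # same loss/optimizer family as the script above
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model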
+{"seq_id":"13406273012","text":"import glob\nimport os\nimport tarfile\nimport torch.utils.data as data\nfrom PIL import Image\nimport numpy as np\nfrom torchvision.datasets.utils import download_url\n\n\nclass DomainDataset(data.Dataset):\n \"\"\"\n An abstract class for dataset that can be download as a zip file.\n The dataset can be kept in the zip file.\n \n Args:\n data_dir (string): Root directory of dataset where directory exists or will be saved to if download is set to\n True.\n url (str): url to the compressed dataset file\n md5_file (str): md5 checksum of the file\n filename (str): which name to save the downloaded file\n base_folder (str): where the images are located after the extraction\n num_images (int): how many images are located inside the base folder\n transform (callable, optional): A function/transform that takes in an\n PIL image and returns a transformed version. E.g\n ``transforms.RandomCrop``\n download (bool, optional): If true, downloads the dataset from the\n internet and puts it in data_dir directory. If dataset is already\n downloaded, it is not downloaded again.\n is_zip: if `True`, reads the images directly from compressed archive\n \"\"\"\n \n def __init__(self,\n data_dir,\n url=None,\n md5_file=None,\n filename='dataset.tar.gz',\n num_images=10000,\n transform=None,\n download=False,\n is_zip=True):\n\n super().__init__()\n\n self.data_dir = data_dir\n self.transform = transform\n\n self.url = url\n self.filename = filename\n self.zipped_filepath = os.path.join(self.data_dir, self.filename)\n\n self.md5_file = md5_file\n\n self.num_images = num_images\n self.is_zip = is_zip\n self.zfile = None\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted.' 
+ ' You can use download=True to download it')\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n image\n \"\"\"\n\n if self.zfile is None:\n self.zfile = tarfile.open(self.zipped_filepath)\n\n img = self.data[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n if self.is_zip:\n with self.zfile.extractfile(img) as f:\n img = Image.open(f)\n img = img.convert('RGB')\n elif isinstance(img, str):\n img = Image.open(img)\n img = img.convert('RGB')\n else:\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img\n\n def __len__(self):\n return len(self.data)\n\n def _check_integrity(self):\n if self.is_zip and os.path.exists(self.zipped_filepath):\n return True\n\n if not self.is_zip and os.path.isdir(self.data_dir) and ((self.num_images is None or self.data is None)\n or len(self.data) == self.num_images):\n return True\n\n return False\n\n @property\n def data(self):\n if not hasattr(self, '_data'):\n if self.is_zip:\n self._data = list(f.name for f in tarfile.open(self.zipped_filepath, 'r').getmembers() if f.isfile())\n else:\n self._data = [\n f for f in glob.iglob(os.path.join(self.data_dir, '**', '*'), recursive=True)\n if DomainDataset.__is_image_file(f)\n ]\n\n return self._data\n\n def download(self):\n\n if self._check_integrity():\n return\n\n download_url(self.url, self.data_dir, self.filename, self.md5_file)\n\n if not self.is_zip:\n # extract file\n tar = tarfile.open(self.zipped_filepath, 'r')\n tar.extractall(self.data_dir)\n tar.close()\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.data_dir)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n \n @staticmethod\n def __is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n Args:\n filename (string): path to a file\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n","repo_name":"PatrickgHayes/gmm-dnn-for-interpretability","sub_path":"datasets/domain_dataset.py","file_name":"domain_dataset.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
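# A usage sketch for the DomainDataset defined above (importable from
# datasets/domain_dataset.py per the record's sub_path). The URL and md5 are
# hypothetical placeholders; wrapping the dataset in a DataLoader is plain
# torch.utils.data usage, not something the class itself mandates.
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets.domain_dataset import DomainDataset

dataset = DomainDataset(
    data_dir='./data/domain',
    url='https://example.com/dataset.tar.gz',  # placeholder, not a real archive
    md5_file=None,                             # None skips checksum verification
    transform=transforms.ToTensor(),
    download=True,
    is_zip=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)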
+{"seq_id":"39297617035","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \n# Vamos a utilizar las herramientas de Web Scrapping para la siguiente página y poder obtener la lista de los paises Suramericanos y las variables claves de su economía: https://en.wikipedia.org/wiki/South_America\n\n# In[210]:\n\n\n# Importamos librerías\nimport requests\n\n\n# In[211]:\n\n\n# Escribimos la página web que vamos a scrapear\nwebsite_url = 'https://en.wikipedia.org/wiki/South_America'\npage = requests.get(website_url)\n\nprint(page.text)\n\n\n# In[212]:\n\n\n# Hacemos simulación de que alguien entra a la pagina web. Podemos encontrarlo en la pagina: https://developers.whatismybrowser.com/useragents/explore/software_name/chrome/\n# Y pasamos el \"User-agent\" para que pueda simular interacción con la página usando Navegador web y evite que nos bloqueen\ninter = {\"User-agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'}\n\nanswer = requests.get(website_url, headers=inter)\n\n# Si se obtiene el código de respuesta \"200\", significa que no hemos tenido problema\nprint(answer)\n\n\n# Empezamos leyendo el código fuente de la página web y creando un objeto BeautifulSoup.\n# \n# BeautifulSoup permite crear un árbol de análisis para las páginas que se buscan analizar y que se pueden usar para extraer datos de HTML.\n# \n# La función prettify() en BeautifulSoup nos permitirá ver cómo se estructuran las etiquetas en el documento.\n\n# In[213]:\n\n\nfrom bs4 import BeautifulSoup\n\nsoup = BeautifulSoup(page.content, \"html.parser\")\nprint(soup.prettify())\n\n\n# In[214]:\n\n\ntitle = soup.find(id=\"firstHeading\")\ntitle\n\n\n# In[215]:\n\n\n# Imprimimos el titulo de la página web\nprint(title.text)\n\n\n# In[216]:\n\n\ntables = soup.find_all(\"table\", {\"class\":\"wikitable sortable\"})\n\n\n# In[217]:\n\n\n#En la página web podemos observar que la tabla que se desea extraer la información es la número 8\nmy_table = soup.find_all('table')[8]\nprint(my_table)\n\n\n# In[218]:\n\n\n# Buscamos todos los elementos 'th' en el cuerpo de la tabla, usando find_all():\nth = my_table.find_all(\"th\")\nprint(th)\n\n\n# In[219]:\n\n\n# Extraemos el título de los enlaces, para conocer los nombres de las columnas:\nfor i in range(len(th)):\n link = th[i].find(\"a\")\n\n if link != None:\n print(link.get(\"title\"))\n\n\n# In[220]:\n\n\n# Extraemos solo los valores de la tabla\nmytable=soup.find_all('table')[8]\nrows=mytable.find_all('tr')\nrows=rows[1:-1]\n\nc1=[]\nc2=[]\nc3=[]\nc4=[]\nc5=[]\nc6=[]\nc7=[]\n\nfor row in rows:\n x=row.find_all('td')\n# x=x[1].text\n# c1.append(x[:-1])\n x1=x[0]\n x2=x[1]\n x3=x[2]\n x4=x[3]\n x5=x[4]\n x6=x[5]\n x7=x[6]\n c1.append(x1.text[:-1])\n c2.append(x2.text[:-1])\n c3.append(x3.text[:-1])\n c4.append(x4.text[:-1])\n c5.append(x5.text[:-1])\n c6.append(x6.text[:-1])\n c7.append(x7.text[:-1])\n \nprint(c2)\n \n\n\n# In[221]:\n\n\nimport pandas as pd\n\ndf=pd.DataFrame()\n\ndf['Country']=c1\ndf['GPD_nominal']=c2\ndf['GDP_PPP']=c3\ndf['GDP_PPP_per_capita']=c4\ndf['Merchandise_exports']=c5\ndf['HDI']=c6\ndf['Percent_less_than_2']=c7\n\ndf\n\n\n# In[222]:\n\n\n# Exportamos la tabla a un archivo csv\ndf.to_csv(\"South_America_Economy.csv\", index = False)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"danamejia1810/Web-Scrapping-Economia-Paises-Sudamericanos","sub_path":"Código Práctica Tipología .py","file_name":"Código Práctica Tipología 
.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74820125315","text":"# coding=utf-8\n\n\nfrom excel_generator.Style import Style\n# if you need add style for single gird, add it as need_merge cell\nfrom excel_generator.Common import bg_color, alignment, side_style, font_style, number_format\n\n# RSM and SD\n\ncommon1_matrix = [\n [None, None],\n [None, 'Product'],\n ['PCMO', 'WT'],\n ['PCMO', 'Ultra'],\n ['PCMO', 'HX8'],\n ['PCMO', 'HX7'],\n ['PCMO', 'HX6'],\n ['PCMO', 'HX5'],\n ['PCMO', 'HX3'],\n ['PCMO', 'HX2'],\n ['PCMO', 'Other'],\n ['CRTO', 'R6'],\n ['CRTO', 'R5'],\n ['CRTO', 'R4 Plus'],\n ['CRTO', 'R4'],\n ['CRTO', 'R3'],\n ['CRTO', 'R2'],\n ['CRTO', 'Gadus'],\n ['CRTO', 'Spirax'],\n ['CRTO', 'Other'],\n ['Sum Total', None]\n]\n\ncommon1_header_product = ['WT', 'Ultra', 'HX8', 'HX7', 'HX6', 'HX5', 'HX3', 'HX2', 'Other',\n 'R6', 'R5', 'R4 Plus', 'R4', 'R3', 'R2', 'Gadus', 'Spirax', 'Other']\n\ncommon1_need_merge = [\n {'coordinate': [2, 0, 10, 0], 'style': Style(bg_color[4], al=alignment[5])},\n {'coordinate': [11, 0, 19, 0], 'style': Style(bg_color[4], al=alignment[5])},\n {'coordinate': [20, 0, 20, 1],\n 'style': Style(bg_color[4], border=side_style[3], font=font_style[2], al=alignment[5])},\n\n {'coordinate': [1, 1, 1, 1], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [2, 1, 2, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [3, 1, 3, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [4, 1, 4, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [5, 1, 5, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [6, 1, 6, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [7, 1, 7, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [8, 1, 8, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [9, 1, 9, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [10, 1, 10, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [11, 1, 11, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [12, 1, 12, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [13, 1, 13, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [14, 1, 14, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [15, 1, 15, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [16, 1, 16, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [17, 1, 17, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [18, 1, 18, 1], 'style': Style(bg_color[4], al=alignment[1])},\n {'coordinate': [19, 1, 19, 1], 'style': Style(bg_color[4], al=alignment[1])}\n]\n\ncommon_header1 = {'matrix': common1_matrix, 'merge': common1_need_merge, 'product': common1_header_product,\n 'row': 21, 'col': 2, 'owner': 1, 'a_column': 7, 'owner_width': 3, 'follower_width': 6}\n\n# SGM\n\ncommon_header_sgm_1_0 = [\n ['SD', 'RSM', 'Ref Target KL', 'RSM&SD Submitted Target KL', 'Target Volume KL',\n 'Target C3 $', 'Target Proceed $']\n]\n\ncommon_header_sgm_1_0_merge = [\n {'coordinate': [0, 0, 0, 0], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 1, 0, 1], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])}\n]\n\ncommon_header_sgm_1_1 = [\n ['SD', 'RSM', 'Province', 'City', 'LE KL', 'Market size KL(this year)', 'Market Share %',\n 'Market size KL(last year)', 'Market Growth %', 'Platform', 'Market Share Score',\n 'Market Growth Score', 'Platform Score', 
'Market Share Score(0.75)',\n 'Market Growth Score(0.15)',\n 'Platform Score(0.1)', 'Total Score', 'Increase %', 'Ref Target KL', 'Target KL']\n]\n\ncommon_header_sgm_1_1_merge = [\n {'coordinate': [0, 0, 0, 0], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 1, 0, 1], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 2, 0, 2], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 3, 0, 3], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])}\n]\n\ncommon_header_sgm_1_1_formula = [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1]\ncommon_header_sgm_1_1_total = [0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\ncommon_header_sgm_1_1_number = [None, None, None, None, None, None, number_format['percent'], None,\n number_format['percent'], None,\n None, None, None, None, None, None, None, number_format['percent'], None, None]\n\ncommon_header_sgm_2_0 = [\n ['SD', 'RSM', 'Province', 'City', 'Ref Volume KL', 'Ref C3 $', 'Ref Proceed $',\n 'Target Volume KL', 'Target C3 $', 'Target Proceed $']\n]\n\ncommon_header_sgm_2_1 = [\n ['SD', 'RSM', 'Province', 'City', 'UC3 $', 'UC3 $', 'UC3 $', 'UC3 $', 'UC3 $',\n 'UC3 $', 'UC3 $', 'UC3 $', 'UC3 $', 'UNP $', 'UNP $', 'UNP $', 'UNP $', 'UNP $',\n 'UNP $', 'UNP $', 'UNP $', 'UNP $', 'Portfolio %', 'Portfolio %', 'Portfolio %',\n 'Portfolio %', 'Portfolio %', 'Portfolio %', 'Portfolio %', 'Portfolio %', 'Portfolio %'],\n ['SD', 'RSM', 'Province', 'City', 'WT', 'Ultra', 'HX8', 'HX7', 'HX6', 'HX5', 'HX3', 'HX2', 'Other',\n 'WT', 'Ultra', 'HX8', 'HX7', 'HX6', 'HX5', 'HX3', 'HX2', 'Other',\n 'WT', 'Ultra', 'HX8', 'HX7', 'HX6', 'HX5', 'HX3', 'HX2', 'Other']\n]\ncommon_header_sgm_2_1_merge = [\n {'coordinate': [0, 0, 1, 0], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 1, 1, 1], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 2, 1, 2], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 3, 1, 3], 'style': Style(bg_color[4], font=font_style[2], al=alignment[1])},\n {'coordinate': [0, 4, 0, 12], 'style': Style(bg_color[4], font=font_style[2], al=alignment[2])},\n {'coordinate': [0, 13, 1, 21], 'style': Style(bg_color[4], font=font_style[2], al=alignment[2])},\n {'coordinate': [0, 22, 1, 30], 'style': Style(bg_color[4], font=font_style[2], al=alignment[2])}\n]\n\ncommon_header_sgm_1 = {\n 0: {'data': common_header_sgm_1_0, 'scale': [1, 7], 'merge': common_header_sgm_1_0_merge, 'formula': None,\n 'number_format': None},\n 1: {'data': common_header_sgm_1_1, 'scale': [1, 20], 'merge': common_header_sgm_1_1_merge,\n 'formula': common_header_sgm_1_1_formula, 'number_format': common_header_sgm_1_1_number,\n 'total': common_header_sgm_1_1_total}\n}\n\ncommon_header_sgm_2 = {\n 0: {'data': common_header_sgm_2_0, 'scale': [1, 10], 'merge': None, 'formula': None, 'number_format': None},\n 1: {'data': common_header_sgm_2_1, 'scale': [2, 31], 'merge': common_header_sgm_2_1_merge, 'formula': [],\n 'number_format': None}\n}\n\nheader_index = {\n 'RSM': common_header1,\n 'SGM': {1: common_header_sgm_1,\n 2: common_header_sgm_2,\n 3: common_header_sgm_1,\n 4: 
common_header_sgm_2}\n}\n","repo_name":"intwzt/ShellExcel","sub_path":"template/HeaderTemplate.py","file_name":"HeaderTemplate.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19927543175","text":"from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('sessions', '0001_initial'),\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='article',\n name='owner',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='article',\n name='owner_sessions',\n field=models.ManyToManyField(blank=True, db_table='article_owners', to='sessions.Session'),\n ),\n ]\n","repo_name":"Flaiers/flatype","sub_path":"src/apps/core/migrations/0002_initial.py","file_name":"0002_initial.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"27344817364","text":"import copy\nimport math\nfrom functools import reduce\nfrom model.Cluster import Cluster\nfrom model.PointsWorker import PointsWorker\n\nEXTERNAL_K_MAX = 20\nEXTERNAL_K_MIN = 10\nINTERNAL_K_MAX = 10\nINTERNAL_K_MIN = 4\n\n\nclass UnionClusterizationWorker:\n\n def __init__(self, points):\n self.worker = PointsWorker()\n self.worker.points = points\n\n def make_clusterization(self, k_max, k_min, clusters=None):\n\n k_cluster_dict = {}\n distance_k_dict = {}\n clusters = clusters\n\n for k in range(k_max, k_min - 1, -1):\n print(f\"Получаем {k}-кластаризацию\")\n # получение и разметка кластеров\n clusters = [*self.worker.make_union_clustering(k, clusters)]\n print(f\"Получаем знаки на {k}-кластаризацию\")\n signs = [f\"{i + 1} class\" for i in range(len(clusters))]\n for cl, sign in zip(clusters, signs):\n cl.accept_class_sign(sign)\n\n # оценка кластеризации\n internal = reduce(lambda a, cl: a + cl.get_internal_cluster_distance(), clusters, 0) / len(clusters)\n external = Cluster([cl.get_center() for cl in clusters]).get_internal_cluster_distance()\n k_cluster_dict[k] = copy.deepcopy(clusters)\n distance_k_dict[math.fabs(internal - external)] = k\n\n return k_cluster_dict[distance_k_dict[min(distance_k_dict.keys())]]\n","repo_name":"makdim5/SpaceGeometry3DAnalizer","sub_path":"ML_Union_Algorithm/model/clusterization.py","file_name":"clusterization.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23414918351","text":"#!/bin/python2\n\nimport fileinput\n\ndef read_int(lines, cur):\n return (int(lines[cur]), cur + 1)\n\ndef read_matr(lines, cur):\n matr = [map(int, lines[cur+i].split()) for i in xrange(4)]\n return (matr, cur + 4)\n\nlines = [line for line in fileinput.input()]\n\nT = int(lines[0])\ncur = 1\nfor i in xrange(1, T+1):\n n1, cur = read_int(lines, cur)\n m1, cur = read_matr(lines, cur)\n n2, cur = read_int(lines, cur)\n m2, cur = read_matr(lines, cur)\n\n ans = set(m1[n1-1]).intersection(set(m2[n2-1]))\n ans = list(ans)\n if len(ans) == 1:\n msg = str(ans[0])\n elif len(ans) == 0:\n msg = \"Volunteer cheated!\"\n elif len(ans) > 1:\n msg = \"Bad magician!\"\n\n print (\"Case #%i: \" + msg) % (i,)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2823.py","file_name":"2823.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74628377153","text":"\"\"\"\nThread Pool:\nReusing existing Threads, Because creating threads are expensive.\nAlso, Most of the computer OS caps the number of Thread that they can run.\nAnd program could be crash if you will try to create more number of threads.\n\nWhy Creating Threads are Expensive ?\nA: For that we need to look at the structure of Thread Pool:\n 1. components:\n 1. Work Producers (tasks like Network call, i/o task, db connections, interrupts, rw to a file, etc)\n 2. Job Queue (intermediate b/w producer and pool)\n 3. Thread Pool (list of threads of fixed len)\n 2. lot of calls to OS and OS need to allocate OS and CPU\n\nSo, ThreadPoolExecutor comes to rescue, an approach to keep up the throughput is to create & instantiate\na pool of idle threads beforehand and reuse the threads from this pool until all the threads are exhausted.\nAlso, the pool keeps track and manages the threads lifecycle and schedules them on the programmer’s behalf\nthus making the code much simpler and less buggy.\n\nwe can use 3 methods to spawn threads from ThreadPoolExecutor:\n1. map(fn, *iterables, timeout = None, chunksize = 1)\n2. submit(fn, *args, **kwargs) -> Future:\n3. shutdown(wait = True, *, cancel_futures = False)\n i. It must be called before executor.submit() and executor.map() method else it would throw RuntimeError.\n ii. It signals the executor to free up all resources when the futures are done executing.\n iii. wait=True makes the method not to return until execution of all threads is done and resources are freed up.\n iv. cancel_futures=True then the executor will cancel all the future threads that are yet to start.\n\n\n`workers` are just MAX number of running tasks on parallel threads, hence a thread is nothing but worker !\n\n\"\"\"\n\nimport time\n\nimport logging\nimport random\n\nfrom threading import Thread, get_ident, current_thread\nfrom threading import Timer # utilized to run a code after a specified time period\n\n\n# concurrent is the high level version of Threading to hide all the ugly working of thread details\nfrom concurrent.futures import Future # The upcoming proxy object\nfrom concurrent.futures import ThreadPoolExecutor # the Thread Pool Executor, Python 3.2+\n# from concurrent.futures import ProcessPoolExecutor # the Process Pool Executor\n# from concurrent.futures import as_completed\n\n\n# to see the concept of thread reusing we need to make uneven time period for each tasks\nwait_time = 10\n\n\ndef some_task(item):\n \"\"\"This Function will take 14 sec to complete\"\"\"\n # no_tasks = random.randrange(start=0, stop=10, step=1)\n logging.info(f\"Task: {item} started!\")\n # id of current Thread, is created by OS and id belongs to the worker\n logging.info(f'Thread {item}: id = {get_ident()}')\n logging.info(f'Thread {item}: name = {current_thread().name}')\n logging.info(f'Thread {item}: sleeping for {wait_time}')\n time.sleep(random.randrange(wait_time))\n logging.info(f'Thread {item}: finished')\n\n\n# Main function\ndef main():\n logging.basicConfig(\n format='%(levelname)s - %(asctime)s: %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG\n )\n logging.info('App Start')\n\n cores = 4 # MacBook Pro cores\n workers = 2*cores + 1\n items = 20\n\n # No need to Join the Threads\n # No need to Monitor or Handle the Threads\n # automatically spawn a new worker when there is\n # Said objects use significant amount of memory and for last project uses the large memory.\n # To reduce this memory management overhead (allocating and deallocating 
many threads)\n with ThreadPoolExecutor(max_workers=workers) as executor:\n executor.map(some_task, range(0, items))\n\n # some of the ids will gets repeated in the terminal that depicts the reuse of Threads\n logging.info('App Finished')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"deepanshumehtaa/Concurrency-Python","sub_path":"tut4_ThreadPoolExecutor_map.py","file_name":"tut4_ThreadPoolExecutor_map.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
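# The tutorial above drives the pool with executor.map; a minimal sketch of the
# submit()/Future side of the same concurrent.futures API. submit returns a
# Future immediately, and as_completed yields each future as its task finishes.
from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = {executor.submit(square, n): n for n in range(8)}
    for future in as_completed(futures):
        print(futures[future], '->', future.result())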
+{"seq_id":"28034874997","text":"import torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nbnmom = 1e-3\n\n\nclass mie(nn.Module):\n\n\tdef __init__(self,modim,tardim,dp):\n\t\t\n\t\tsuper(mie, self).__init__()\n\t\ttctrl = modim[-1]\n\t\tself.modim = modim\n\t\tself.tardim = tardim\n\t\tself.dp = dp\n\n\t\tself.moac = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tif idx == len(modim)-2:\n\t\t\t\tself.moac.append(nn.Softplus(modim[idx+1]).cuda())\n\t\t\telse:\t\t\t\t\n\t\t\t\tself.moac.append(nn.PReLU(modim[idx+1]).cuda())\n\n\t\tself.xmoac = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tself.xmoac.append(nn.PReLU(modim[len(modim)-idx-2]).cuda())\n\n\t\tself.ac = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tself.ac.append(nn.PReLU(tardim[idx+1]).cuda())\n\n\t\tself.xac = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tself.xac.append(nn.PReLU(tardim[len(tardim)-idx-2]).cuda())\n\n\t\tself.net = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tx = nn.ModuleList([])\n\t\t\tfor j in range(tctrl):\n\t\t\t\tx.append(nn.Linear(tardim[idx], tardim[idx+1]).cuda())\n\t\t\tself.net.append(x)\n\n\t\tself.xnet = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tx = nn.ModuleList([])\n\t\t\tfor j in range(tctrl):\n\t\t\t\tx.append(nn.Linear(tardim[len(tardim)-idx-1], tardim[len(tardim)-idx-2]).cuda())\n\t\t\tself.xnet.append(x)\n\n\t\tself.monet = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tx = nn.ModuleList([])\n\t\t\tx.append(nn.Linear(modim[idx], modim[idx+1]).cuda())\n\t\t\tself.monet.append(x)\n\n\t\tself.xmonet = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tx = nn.ModuleList([])\n\t\t\tx.append(nn.Linear(modim[len(modim)-idx-1], modim[len(modim)-idx-2]).cuda())\n\t\t\tself.xmonet.append(x)\n\n\t\t\t\n\t\tself.d = nn.Dropout(p=dp)\n\t\tbnmom=1e-3\n\n\t\tself.bn = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tself.bn.append(nn.BatchNorm1d(tardim[idx], affine=True, momentum=bnmom).cuda())\n\n\t\tself.xbn = nn.ModuleList([])\n\t\tfor idx in range(len(tardim)-1):\n\t\t\tself.xbn.append(nn.BatchNorm1d(tardim[len(tardim)-idx-2], affine=True, momentum=bnmom).cuda())\n\n\t\tself.mobn = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tself.mobn.append(nn.BatchNorm1d(modim[idx], affine=True, momentum=bnmom).cuda())\n\n\t\tself.xmobn = nn.ModuleList([])\n\t\tfor idx in range(len(modim)-1):\n\t\t\tself.xmobn.append(nn.BatchNorm1d(modim[len(modim)-idx-2], affine=True, momentum=bnmom).cuda())\n\n\n\tdef m(self, x0): #x0:bsxdatadim\n\t\t\n\t\tdobn = 1>2\n\n\t\tfor idx in range(len(self.modim)-1):\n\t\t\tif dobn:\n\t\t\t\tx0 = self.mobn[idx](x0)\n\t\t\tx0 = self.monet[idx][0](x0)\n\t\t\tx0 = self.moac[idx](x0)\n\n\t\treturn x0\n\n\tdef xm(self, x0): #x0:bsxdatadim\n\t\t\n\t\tdobn = 1>2\n\n\t\tfor idx in range(len(self.modim)-1):\n\t\t\tx0 = self.xmonet[idx][0](x0)\n\t\t\tx0 = self.xmoac[idx](x0)\n\t\t\tif dobn and idx < len(self.modim)-2:\n\t\t\t\tx0 = self.xmobn[idx](x0)\n\t\treturn x0\n\t\t\n\tdef g(self, x0,weights): #x0:bsxdatadim\n\t\t\n\t\ttctrl = len(weights)\n\t\t\n\t\tdobn = 1>2\n\n\t\tfor idx in range(len(self.tardim)-1):\n\t\t\tif dobn:\n\t\t\t\tx0 = self.bn[idx](x0)\n\t\t\ttx = 0\n\t\t\ttx_test = 0\n\t\t\tw_sum = 0\n\t\t\tfor j in range(tctrl):\n\t\t\t\tv = self.net[idx][j](x0)\n\t\t\t\tw = weights[j]/torch.sum(weights)\n\t\t\t\tw_sum += w\n\t\t\t\twv = w*v\n\t\t\t\ttx_test = 
tx_test + v\n\t\t\t\ttx = tx + wv\n\t\t\t\t#tx = tx + weights[j]/torch.sum(weights) * self.net[idx][j](x0)\n\t\t\tx0 = tx\n\t\t\tx0 = self.ac[idx](x0)\n\n\t\treturn x0\n\n\tdef h(self, x0, weights): #x0:bsxdatadim\n\n\t\ttemp = int(x0.shape[0])\n\t\tx0 = x0.reshape((1, temp))\n\n\t\ttctrl = len(weights)\n\n\t\tdobn = 1>2\n\n\t\tfor idx in range(len(self.tardim)-1):\n\t\t\ttx = 0\n\t\t\tfor j in range(tctrl):\n\t\t\t\ttx = tx + weights[j]/torch.sum(weights)* self.xnet[idx][j](x0)\n\t\t\tx0 = tx\n\t\t\tx0 = self.xac[idx](x0)\n\t\t\tif dobn and idx < len(self.tardim)-2:\n\t\t\t\tx0 = self.xbn[idx](x0)\n\t\t\t\n\t\treturn x0\n\n\n\n","repo_name":"YuanBoot/Intrinsic_Garment_Space","sub_path":"scripts/mie_model.py","file_name":"mie_model.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":229,"dataset":"github-code","pt":"61"}
+{"seq_id":"5699659606","text":"# Baekjoon Online Judge - 2776번. 암기왕\n\nimport sys\ninput = sys.stdin.readline\n\n\nT = int(input())\n\nfor _ in range(T):\n N = int(input())\n numbers = list(map(int, input().split()))\n numbers.sort()\n M = int(input())\n target = list(map(int, input().split()))\n answer = []\n for num in target:\n left, right = 0, N - 1\n found = False\n while left <= right:\n mid = (left + right) // 2\n if num == numbers[mid]:\n found = True\n break\n # 찾고자 하는 값이 현재 값 보다 작다면 값의 범위를 줄인다.\n if num < numbers[mid]:\n right = mid - 1\n # 찾고자 하는 값이 현재 값 보다 크다면 값의 범위를 늘린다.\n else:\n left = mid + 1\n\n if found:\n answer.append(1)\n else:\n answer.append(0)\n\n for i in answer:\n print(i)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_2776.py","file_name":"BOJ_2776.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29837844737","text":"import re\nfrom BeautifulSoup import BeautifulSoup\n\nfrom fiftystates.scrape import NoDataForPeriod\nfrom fiftystates.scrape.legislators import Legislator, LegislatorScraper\n\ndef split_name(full_name):\n last_name = full_name.split(',')[0]\n rest = ','.join(full_name.split(',')[1:])\n\n m = re.search('(\\w+)\\s([A-Z])\\.$', rest)\n if m:\n first_name = m.group(1)\n middle_name = m.group(2)\n else:\n first_name = rest\n middle_name = ''\n\n if last_name.endswith(' Jr.'):\n first_name += ' Jr.'\n last_name = last_name.replace(' Jr.', '')\n\n return (first_name.strip(), last_name.strip(), middle_name.strip())\n\nclass KYLegislatorScraper(LegislatorScraper):\n state = 'ky'\n\n def scrape(self, chamber, year):\n if year != '2009':\n raise NoDataForPeriod(year)\n\n if chamber == 'upper':\n leg_list_url = 'http://www.lrc.ky.gov/senate/senmembers.htm'\n else:\n leg_list_url = 'http://www.lrc.ky.gov/house/hsemembers.htm'\n\n with self.urlopen(leg_list_url) as leg_list:\n leg_list = BeautifulSoup(leg_list)\n leg_table = leg_list.find(id=\"table2\")\n\n for row in leg_table.findAll('tr')[1:]:\n leg_link = row.findAll('td')[1].font\n if leg_link: leg_link = leg_link.a\n if not leg_link:\n # Vacant seat\n continue\n\n full_name = leg_link.contents[0].strip()\n\n district = \"\"\n for text in row.findAll('td')[2].findAll(text=True):\n district += text.strip()\n district = district.strip()\n\n self.parse_legislator(chamber, year, full_name,\n district, leg_link['href'])\n\n def parse_legislator(self, chamber, year, full_name, district, url):\n with self.urlopen(url) as leg_page:\n leg_page = BeautifulSoup(leg_page)\n name_str = leg_page.find('strong').contents[0].strip()\n\n if name_str.endswith('(D)'):\n party = 'Democrat'\n elif name_str.endswith('(R)'):\n party = 'Republican'\n elif name_str.endswith('(I)'):\n party = 'Independent'\n else:\n party = 'Other'\n\n full_name = full_name.replace('\\n', '').replace('"', '\"')\n full_name = full_name.replace('\\t', '').replace('\\r', '')\n (first_name, last_name, middle_name) = split_name(full_name)\n\n legislator = Legislator(year, chamber, district, full_name,\n first_name, last_name, middle_name, party)\n legislator.add_source(url)\n\n self.save_legislator(legislator)\n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/ky/legislators.py","file_name":"legislators.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"36183420302","text":"import pygame, random\nimport wizards.constants, wizards.monster_ai\n\nclass BaseMonster(pygame.sprite.Sprite):\n def __init__(self, mid, x, y, name, level, m_type):\n super().__init__()\n self.monster_id = mid\n self.x = x\n self.y = y \n self.name = name\n self.level = level\n self.orig_hp = self.get_hp(level)\n self.hp = self.orig_hp \n self.dead = False\n self.weapon = None\n self.current_weapon = None\n self.inventory = []\n self.m_type = m_type\n self.fleeing = False\n self.morale = 6\n self.save_magic = 16\n self.undead = False\n self.never_surrender = False\n self.hit_chance = wizards.bags.NumberBag(1, 20, 2)\n\n self.weight = 0\n\n self.charmable = True\n self.charmed = False\n self.charmed_by = None\n self.charm_duration = 0\n self.charm_gfx = None\n\n self.asleep = False\n self.asleep_for = 0\n\n # TODO Monster AI\n self.ai = None\n #self.ai = self.set_ai(wizards.monster_ai.PassiveAI(coll_map))\n self.level_seen = {}\n self.moved = False\n self.player_seen = False\n\n def get_id(self):\n return self.monster_id\n \n def updatePosition(self,direction,col_map):\n \n new_x = self.x\n new_y = self.y\n if direction == 0:\n new_y = self.y - 1\n elif direction == 1:\n new_x = self.x + 1\n elif direction == 2:\n new_y = self.y + 1\n elif direction == 3:\n new_x = self.x - 1 \n \n if self.is_valid_move(new_x, new_y, col_map):\n self.x = new_x\n self.y = new_y\n \n self.rect.x = self.x * wizards.constants.CHAR_SIZE\n self.rect.y = self.y * wizards.constants.CHAR_SIZE\n\n def __str__(self):\n return self.name + \": \" + str(self.monster_id) + \" >> \"+ str(self.y) + \"_\" + str(self.x)\n\n def __eq__(self, other):\n return self.monster_id == other.monster_id\n\n def __lt__(self, other):\n return self.monster_id < other.monster_id\n\n def __hash__(self):\n return self.monster_id\n\n def set_ai(self, a):\n self.ai = a\n\n def set_position(self, x, y, monster_map):\n\n if (self.x, self.y) in monster_map:\n del monster_map[(self.x, self.y)]\n\n self.x = x\n self.y = y\n monster_map[(self.x, self.y)] = self.monster_id\n self.rect.x = self.x * wizards.constants.CHAR_SIZE\n self.rect.y = self.y * wizards.constants.CHAR_SIZE\n \n def is_valid_move(self, x, y, col_map):\n if col_map[y][x] == 0:\n return True\n else:\n return False\n\n def take_damage(self, dmg):\n self.hp -= dmg\n if self.hp < 1:\n self.dead = True\n\n def get_hp(self, num_of_dice):\n \"\"\"Get initial hitpoints, level * D8\"\"\"\n total = 0\n for i in range(num_of_dice):\n total += random.randrange(1,9)\n return total\n\n def in_panic(self):\n roll = (random.randrange(6) + 1) + (random.randrange(6) + 1)\n if roll > self.morale:\n return True\n else:\n return False\n\n def get_weapon_damage(self):\n if self.current_weapon is not None:\n return self.current_weapon.max_damage\n else:\n return 0\n\n def do_turn(self, player, player_map, collision_map, monster_map, combat_resovler):\n if self.ai is not None:\n self.ai.update(self, player, player_map, collision_map, monster_map, combat_resovler)\n print(self.name + \" \" + str(self.monster_id) + \" has moved\")\n\n def add_item_to_inventory(self, i):\n self.inventory.append(i)","repo_name":"Grufferz/wizards-of-twiddly","sub_path":"wizards/base_monster.py","file_name":"base_monster.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14868653851","text":"import unittest\nfrom montyhall import MontyHall\n\nclass MontyHallTest(unittest.TestCase):\n\n def test_make_choice(self):\n num_doors = 3\n num_rounds = 1000\n one_third = float(1) / 3\n two_thirds = float(2) / 3\n accepted_delta = 0.05\n\n correct_guesses_initial = [MontyHall(num_doors).make_choice(keep_initial_choice=True) for i in range(num_rounds)].count(True)\n correct_guesses_changed = [MontyHall(num_doors).make_choice(keep_initial_choice=False) for i in range(num_rounds)].count(True)\n\n # Keeping initial guess should give us 1/3 correct correct guesses\n self.assertAlmostEqual(correct_guesses_initial, one_third * num_rounds, delta=accepted_delta * num_rounds)\n # Changing initial guess should give us 2/3 correct correct guesses\n self.assertAlmostEqual(correct_guesses_changed, two_thirds * num_rounds, delta=accepted_delta * num_rounds)\n\nif __name__ == '__main__':\n unittest.main()\n ","repo_name":"fsto/pyMontyHall","sub_path":"test_montyhall.py","file_name":"test_montyhall.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24974687886","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# @FileName : demo_2_4.py\n# @Time : 2022/9/26 19:28\n# @Author : liang.lian\nimport re\n\ntext1 = \"11/27/2012\"\ntext2 = \"Nov 27, 2012\"\ntext3 = 'Today is 11/27/2012. PyCon starts 3/13/2013.'\n\ndatepat = re.compile(r'\\d+/\\d+/\\d+')\n\nif datepat.match(text1):\n print('yes')\nelse:\n print('no')\n\nif datepat.match(text2):\n print('yes')\nelse:\n print('no')\n\ndatepat1 = re.compile(r'(\\d+)(/\\d+)(/\\d+)')\nprint(datepat1.findall(text3))\n\nfor m in datepat1.finditer(text3):\n print(m.groups())","repo_name":"comeonlian/code-dev","sub_path":"python-cookbook/chapter02/demo_2_4.py","file_name":"demo_2_4.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"75144368834","text":"import cv2\nimport os\n\n#Initialize camera\ncam1 = cv2.VideoCapture(0);\ncam2 = cv2.VideoCapture(0);\n\nret1, image1 = cam1.read();\nret2, image2 = cam2.read();\n\nif ret1:\n os.system('mkdir ./pics/')\n cv2.imwrite(\"./pics/ex1.jpg\", image1)\n\nif ret2:\n os.system('mkdir ./pics/')\n cv2.imwrite(\"./pics/ex2.jpg\", image2)\n\ncam1.release()\ncam2.release()","repo_name":"RexGoliath1/Argyle","sub_path":"examples/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"75237175554","text":"import yaml\nimport os\nfrom utils import recreate_dirs\n\n\nclass Config:\n\n def __init__(self, cfg_id, create_dirs=False):\n self.id = cfg_id\n cfg_name = 'config/statereg/%s.yml' % cfg_id\n if not os.path.exists(cfg_name):\n print(\"Config file doesn't exist: %s\" % cfg_name)\n exit(0)\n cfg = yaml.safe_load(open(cfg_name, 'r'))\n\n # create dirs\n self.base_dir = 'results'\n self.cfg_dir = '%s/statereg/%s' % (self.base_dir, cfg_id)\n self.model_dir = '%s/models' % self.cfg_dir\n self.result_dir = '%s/results' % self.cfg_dir\n self.log_dir = '%s/log' % self.cfg_dir\n self.tb_dir = '%s/tb' % self.cfg_dir\n os.makedirs(self.model_dir, exist_ok=True)\n os.makedirs(self.result_dir, exist_ok=True)\n if create_dirs:\n recreate_dirs(self.log_dir, self.tb_dir)\n\n # training config\n self.meta_id = cfg['meta_id']\n self.seed = cfg['seed']\n self.fr_num = cfg['fr_num']\n self.v_net = cfg.get('v_net', 'lstm')\n self.v_net_param = cfg.get('v_net_param', None)\n self.v_hdim = cfg['v_hdim']\n self.mlp_dim = cfg['mlp_dim']\n self.cnn_fdim = cfg['cnn_fdim']\n self.lr = cfg['lr']\n self.num_epoch = cfg['num_epoch']\n self.iter_method = cfg['iter_method']\n self.shuffle = cfg.get('shuffle', False)\n self.num_sample = cfg.get('num_sample', 20000)\n self.save_model_interval = cfg['save_model_interval']\n self.fr_margin = cfg['fr_margin']\n self.pose_only = cfg.get('pose_only', False)\n self.causal = cfg.get('causal', False)\n self.cnn_type = cfg.get('cnn_type', 'mlp')\n\n # misc config\n self.humanoid_model = cfg['humanoid_model']\n self.vis_model = cfg['vis_model']\n","repo_name":"Garfield-kh/PoseTriplet","sub_path":"imitator/pose_imitation/utils/statereg_config.py","file_name":"statereg_config.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"61"}
+{"seq_id":"24798336808","text":"from contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom typing import Iterable, Iterator, List, Optional, Sequence, Collection, TypeVar, Union\n\nfrom rich.live import Live\nfrom rich.progress import Progress, BarColumn, TimeElapsedColumn, TimeRemainingColumn, TextColumn\nfrom rich.traceback import install\ninstall(show_locals=False)\n\nV = TypeVar('V')\n\n@dataclass\nclass Bar:\n progress: Progress\n job_id: int\n\n def iter(\n self,\n it: Iterable[V],\n description: str = \"\") -> Iterator[V]:\n try:\n total = len(it)\n except:\n total = -1\n\n self.progress.reset(self.job_id)\n self.progress.update(\n self.job_id,\n total=total if total > 0 else 3,\n description=description)\n if total < 0:\n self.progress.advance(self.job_id)\n for i in it:\n yield i\n if total > 0:\n self.progress.advance(self.job_id)\n\n def range(\n self,\n *args,\n description=\"\"):\n for arg in args:\n assert not isinstance(arg, str), f\"'{arg}' is not int, it's maybe description\"\n return self.iter(\n range(*args),\n description=description)\n\n def update(self, description: str = None):\n self.progress.update(self.job_id, description=description)\n\n\n@contextmanager\ndef progress_bar(num: int = 1, refresh_hz=1):\n job_progress = Progress(\n \"{task.description}\",\n TimeElapsedColumn(),\n BarColumn(),\n TextColumn(\"[progress.percentage]{task.percentage:>3.0f}%\"),\n TimeRemainingColumn()\n )\n\n bars = tuple(Bar(job_progress, job_progress.add_task(\"\"))\n for _ in range(num))\n\n with Live(job_progress, refresh_per_second=refresh_hz):\n yield bars\n","repo_name":"liyihc/clad","sub_path":"clad/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41787239065","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport traceback\nimport math\nfrom scrapy.conf import settings\n\nfrom product_ranking.guess_brand import guess_brand_from_first_words\nfrom product_ranking.items import Price, SiteProductItem\nfrom product_ranking.spiders import BaseProductsSpider, cond_set_value\nfrom product_ranking.powerreviews import parse_powerreviews_buyer_reviews\n\nfrom scrapy import Request\nfrom scrapy.log import DEBUG\n\n\nclass BJSProductsSpider(BaseProductsSpider):\n name = 'bjs_products'\n allowed_domains = ['bjs.com', 'bjswholesale-cors.groupbycloud.com', 'readservices-b2c.powerreviews.com']\n\n SEARCH_URL = \"https://bjswholesale-cors.groupbycloud.com/api/v1/search\"\n PRODUCT_URL = \"https://api.bjs.com/digital/live/api/v1.0/pdp/10201?productId={product_id}&pageName=PDP&clubId=0096\"\n REVIEW_URL = \"http://readservices-b2c.powerreviews.com/m/9794/l/en_US/product/{part_num}/reviews?\"\n\n payload = {\n \"area\": \"BCProduction\",\n \"biasing\": {\"biases\": []},\n \"collection\": \"productionB2CProducts\",\n \"excludedNavigations\": ['visualVariant.nonvisualVariant.availability'],\n \"fields\": ['*'],\n \"pageSize\": 40,\n \"query\": \"\",\n \"refinements\": [],\n \"skip\": 0,\n \"sort\": {\n \"field\": \"_relevance\",\n \"order\": \"Descending\"\n }\n }\n\n headers = {\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'\n }\n\n def __init__(self, *args, **kwargs):\n self.total_matches = None\n super(BJSProductsSpider, self).__init__(\n site_name=self.allowed_domains[0],\n *args,\n **kwargs)\n self.user_agent = 'Slackbot-LinkExpanding 1.0 (+https://api.slack.com/robots)'\n\n settings.overrides['DOWNLOADER_CLIENTCONTEXTFACTORY'] = 'product_ranking.utils.TLSFlexibleContextFactory'\n\n def start_requests(self):\n for request in super(BJSProductsSpider, self).start_requests():\n if not self.product_url:\n data = self.payload.copy()\n data['query'] = self.searchterms[0]\n data['skip'] = 0\n\n request = request.replace(url=self.SEARCH_URL, method=\"POST\", body=json.dumps(data),\n headers=self.headers,\n meta={'search_term': self.searchterms[0], 'remaining': self.quantity})\n if self.product_url:\n prod = SiteProductItem()\n prod['is_single_result'] = True\n prod['url'] = self.product_url\n prod['search_term'] = ''\n\n product_id = self.product_url.split('/')[-1]\n url = self.PRODUCT_URL.format(product_id=product_id)\n request = request.replace(url=url, callback=self._parse_single_product, meta={'product': prod})\n\n yield request\n\n def _parse_single_product(self, response):\n return self.parse_product(response)\n\n def _scrape_total_matches(self, response):\n if self.total_matches:\n return self.total_matches\n try:\n contents = json.loads(response.body)\n self.total_matches = int(contents.get('totalRecordCount'))\n return self.total_matches\n except Exception as e:\n self.log(\"Exception looking for total_matches {}\".format(e), DEBUG)\n finally:\n self.total_matches = 0\n\n def _scrape_next_results_page_link(self, response):\n meta = response.meta\n current_page = meta.get('current_page', 1)\n total_matches = self._scrape_total_matches(response)\n results_per_page = self._scrape_results_per_page(response)\n if not results_per_page:\n results_per_page = 40\n if total_matches and current_page < math.ceil(total_matches / float(results_per_page)):\n current_page += 1\n st = 
response.meta['search_term']\n data = self.payload.copy()\n data['query'] = st\n data['skip'] = (current_page - 1) * 40\n meta['current_page'] = current_page\n return Request(\n url=self.SEARCH_URL, method=\"POST\", body=json.dumps(data), headers=self.headers, meta=meta)\n\n def _scrape_product_links(self, response):\n links = []\n try:\n contents = json.loads(response.body)\n for record in contents.get('records', []):\n link = 'https://www.bjs.com' + record.get('allMeta', {}).get('visualVariant')[0].get('nonvisualVariant', [])[0].get('product_url')\n links.append(link)\n except Exception as e:\n self.log(\"Exception looking for product links {}\".format(e), DEBUG)\n finally:\n for link in links:\n prod = SiteProductItem()\n prod['url'] = link\n prod_id = link.split('/')[-1]\n link = self.PRODUCT_URL.format(product_id=prod_id)\n yield link, prod\n\n @staticmethod\n def _parse_title(data):\n title = data.get('description', {}).get('name')\n return title\n\n def _parse_price(self, data):\n price = data.get('maximumItemPrice', {})\n if not price:\n price = data.get('bjsClubProduct', [{}])[0].get('clubItemStandardPrice', {})\n try:\n return Price(price=float(price.get('amount')), priceCurrency='USD') if price else None\n except:\n self.log('Error Parsing Price: {}'.format(traceback.format_exc()))\n\n @staticmethod\n def _parse_image(data):\n images = data.get('productImages', {}).get('fullImage')\n return images\n\n def _parse_categories(self, data):\n category_list = []\n try:\n categories_info = data.get('breadCrumbDetail')\n category_level = categories_info.get('Levels')\n for index in range(1, category_level + 1):\n category = categories_info.get('Level{}'.format(index)).split('||')[-1]\n category_list.append(category)\n return category_list\n except:\n self.log(\"Error while parsing categories {}\".format(traceback.format_exc()))\n\n @staticmethod\n def _search_attribute(attribute_name, data):\n if data.get('descriptiveAttributes'):\n for attr in data.get('descriptiveAttributes'):\n if attr.get('name') == attribute_name:\n return attr.get('attributeValueDataBeans', [{}])[0].get('value')\n\n def parse_product(self, response):\n meta = response.meta.copy()\n product = meta['product']\n\n try:\n data = json.loads(response.body_as_unicode())\n except:\n self.log('JSON not found or invalid JSON: {}'\n .format(traceback.format_exc()))\n product['not_found'] = True\n return product\n\n title = self._parse_title(data)\n if title is None:\n product[\"no_longer_available\"] = True\n return product\n cond_set_value(product, 'title', title)\n\n price = self._parse_price(data)\n cond_set_value(product, 'price', price)\n\n image_url = self._parse_image(data)\n cond_set_value(product, 'image_url', image_url)\n\n brand = guess_brand_from_first_words(product['title'])\n cond_set_value(product, 'brand', brand)\n\n if data.get('bjsitems', []):\n sku = data.get('bjsitems', [])[0].get('articleId')\n cond_set_value(product, 'sku', sku)\n cond_set_value(product, 'reseller_id', sku)\n\n model = data.get('manufacturerPartNumber')\n cond_set_value(product, 'model', model)\n\n upc = self._search_attribute('upc', data)\n cond_set_value(product, 'upc', upc)\n\n categories = self._parse_categories(data)\n cond_set_value(product, 'categories', categories)\n\n if categories:\n cond_set_value(product, 'department', categories[-1])\n\n # Available Online: 1 or 0 (1 = yes, 0 = no)\n if data.get('bjsClubProduct', []):\n online_avail = data.get('bjsClubProduct', [])[0].get('itemAvailableOnline', 'N')\n 
product['available_online'] = 1 if online_avail == 'Y' else 0\n\n # Available In-club(store): 1 or 0 (1 = yes, 0 = no)\n if data.get('bjsClubProduct', []):\n club_avail = data.get('bjsClubProduct', [])[0].get('itemAvailableInClub', 'N')\n product['available_store'] = 1 if club_avail == 'Y' else 0\n\n product['is_out_of_stock'] = str(data.get('description', {}).get('available')) == '0'\n\n product['is_in_store_only'] = str(product.get('available_online', None)) == '0' and str(\n product.get('available_store', None)) == '1'\n\n product['locale'] = \"en-US\"\n\n part_number = data.get('partNumber')\n\n if part_number:\n url = self.REVIEW_URL.format(part_num=part_number)\n return Request(url=url,\n callback=self._parse_reviews,\n meta={'product': product},\n headers={'authorization': '7c12e7e9-fe30-4e7a-bcb8-8376b9117a6b'},\n dont_filter=True)\n\n return product\n\n @staticmethod\n def _parse_reviews(response):\n meta = response.meta\n product = meta.get('product')\n cond_set_value(product, 'buyer_reviews', parse_powerreviews_buyer_reviews(response))\n\n return product\n","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/product_ranking/spiders/bjs.py","file_name":"bjs.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"22518833564","text":"import argparse\nimport sys\nfrom peheader import *\n\ndef start():\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--version', action='version', version='version 0.0.1')\n parser.add_argument('-i', '--info',action=\"store_true\", help='PE文件头信息')\n parser.add_argument('filename')\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = start()\n if args.info or (len(sys.argv) == 2 and args.filename != None):\n d = {}\n r = open(args.filename,'rb')\n dosheader = r.read(0x40)\n ImageDosHeader = ImageDosHeader(dosheader)\n r.seek(ImageDosHeader.PEoffser(),0)\n ntheader = r.read(0xf0)\n ImageNtHeader = ImageNtHeader(ntheader)\n ImageNtHeader.show()","repo_name":"yifeng-lee/PEParser","sub_path":"PEparser.py","file_name":"PEparser.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74203215233","text":"#!/usr/local/bin/python3\n\n\ndef tag_bloco(conteudo, *args, classe='success', inline=False):\n tag = 'span' if inline else 'div'\n conteudo = conteudo if not callable(conteudo) else conteudo(*args)\n return f'<{tag} class={classe}>{conteudo}{tag}>'\n\n\ndef tag_lista(*itens):\n lista = ''.join(f'
{item} ' for item in itens)\n return f''\n\n\nif __name__ == '__main__':\n print(tag_bloco('teste1'))\n print(tag_bloco('teste2', inline=True))\n print(tag_bloco('teste3', classe='danger'))\n print(tag_lista('teste1', 'teste2'))\n print(tag_bloco(tag_lista, 'teste1', 'teste2', classe='danger'))\n","repo_name":"flaviogf/courses","sub_path":"coders/curso_python/funcoes/v4.py","file_name":"v4.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"34564710118","text":"import argparse\r\nimport os\r\nfrom dartel_pipeline import batched_spm12_dartel\r\n\r\n\r\nif __name__=='__main__':\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--num_encoding_layers', type = int, default = 2, help = 'keep the default setting')\r\n parser.add_argument('--num_filters', type = int, default = 64, help = 'keep the default setting')\r\n parser.add_argument('--num_subjects', type = int, default = 2, help = 'keep the default setting')\r\n parser.add_argument('--num_voxels_per_subject', type = int , default = 1, help = 'keep the default setting')\r\n parser.add_argument('--filepath_csv', type = str, help = 'the location of the .csv file containing the meta-data assocated to the dataset in cause')\t\r\n parser.add_argument('--dirpath_raw_data', type = str, help = 'the location of the directory containing the raw T1 nifti files')\r\n parser.add_argument('--dataset_name', type = str, help = 'the name of the dataset in cause, it will influence where the results are written')\r\n parser.add_argument('--size_batch_preprocessing', type = int, help = 'how many nifti files to process at the same time')\r\n args = parser.parse_args()\r\n\r\n ##### spm12 pre-processing #####\r\n batched_spm12_dartel(img_dir = args.dirpath_raw_data, name_of_dataset = args.dataset_name, size_batch = args.size_batch_preprocessing)\r\n\r\n\r\n ##### getting LocalBrainAge predictions #####\r\n cmd = 'python3 ./LocalBrainAge_testing.py --filepath_csv='+str(args.filepath_csv)+' --dirpath_gm='+str(args.dirpath_raw_data)+'/gm_data --dirpath_wm='+str(args.dirpath_raw_data)+'/wm_data --dataset_name='+str(args.dataset_name)","repo_name":"SebastianPopescu/U-NET-for-LocalBrainAge-prediction","sub_path":"full_testing_script.py","file_name":"full_testing_script.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"32759738878","text":"##parameters=datastructure\n\"\"\"\nDo the necessary rendering or redirection after an entry has been\nsuccessfully created and filled with the initial values by the user.\n\nThe context is the directory.\n\nMay return a rendered document, or do a redirect.\n\"\"\"\n\nfrom urllib import urlencode\n\ndirname = context.getId()\nid_field = context.id_field\nid = datastructure.getDataModel()[id_field]\n\nportal_url = context.portal_url()\nargs = urlencode({'dirname': dirname,\n 'id': id,\n 'portal_status_message': 'psm_entry_created',\n })\naction_path = 'taskdirectory_entry_edit_form?'+args\ncontext.REQUEST.RESPONSE.redirect('%s/%s' % (portal_url, action_path))\n","repo_name":"nuxeo-cps/products--CPSTaskManager","sub_path":"skins/task_directory/taskdirectory_entry_created.py","file_name":"taskdirectory_entry_created.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21872380766","text":"#Document Enhancer\r\n\r\nimport docx\r\nfrom docx import Document\r\nfrom docx.shared import Inches\r\nfrom docx.shared import Pt\r\nimport random\r\nfrom docx.shared import RGBColor\r\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\r\nfrom docx.enum.style import WD_STYLE_TYPE\r\nimport re\r\n\r\nimport os\r\nimport sys\r\nimport torch\r\nimport argparse\r\nimport numpy as np\r\nfrom gpt2Pytorch.GPT2.model import (GPT2LMHeadModel)\r\nfrom gpt2Pytorch.GPT2.utils import load_weight\r\nfrom gpt2Pytorch.GPT2.config import GPT2Config\r\nfrom gpt2Pytorch.GPT2.sample import sample_sequence\r\nfrom gpt2Pytorch.GPT2.encoder import get_encoder\r\n\r\nfrom gpt2Pytorch.mainLib import *\r\n\r\n\r\ndocument = Document(\"test.docx\")\r\ndocument = Document()\r\n\r\ndef formatStyles():\r\n style = document.styles['Normal']\r\n font = style.font\r\n font.name = 'Times New Roman'\r\n font.size = Pt(12)\r\n font.color.rgb = RGBColor(0,0,0)\r\n font.underline = False\r\n style.paragraph_format.line_spacing = 2\r\n\r\n styles = document.styles\r\n styles['Title'].delete()\r\n style = styles.add_style('Title', WD_STYLE_TYPE.PARAGRAPH)\r\n\r\n style = document.styles['Title']\r\n font = style.font\r\n font.name = 'Times New Roman'\r\n font.size = Pt(20)\r\n font.color.rgb = RGBColor(0,0,0)\r\n font.underline = False\r\n style.paragraph_format.line_spacing = 2\r\n\r\n\r\ndef addheader():\r\n section = document.sections[0]\r\n header = section.header\r\n head = header.paragraphs[0]\r\n list = readParagraph(0)\r\n head.text = list.split( )[1]\r\n head.alignment = WD_ALIGN_PARAGRAPH.RIGHT\r\n\r\n\r\ndef addheading():\r\n list = readHeading()\r\n heading = document.add_paragraph(list[0])\r\n heading = document.add_paragraph(list[1])\r\n heading = document.add_paragraph(headingDate(list[2]))\r\n heading = document.add_paragraph(list[3])\r\n\r\n heading.alignment = WD_ALIGN_PARAGRAPH.LEFT\r\n heading.paragraph_format.line_spacing = 2\r\n\r\n\r\ndef addtitle():\r\n title = document.add_paragraph(readParagraph(4))\r\n title.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n\r\ndef addbody():\r\n list = readBody()\r\n for i in list:\r\n paragraph = document.add_paragraph('\\t' + i)\r\n for paragraph_text in AIconverter(readParagraph(5)).split('\\n\\n'):\r\n paragraph = document.add_paragraph(\"\\t\"+paragraph_text.strip())\r\n\r\n paragraph_format = paragraph.paragraph_format\r\n paragraph_format.line_spacing = 2\r\n\r\n\r\ndef readFile():\r\n file = open(\"test.txt\", \"r\")\r\n content = file.read()\r\n\r\n\r\ndef readHeading():\r\n doc = docx.Document('test.docx')\r\n headingList = [doc.paragraphs[0].text, doc.paragraphs[1].text, doc.paragraphs[2].text, doc.paragraphs[3].text]\r\n return headingList;\r\n\r\n\r\ndef readParagraph(paragraph):\r\n doc = docx.Document('test.docx')\r\n return doc.paragraphs[paragraph].text\r\n\r\n\r\ndef headingDate(date):\r\n try:\r\n day = re.search(\"([^\\d])([0-2]|[0-2][0-9])([^\\d])\" , \" \"+date+\" \")\r\n print(day.group())\r\n year = re.search(\"[2-9][0-9][0-9][0-9]\" , date)\r\n print(year.group())\r\n month = re.search(\"[^\\s\\d][^\\s\\d][^\\s\\d]\" , date)\r\n print(month.group())\r\n newdate=day.group()[1:-1]+\" \"+month.group().capitalize()+\". 
\"+year.group()\r\n print(newdate)\r\n except:\r\n print(\"error\")\r\n newdate=date\r\n return newdate\r\n\r\n\r\ndef readBody():\r\n doc = docx.Document('test.docx')\r\n list = []\r\n i=len(doc.paragraphs)-1\r\n while i < len(doc.paragraphs):\r\n list.append(doc.paragraphs[i].text)\r\n i+=1\r\n return list\r\n\r\n\r\n\r\n\r\n#MAIN\r\ndef main():\r\n formatStyles()\r\n addheader()\r\n addheading()\r\n addtitle()\r\n addbody()\r\n document.save('main2.docx')\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n\r\ndocumentold = Document()\r\nparagraph = 0\r\nword = 0\r\n\r\n#returns true if a word is ignorable and false if important\r\ndef ignorable(word):\r\n ignore = [\"The\", \"the\", \"To\", \"to\", \"Of\", \"of\", \"Be\", \"be\", \"and\", \"A\", \"a\", \"That\", \"that\", \"Have\", \"have\", \"I\",\r\n \"It\", \"it\", \"For\", \"for\", \"Not\", \"not\", \"With\", \"with\", \"You\", \"you\", \"As\", \"as\", \"Do\", \"do\", \"At\", \"at\"\r\n \"This\", \"this\", \"By\", \"by\", \"or\", \"An\", \"an\", \"From\", \"from\", \"Will\", \"will\", \"Is\", \"is\"]\r\n for x in range( 0, len(ignore) ):\r\n if(word == ignore[x]):\r\n return True\r\n return False\r\n\r\n#takes in a string and returns a dictionary on the word count of each word\r\ndef getRepetitive( text ):\r\n unique = {}\r\n\r\n for word in text:\r\n if ignorable(word) == False:\r\n if len(unique) == 0:\r\n unique[word] = 1\r\n else:\r\n for y in list(unique):\r\n if(word == y):\r\n unique[word] += 1\r\n break\r\n unique[word] = 1\r\n return unique\r\n","repo_name":"sethrodg/doc-enhancer","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"2077531569","text":"# multiAgents.py\n# --------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\nimport sys\nfrom util import manhattanDistance\nfrom game import Directions\nfrom game import Actions\nimport random, util\n\nfrom game import Agent\n\ndef closestFoodOrComputeDistance(curP, destP, gameState):\n if curP == destP: return None, 0\n walls = gameState.getWalls()\n foodList = gameState.getFood().asList()\n count = 0\n points = [curP,]\n queue = util.Queue()\n queue.push(curP)\n while not queue.isEmpty():\n count += 1\n qsize = len(queue.list)\n for i in range(qsize):\n temp = queue.pop()\n for direction in [Directions.WEST, Directions.NORTH, Directions.EAST, Directions.SOUTH]:\n x,y = temp\n dx, dy = Actions.directionToVector(direction)\n nextx, nexty = int(x + dx), int(y + dy)\n if not walls[nextx][nexty]:\n if (nextx, nexty) not in points:\n queue.push((nextx, nexty))\n points.append((nextx, nexty))\n \n if destP == None:\n if (nextx, nexty) in foodList:\n return (nextx, nexty), count\n elif (nextx, nexty) == destP:\n return None, count\n return None, None\n\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n\n The code below is provided as a guide. 
You are welcome to change\n it in any way you see fit, so long as you don't touch our method\n headers.\n \"\"\"\n\n\n def getAction(self, gameState):\n \"\"\"\n You do not need to change this method, but you're welcome to.\n\n getAction chooses among the best options according to the evaluation function.\n\n Just like in the previous project, getAction takes a GameState and returns\n some Directions.X for some X in the set {North, South, West, East, Stop}\n \"\"\"\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n \"Add more of your code here if you want to\"\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n Design a better evaluation function here.\n\n The evaluation function takes in the current and proposed successor\n GameStates (pacman.py) and returns a number, where higher numbers are better.\n\n The code below extracts some useful information from the state, like the\n remaining food (newFood) and Pacman position after moving (newPos).\n newScaredTimes holds the number of moves that each ghost will remain\n scared because of Pacman having eaten a power pellet.\n\n Print out these variables to see what you're getting, then combine them\n to create a masterful evaluation function.\n \"\"\"\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n return successorGameState.getScore()\n\ndef scoreEvaluationFunction(currentGameState):\n \"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n\n This evaluation function is meant for use with adversarial search agents\n (not reflex agents).\n \"\"\"\n return currentGameState.getScore()\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. 
Agent (game.py)\n is another abstract class.\n \"\"\"\n\n def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent (question 2)\n \"\"\"\n def maxVal(self,state,agent,depth): #pacman's turn get the max val\n val = -999999\n actions = state.getLegalActions(agent)\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n for action in actions:\n val = max(val,self.minimax(state.generateSuccessor(agent,action),agent+1,depth+1))\n return val\n\n def minVal(self,state,agent,depth): #ghosts' turn to get the min val\n val = 999999\n for action in state.getLegalActions(agent):\n val = min(val,self.minimax(state.generateSuccessor(agent,action),agent+1,depth+1))\n return val\n\n def minimax(self,gameState,agent,depth):\n score = 0\n if agent == self.agentCount: ##it's pacman's turn\n agent = self.index\n if depth == self.depth*self.agentCount or gameState.isWin() or gameState.isLose():## finish?\n score = self.evaluationFunction(gameState)\n elif agent == self.index: ## judge again, is pacman's turn?\n score = self.maxVal(gameState,agent,depth)\n else: ##ghosts's turn\n score = self.minVal(gameState,agent,depth)\n return score\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action from the current gameState using self.depth\n and self.evaluationFunction.\n\n Here are some method calls that might be useful when implementing minimax.\n\n gameState.getLegalActions(agentIndex):\n Returns a list of legal actions for an agent\n agentIndex=0 means Pacman, ghosts are >= 1\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n \"\"\"\n depth = 0\n agentIndex = self.index\n Dict = {}\n self.agentCount = gameState.getNumAgents()\n actions = gameState.getLegalActions(agentIndex)\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n for action in actions:\n eval_f = self.minimax(gameState.generateSuccessor(agentIndex,action),agentIndex+1,depth+1)\n Dict[eval_f] = action\n choices = Dict[max(Dict)]\n return choices\n\n\n# class MinimaxAgent(MultiAgentSearchAgent):\n# \"\"\"\n# Your minimax agent (question 2)\n# \"\"\"\n\n# def getAction(self, gameState):\n# \"\"\"\n# Returns the minimax action from the current gameState using self.depth\n# and self.evaluationFunction.\n\n# Here are some method calls that might be useful when implementing minimax.\n\n# gameState.getLegalActions(agentIndex):\n# Returns a list of legal actions for an agent\n# agentIndex=0 means Pacman, ghosts are >= 1\n\n# gameState.generateSuccessor(agentIndex, action):\n# Returns the successor game state after an agent takes an action\n\n# gameState.getNumAgents():\n# Returns the total number of agents in the game\n# \"\"\"\n# \"*** YOUR CODE HERE ***\"\n# # return self.dfMiniMaxSearch(self.depth, gameState, True)[1]\n\n# def dfMiniMax(depth, curState, agentId):\n# if depth == 0 or curState.isWin() or curState.isLose():\n# return self.evaluationFunction(curState)\n\n# legalActions = curState.getLegalActions(agentId)\n# if agentId == 0:\n# maxScore = -sys.maxint\n# for action in legalActions:\n# nextState = curState.generateSuccessor(agentId, action)\n# maxScore = max(maxScore, dfMiniMax(depth, nextState, agentId+1))\n# return maxScore\n# else 
:\n# minScore = sys.maxint\n# for action in legalActions:\n# nextState = curState.generateSuccessor(agentId, action)\n# if agentId+1 == curState.getNumAgents():\n# minScore = min(minScore, dfMiniMax(depth-1, nextState, 0))\n# else: minScore = min(minScore, dfMiniMax(depth, nextState, agentId+1))\n# return minScore\n\n# legalActions = gameState.getLegalActions(0)\n# childList = [gameState.generateSuccessor(0, action) for action in legalActions]\n# scores = [dfMiniMax(self.depth, nextState, 1) for nextState in childList]\n# bestScore = max(scores)\n# bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n# # print dir(gameState.problem)\n# # closestFood = closestFoodOrComputeDistance(gameState.getPacmanPosition(), None, gameState)\n# # dists = [closestFoodOrComputeDistance(nextState.getPacmanPosition(), closestFood[0], nextState)[1] \\\n# # for nextState in childList]\n# # chosenIndex = min(bestIndices, key=lambda x: dists[x])\n# random.seed()\n# chosenIndex = random.choice(bestIndices)\n# return legalActions[chosenIndex]\n\n# util.raiseNotDefined()\n\n# # consider only pacman, see all ghosts as min\n# # def dfMiniMaxSearch(self, depth, curState, pacmanTurn):\n# # if depth == 0 or curState.isWin() or curState.isLose():\n# # return curState.getScore(), None\n# # legalActions = curState.getLegalActions()\n# # successors = [curState.generateSuccessor(0, action) for action in legalActions]\n# # scores = [self.dfMiniMaxSearch(depth-1, nextState, not pacmanTurn)[0] for nextState in successors]\n# # bestScore = max(scores) if pacmanTurn else min(scores)\n# # bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n# # closestFood = closestFoodOrComputeDistance(curState.getPacmanPosition(), None, curState)\n# # dists = [closestFoodOrComputeDistance(nextState.getPacmanPosition(), closestFood[0], nextState)[1] \\\n# # for nextState in successors]\n# # chosenIndex = min(bestIndices, key=lambda x: dists[x])\n# # # chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n# # return bestScore, legalActions[chosenIndex]\n\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent with alpha-beta pruning (question 3)\n \"\"\"\n \n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action using self.depth and self.evaluationFunction\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # return self.alphabeta(self.depth, gameState, -sys.maxint, sys.maxint, True)[1]\n \n def dfAlphaBeta(depth, curState, agentId, alpha, beta):\n legalActions = curState.getLegalActions(agentId)\n if depth == 0 or curState.isWin() or curState.isLose():\n return curState.getScore()\n if agentId == 0:\n v = -sys.maxint\n for action in legalActions:\n nextState = curState.generateSuccessor(agentId, action)\n v = max(v, dfAlphaBeta(depth, nextState, agentId+1, alpha, beta))\n alpha = max(alpha, v)\n if alpha > beta: break\n return v\n else :\n v = sys.maxint\n for action in legalActions:\n nextState = curState.generateSuccessor(agentId, action)\n if agentId+1 == curState.getNumAgents():\n v = min(v, dfAlphaBeta(depth-1, nextState, 0, alpha, beta))\n else: v = min(v, dfAlphaBeta(depth, nextState, agentId+1, alpha, beta))\n beta = min(beta, v)\n if alpha > beta: break\n return v\n\n bestIndices = []\n legalActions = gameState.getLegalActions(0)\n alpha, beta = -sys.maxint, sys.maxint\n for i, action in enumerate(legalActions):\n nextState = gameState.generateSuccessor(0, action)\n newAlpha = dfAlphaBeta(self.depth, nextState, 1, alpha, beta)\n 
if alpha < newAlpha:\n alpha, bestIndices = newAlpha, [i]\n elif alpha == newAlpha:\n bestIndices.append(i)\n \n random.seed()\n chosenIndex = random.choice(bestIndices)\n return legalActions[chosenIndex]\n\n util.raiseNotDefined()\n\n # consider only pacman, see all ghosts as min\n # def alphabeta(self, depth, curState, alpha, beta, pacmanTurn):\n # if depth == 0 or curState.isWin() or curState.isLose():\n # return curState.getScore(), None\n # legalActions = curState.getLegalActions()\n # successors = [curState.generateSuccessor(0, action) for action in legalActions]\n # bestIndices = []\n # if pacmanTurn:\n # for i, nextState in enumerate(successors):\n # newAlpha = self.alphabeta(depth-1,nextState,alpha,beta,not pacmanTurn)[0]\n # if alpha < newAlpha:\n # alpha, bestIndices = newAlpha, [i]\n # elif alpha == newAlpha:\n # bestIndices.append(i)\n # if beta < alpha: break\n # if depth != self.depth: return alpha, None\n # else :\n # for i, nextState in enumerate(successors):\n # newBeta = self.alphabeta(depth-1,nextState,alpha,beta,not pacmanTurn)[0]\n # if beta > newBeta:\n # beta, bestIndices = newBeta, [i]\n # elif beta == newBeta:\n # bestIndices.append(i)\n # if beta < alpha: break\n # if depth != self.depth: return beta, None\n # closestFood = closestFoodOrComputeDistance(curState.getPacmanPosition(), None, curState)\n # dists = [closestFoodOrComputeDistance(nextState.getPacmanPosition(), closestFood[0], nextState)[1] \\\n # for nextState in successors]\n # if len(bestIndices) == 0:\n # bestIndices = [i for i in range(len(legalActions))]\n # chosenIndex = min(bestIndices, key=lambda x: dists[x])\n # if pacmanTurn: return alpha, legalActions[chosenIndex]\n # else : return beta, legalActions[chosenIndex]\n\n\nclass ExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent (question 4)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n\n All ghosts should be modeled as choosing uniformly at random from their\n legal moves.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\ndef betterEvaluationFunction(currentGameState):\n \"\"\"\n Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable\n evaluation function (question 5).\n\n DESCRIPTION: \n \"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n# Abbreviation\nbetter = betterEvaluationFunction\n\n","repo_name":"shinegrin/Pacman","sub_path":"multiagent/multiAgents.py","file_name":"multiAgents.py","file_ext":"py","file_size_in_byte":16702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
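A hand-checkable reduction of the max/min alternation that MinimaxAgent implements, on a nested-list game tree instead of Pacman states (toy code, not part of the assignment):

def toy_minimax(node, maximizing=True):
    # leaves are raw scores; each level alternates maximizer and minimizer
    if not isinstance(node, list):
        return node
    children = [toy_minimax(child, not maximizing) for child in node]
    return max(children) if maximizing else min(children)

# root maximizes over two min-nodes: max(min(3, 5), min(2, 9)) == 3
assert toy_minimax([[3, 5], [2, 9]]) == 3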
+{"seq_id":"3735117762","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nwith open('requirements.txt') as f:\n\tinstall_requires = f.read().strip().split('\\n')\n\n# get version from __version__ variable in purpletheme/__init__.py\nfrom purpletheme import __version__ as version\n\nsetup(\n\tname='purpletheme',\n\tversion=version,\n\tdescription='ERPNext app for Vezolve',\n\tauthor='Ayeshka Abeysinghe',\n\tauthor_email='ayeshka@vezolve.com',\n\tpackages=find_packages(),\n\tzip_safe=False,\n\tinclude_package_data=True,\n\tinstall_requires=install_requires\n)\n","repo_name":"ayeshkaVezolve/vezolve_purpletheme","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3058001602","text":"import inflection\n\n\ndef create_expressions(requested_fields: list) -> tuple[str, dict]:\n \"\"\"\n Creates expression components for a dynamo query\n :param requested_fields: List of enum fields names\n\n example usage:\n requested_fields = [\"ID\", \"Created\", \"FileName\"]\n projection_expression, expression_attribute_names = create_expressions(requested_fields)\n\n result:\n [\n \"#ID_attr,#Created_attr,#FileName_attr\",\n {\"#ID_attr\": \"ID\", \"#Created_attr\": \"Created\", \"#FileName_attr\": \"FileName\"}\n ]\n \"\"\"\n projection_expression = \"\"\n expression_attribute_names = {}\n\n for field_definition in requested_fields:\n field_placeholder = create_expression_attribute_placeholder(field_definition)\n if len(projection_expression) > 0:\n projection_expression = f\"{projection_expression},{field_placeholder}\"\n else:\n projection_expression = field_placeholder\n\n expression_attribute_names[field_placeholder] = field_definition\n return projection_expression, expression_attribute_names\n\n\ndef create_attribute_filter(filtered_fields: dict) -> str:\n \"\"\"\n Creates a filter for dynamodb queries for existing and non-existing attributes\n :param filtered_fields: Dictionary of filtered fields\n\n example usage:\n fields_filter = {\n DocumentReferenceMetadataFields.DELETED.value: \"\",\n DocumentReferenceMetadataFields.FILENAME.value: \"Test Filename\"\n }\n attribute_filter = create_attribute_filter(fields_filter)\n\n result:\n \"attribute_not_exists(Deleted) OR Deleted = :Deleted_val AND \"\n \"Filename = :Filename_val\"\n\n \"\"\"\n attr_filter = \"\"\n\n for field_name, field_value in filtered_fields.items():\n base_filter_string = (\n f\"{field_name} = {create_expression_value_placeholder(field_name)}\"\n )\n if not field_value:\n filter_string = (\n f\"attribute_not_exists({field_name}) OR {base_filter_string}\"\n )\n else:\n filter_string = base_filter_string\n\n if not attr_filter:\n attr_filter = filter_string\n else:\n attr_filter += f\" AND {filter_string}\"\n\n return attr_filter\n\n\ndef create_update_expression(field_names: list) -> str:\n \"\"\"\n Creates an expression for dynamodb queries to SET a new value for an item\n :param field_names: List of fields to update\n\n example usage:\n field_names = [\"Name\", \"Age\"...]\n fields_filter = create_update_expression(field_names)\n\n result:\n \"SET #Name_attr = :Name_val, #Age_attr = :Age_val\"\n\n \"\"\"\n update_expression = \"SET\"\n for field in field_names:\n expression = f\" {create_expression_attribute_placeholder(field)} = {create_expression_value_placeholder(field)}\"\n if update_expression == \"SET\":\n update_expression += expression\n else:\n update_expression += f\",{expression}\"\n\n return update_expression\n\n\ndef create_expression_attribute_values(attribute_field_values: dict) -> dict:\n \"\"\"\n Maps a dict of expression names and expression values to create a dictionary to pass into query\n :param attribute_field_values: Dictionary of attribute field names and values\n\n example usage:\n attribute_field_values = {\n DocumentReferenceMetadataFields.DELETED.value: \"\",\n DocumentReferenceMetadataFields.FILENAME.value: \"Test Filename\"\n }\n expression_attribute_values = create_expression_attribute_values(attribute_field_values)\n\n result:\n {\n \":Deleted_val\" : \"\"\n \":FileName_val\" : \"Test Filename\"\n }\n \"\"\"\n expression_attribute_values = {}\n for field_name, field_value in attribute_field_values.items():\n expression_attribute_values[\n 
f\"{create_expression_value_placeholder(field_name)}\"\n ] = field_value\n\n return expression_attribute_values\n\n\ndef create_expression_value_placeholder(value: str) -> str:\n \"\"\"\n Creates a placeholder value for an expression attribute name\n :param value: Value to change into a placeholder\n\n example usage:\n placeholder = create_expression_value_placeholder(\"VirusScanResult\")\n\n result:\n \":VirusScanResult_val\"\n \"\"\"\n return f\":{inflection.camelize(value, uppercase_first_letter=True)}_val\"\n\n\ndef create_expression_attribute_placeholder(value: str) -> str:\n \"\"\"\n Creates a placeholder value for a projection attribute name\n :param value: Value to change into a placeholder\n\n example usage:\n placeholder = create_expression_attribute_placeholder(\"VirusScanResult\")\n\n result:\n \"#VirusScanResult_attr\"\n \"\"\"\n return f\"#{inflection.camelize(value, uppercase_first_letter=True)}_attr\"\n","repo_name":"nhsconnect/national-document-repository","sub_path":"lambdas/utils/dynamo_utils.py","file_name":"dynamo_utils.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32626067466","text":"import httplib2\r\nfrom json import dumps\r\n\r\nh = httplib2.Http(disable_ssl_certificate_validation=True)\r\nh.add_credentials('scoblrtest1@cldc.shoretel.com', 'Shor1234')\r\n#h.add_certificate(\"89458dc4c867bd29dbd76bab0570af6ab3b968ed\", \"C:\\\\Users\\\\pkumarasami\\\\Desktop\\\\ShoreTel\\\\Work Space\\\\Sprint 1\\\\shoretel_engineering_ca.crt\", \"cldc.shoretel.com\")\r\n#body = {'USERNAME': 'scoblrtest1@cldc.shoretel.com', 'PASSWORD': 'Shore1234'}\r\nheaders = {'Content-type': 'application/json;charset=utf-8','Accept':'application/json,text/plain'}\r\n\r\nbody = {\"fn\": \"pannaga\",\"n\": \"jayaram\"}\r\n#response, content = h.request(\"https://buddycloud.cldc.shoretel.com/api/profile\", \"PUT\")\r\nurl = 'https://buddycloud.cldc.shoretel.com/api/workspaces'\r\ndictionary = {\"channel\": \"Parasuram221\",\"title\": \"scoe parasu workspace2 2\"}\r\n\r\nresp, content = h.request(\r\n uri=url,\r\n method='POST',\r\n headers={'Content-Type': 'application/json'},\r\n body=dumps(dictionary),\r\n)\r\nprint (resp)\r\nprint (content)","repo_name":"parasuramankumarasami/Examples","sub_path":"Workspace REST API/CreateWorkspace.py","file_name":"CreateWorkspace.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17199692189","text":"from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtGui import (QColor, QFont, QPainter, QPalette, QPen, QBrush)\nfrom PyQt5.QtCore import Qt\n\nclass PowerShow(QWidget):\n \"\"\"docstring for PowerShow\"\"\"\n def __init__(self):\n super(PowerShow, self).__init__()\n # self.arg = arg\n\n self.setPalette(QPalette(QColor(239,246,250)))\n self.setAutoFillBackground(True)\n self.setGeometry(100,100,100,100)\n self.setMinimumSize(100, 100)\n # self.show()\n self.pter = QPainter(self)\n self.powerList = {'logNumber':0,\n 'currentPower':0,\n 'averagePower':0,\n 'variancePower':0,\n 'maxPower':0,\n 'minPower':0}\n\n def paintEvent(self,event):\n pter = self.pter\n self.text = '最大功率:'+self.__Power2str(self.powerList['maxPower'])+'\\n\\\n最小功率:'+self.__Power2str(self.powerList['minPower'])+'\\n\\\n平均功率:'+self.__Power2str(self.powerList['averagePower'])+'\\n\\\n功率方差:'+str(round(self.powerList['variancePower'],2))+'\\n'\n self.textshow = self.__Power2str(self.powerList['currentPower'])\n pter.begin(self)\n # print('PowerShowlist',self.text ,'\\n',self.textshow)\n pter.setPen(QPen(Qt.black,0.1))\n pter.setBrush(QBrush(QColor(125,185,222)))\n pter.drawRoundedRect(event.rect(), 10, 10)\n pter.translate(10,10)\n self.drawPowerText(event,pter)\n # pter.drawRoundedRect(20,20, 210, 160,50,50)\n pter.translate(130,2)\n self.drawPowershishiText(event, pter)\n pter.translate(-5,20)\n self.drawPowerCurrentText(event, pter)\n\n pter.end()\n\n def drawPowerText(self,event,qp):\n qp.setPen(Qt.white)\n qp.setFont(QFont('微软雅黑', 10))\n qp.drawText(event.rect(), Qt.RightToLeft, self.text)\n\n def drawPowerCurrentText(self,event,qp):\n qp.setPen(Qt.white)\n qp.setFont(QFont('微软雅黑', 25))\n qp.drawText(event.rect(), Qt.RightToLeft, self.textshow)\n\n def drawPowershishiText(self,event,qp):\n qp.setPen(Qt.white)\n qp.setFont(QFont('微软雅黑', 8))\n qp.drawText(event.rect(), Qt.RightToLeft, '实时:')\n\n def updateFigure(self):\n self.update()\n\n def __Power2str(self,data):\n if data > 0.1:\n return str(round(data,2))+'W'\n else:\n return str(round(data*1000,2)) + 'mW'\n\n\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n app = QApplication(sys.argv)\n addressBook = PowerShow()\n addressBook.show()\n\n sys.exit(app.exec_())\n","repo_name":"lidingke/photodarkening","sub_path":"view/powershow.py","file_name":"powershow.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"12476372232","text":"def sevens_in_a_row(arr, n):\n\tif n==1:\n\t\tfor item in arr:\n\t\t\tif item==7:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\tbr=1\n\tflag=False\n\tfor i in range(0,len(arr)-1):\n\t\tif arr[i]==7:\n\t\t\tif arr[i]==arr[i+1]:\n\t\t\t\tbr+=1\n\t\t\t\tif br>=n:\n\t\t\t\t\tflag = True\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbr=1\n\treturn flag\n","repo_name":"mileto94/HackBulgaria","sub_path":"week0/First/6-SevensRow/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23560901961","text":"\r\n\"\"\"\r\nGiven a number N, break it down into an array\r\n\r\nN = '999' outputs\r\na = [[1,1,1,1,1,1,1,1,1],\r\n [1,1,1,1,1,1,1,1,1],\r\n [1,1,1,1,1,1,1,1,1]]\r\nIt's just magic\r\n\"\"\"\r\ndef magic(N):\r\n\r\n bar = []\r\n\r\n for i in range(len(N)):\r\n bar.append([0]*9)\r\n for j in range(0,len(N)):\r\n x = int(N[j])\r\n for i in range(0,x):\r\n bar[j][i] = 1\r\n return bar, len(N)\r\n\r\ndef help(s): # reduces the order by one decimal, for example 100 to 10\r\n bar = [0]*9\r\n if s == 0:\r\n bar = [1]*9\r\n else:\r\n for i in range(0,s-1):\r\n bar[i]=1\r\n return bar\r\n\r\ndef unmagic(bar):\r\n s = 0\r\n tidy_N = \"\"\r\n tiding_N = \"\"\r\n for i in range(0, len(bar)):\r\n s = sum(bar[i])\r\n tiding_N += str(s)\r\n tidy_N = int(tiding_N)\r\n return tidy_N\r\n\r\ndef wizard(bar, index):\r\n aux_bar = bar[:]\r\n s=0\r\n flag = 0\r\n while(flag < index ):\r\n for i in range(0, index):\r\n if aux_bar[index-i-1] < aux_bar[index-i-2] and i!=index-1:\r\n aux_bar[index-i-1] = [1]*9\r\n aux_bar[index-i-2] = help(sum(bar[index-i-2]))\r\n flag = 0\r\n elif aux_bar[index-i-1] >= aux_bar[index-i-2]:\r\n flag += 1\r\n\r\n return aux_bar\r\n\r\n\r\norder = []\r\nwith open(\"input.txt\") as f:\r\n file_o = open('output.txt','w') #GET IT OPEN HERE\r\n input = f.readlines()\r\n input_x = [i.replace(\"\\n\", \"\") for i in input]\r\n test_size = int( input_x[0] )\r\n\r\n if test_size == len(input_x[1:]):\r\n for j in range(0, test_size):\r\n order.extend(input_x[j+1].split(\" \"))\r\n print(\"test \",order , len(order))\r\n bar = []\r\n index = []\r\n for i in range(0, len(order)): # The input is now ready for some magic\r\n bar, index = magic(order[i])\r\n untidy = wizard(bar, index)\r\n tidy = unmagic(untidy)\r\n print(\"tidy\", tidy)\r\n file_o.write(\"Case #\"+ str(i+1) +\": \"+ str(tidy) +\"\\n\")\r\n else:\r\n print (\"Input count error\")\r\n file_o.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4210.py","file_name":"4210.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17481563832","text":"from preprocessing import *\nimport os\nimport numpy as np\n\n\"\"\"\n Before running the script, make sure to have the scans in the structure\n below. Names may vary:\n\n input:\n training scans - ./data/name_of_data/train/*.nii.gz\n test scans - ./data/name_of_data/test/*.nii.gz\n annotation masks - ./data/annotations/*.nii.gz (optional)\n crop_file - *.npz (optional, manual crops of scan of region of interest) \n\n output:\n training - ./data/name_of_data/train/trainA\n - ./data/name_of_data/train/trainB\n - ./data/name_of_data/train/annotations\n \n testing - ./data/name_of_data/train/testA\n - ./data/name_of_data/train/testB\n - ./data/name_of_data/train/annotations\n\n Then transfer trainA, trainB, testA, testB to CycleGAN/datasets/name_of_data\n\"\"\"\n\ndef get_patches(scan_path, scan_name, side='c', patch_size=256, patch_step=(128, 128)):\n scan = np.load(scan_path)['data']\n\n # crop scan using segmentation - currently not in use\n # seg = np.load(seg_path)['data']\n # cropped_scan = crop_volume(scan, seg, is_mr)\n \n # get all patches\n all_patches = get_all_patches(scan, side=side, dim=patch_size, step=patch_step)\n \n print(all_patches.shape)\n\n return all_patches\n\n\ndef prepare_data(root_path, crops, is_train = True, is_prep_npz=True, is_prep_seg=False, side='c', patch_size=256, patch_step=(128, 128)):\n\n data_type = 'train'\n if is_train is False:\n data_type = 'test'\n\n # root_path = './data/visceral_full'\n \n train_path = root_path + '/' + data_type\n dom_a_path = train_path + '/{}A'.format(data_type) # CT\n dom_b_path = train_path + '/{}B'.format(data_type) # MR\n \n train_seg_path = train_path + '/annotations'\n seg_root_path = root_path + '/annotations'\n \n nii_ext_name = '.nii.gz'\n scan_paths_train = get_image_paths_given_substr(train_path, '.nii')\n scan_names = [ p.split('/')[-1].strip('.nii.gz') for p in scan_paths_train ]\n\n os.makedirs(train_seg_path, exist_ok=True)\n os.makedirs(dom_a_path, exist_ok=True)\n os.makedirs(dom_b_path, exist_ok=True)\n \n if is_prep_npz is True:\n print(\"Converting zipped nii to npz with crops\")\n os.makedirs(dom_a_path, exist_ok=True)\n os.makedirs(dom_b_path, exist_ok=True)\n prepare_volume_as_npz(scan_paths_train, nii_ext_name, train_path, crops)\n \n if is_prep_seg is True:\n print(\"Getting all segmentations\")\n os.makedirs(train_seg_path, exist_ok=True)\n prepare_seg_as_npz(seg_root_path, scan_names, train_seg_path, crops)\n\n # only generate slices when preparing training data!\n if is_train is True:\n \n print(\"Processing npz volume files to npz image slices\")\n \n npz_file_paths = get_image_paths_given_substr(train_path, '.npz')\n\n for scan_path in npz_file_paths:\n scan_name = scan_path.replace(\".npz\", \"\").split('/')[-1]\n print(scan_name)\n seg_path = train_seg_path + '/' + scan_name + '.npz'\n is_ct = is_ct_file(scan_path)\n is_mr = not is_ct\n\n # get all patches\n all_patches = get_patches(scan_path, scan_name, side=side, patch_size=patch_size, patch_step=patch_step)\n\n for i, patch in enumerate(all_patches):\n dom_path = dom_b_path\n \n if (is_ct):\n dom_path = dom_a_path\n\n save_path = dom_path + '/' + scan_name + '_' + str(i) + '.npz'\n \n # patch = resize_img(patch, 128)\n np.savez(save_path, data=patch)\n\n\nif __name__ == '__main__':\n data_path = './data/visceral_full'\n crop_path = './visceral_crops.npz'\n \n crops = np.load(crop_path, allow_pickle=True)['data']\n\n # prepare train data here\n prepare_data(data_path, crops, is_train=True, is_prep_npz=True, 
is_prep_seg=False, side='c', patch_size=256, patch_step=(64, 64))\n\n # prepare test data here\n prepare_data(data_path, crops, is_train=False, is_prep_npz=False, is_prep_seg=True)\n\n","repo_name":"momenator/ct-mr-translation","sub_path":"prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"}
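A caveat on the scan_names line in prepare_data above: str.strip('.nii.gz') treats its argument as a character set ({., n, i, g, z}), not a suffix, so file names that start or end with those letters lose extra characters. A suffix-safe helper (name mine):

def strip_nii_suffix(name):
    # remove the literal '.nii.gz' ending only; str.removesuffix does the
    # same thing on Python 3.9+
    return name[:-len('.nii.gz')] if name.endswith('.nii.gz') else name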
+{"seq_id":"8096850483","text":"import unittest\nimport os\nfrom flask import Flask\nfrom dotenv import load_dotenv\nfrom src import db\nfrom src.repositories.user_repository import user_repository as ur\nfrom src.entities.user import User\nfrom src.tools.db_tools import clear_database\n\n\nclass TestUserRepository(unittest.TestCase):\n\n def setUp(self):\n load_dotenv()\n self.app = Flask(__name__)\n self.app.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\n self.app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv(\"DATABASE_URL\")\n db.init_app(self.app)\n\n self.app_context = self.app.app_context()\n self.app_context.push()\n clear_database()\n self.ur = ur\n self.user1 = User(\"Tiina Testiopettaja\", \"tiina.testiope@email.com\", True)\n self.ur.register(self.user1)\n\n def tearDown(self):\n db.drop_all()\n self.app_context.pop()\n\n def test_get_user_by_email_invalid(self):\n \"\"\"\n Test that an invalid email returns False\n \"\"\"\n user = ur.get_user_by_email(\"moti@motivaatio.com\")\n self.assertFalse(user)\n\n def test_get_user_by_email(self):\n \"\"\"\n Test that a user is returned with the correct email\n \"\"\"\n user = ur.get_user_by_email(\"tiina.testiope@email.com\")\n self.assertEqual(user.name, \"Tiina Testiopettaja\")\n self.assertEqual(user.email, \"tiina.testiope@email.com\")\n self.assertTrue(user.isteacher)\n","repo_name":"piryopt/pienryhmien-optimointi","sub_path":"tests/user_repository_test.py","file_name":"user_repository_test.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9799566616","text":"from django.db import migrations\n\n\ndef rename_food_names(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n Food = apps.get_model(\"product\", \"Food\")\n foods = Food.objects.using(db_alias).all()\n for food in foods:\n food.name = food.name.lower()\n food.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('product', '0022_add_default_scorecreator'),\n ]\n\n operations = [\n migrations.RunPython(rename_food_names),\n ]\n","repo_name":"mathijsromans/consupedia","sub_path":"product/migrations/0023_rename_food_names_lowercase.py","file_name":"0023_rename_food_names_lowercase.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"16385396781","text":"import json\nimport re\nimport ast\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport operator\nimport collections\n\njson_contents = open('output/msd_fit_categories0.2.txt','r').read()[1:-1]\njson_contents_split = [int(a) for a in json_contents.split()]\ncluster_sizes = sorted(collections.Counter(json_contents_split).items())\n\ncluster_years = collections.defaultdict(list)\njson_contents = open('output/song_groupings0.2.txt','r').read()\nfor g in re.finditer('(\\d{1,2}): \\[.*?(\\)\\])',json_contents):\n cluster_num = g.group(1)\n for year in re.finditer(', (\\d{4})\\),',g.group(0)):\n cluster_years[cluster_num].append(int(year.group(1)))\n\nall_song_dists = {}\nall_song_nums = {}\nfor key in cluster_years.keys():\n song_dist = cluster_years[key]\n all_songs_dists_raw = sorted(collections.Counter(song_dist).items())\n total_songs_num = sum([tup[1] for tup in all_songs_dists_raw])\n all_song_nums[key] = total_songs_num\n all_song_dists[key] = [(tup[0],float(tup[1])/total_songs_num) for tup in all_songs_dists_raw]\n\nplt.switch_backend('agg')\nfor idx, key in enumerate(cluster_years):\n plt.xlim([1970,2010])\n plt.hist(cluster_years[key])\n plt.title('Group {}'.format(key))\n plt.savefig('output/songdist_group' + str(key) + '_0.2.png')\n plt.close()\n","repo_name":"matthewsilver/msd-parser","sub_path":"group_interpreter.py","file_name":"group_interpreter.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"35658638835","text":"\"\"\"Main experiment file.\"\"\"\n\nimport warnings; warnings.filterwarnings(\"ignore\")\n\n# reproducibility bit ----------------\nfrom random import seed; seed(42)\nfrom numpy.random import seed as np_seed; np_seed(42)\nfrom tensorflow.compat.v1 import set_random_seed; set_random_seed(42)\nimport os; os.environ['PYTHONHASHSEED'] = str(42)\n# -----------------------------------\n\nimport argparse\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\n\nfrom evaluation import Evaluation\nfrom models import (BayesFeatures, BertFeatures, WordEmbeddings)\nfrom reader import Reader, merge_datasets\nfrom utils import debug_tests\n\n\nclass EnglishCompare(object):\n\n def __init__(self, pipeline: Pipeline, datasets: list = None,\n merge: bool = False, cross: bool = True, neural: bool = False,\n clean: bool = True, preprocess: bool = False,\n multi_read: int = 0) -> None:\n # NOTE: comment out those unavailable, or provide own list\n self.datasets = [\n ('bretschneider', 'agg_set'),\n ('kaggle', 'kag_set'),\n ('kontostathis', 'msp_set'),\n ('maral', 'ytb_set'),\n ('vanhee', 'asken_set'),\n ('xu', 'xu_set'),\n ('kaggle', 'kag_conv'),\n ('vanhee', 'asken_conv'),\n ('toxic', 'toxic_set')\n ] if not datasets else datasets\n self.data = Reader(clean=True, preprocess=False, language='en',\n multi_threading=multi_read)\n self.eval = Evaluation(pipeline,\n headers=['_'.join(x) for x in self.datasets],\n merge=merge, cross=cross, neural=neural)\n self.merge = merge\n\n def _cross_data(self) -> (list, list):\n train = [data for data in self.data.subset(self.datasets)]\n test = [_data for _data in self.data.subset(self.datasets)]\n if self.merge:\n train = train[:-1]\n train = [merge_datasets(train)]\n return train, test\n\n def run(self, nest: bool = False, store: bool = False,\n report: bool = False) -> None:\n print(f\"\\n> Merging: {self.merge}\")\n train, test = self._cross_data()\n self.eval.score(train, test=test, nest=nest, store=store,\n report=report)\n\n\nclass DutchCompare(object):\n\n def __init__(self, pipeline: Pipeline, datasets: list = None,\n merge: bool = False, cross: bool = True, neural: bool = False,\n clean: bool = True, preprocess: bool = False,\n multi_read: int = 0) -> None:\n if merge:\n raise(ValueError(\"Sorry, the NL data manually does merging.\"))\n self.datasets = [\n ('vanhee', 'asknl_set'),\n ('vanhee', 'simnl_set'),\n ('vanhee', 'donnl_set'),\n ('vanhee', 'cnvnl_set'),\n ('vanhee', 'cnsnl_set')\n ] if not datasets else datasets\n self.data = Reader(clean=True, preprocess=False, language='nl')\n headers = ['_'.join(x) for x in self.datasets]\n headers += [f'{headers[0]}+{headers[1]}']\n self.eval = Evaluation(pipeline, headers=headers, cross=cross,\n neural=neural)\n\n def _combine_sets(self) -> dict:\n sets = {'ask': '', 'sim': '', 'don': '', 'cnv': '', 'cns': ''}\n train = [data for data in self.data.subset(self.datasets)]\n\n for data in train:\n for key in sets:\n if key in data.id:\n sets[key] = data\n return sets\n\n def run(self, nest: bool = False, store: bool = False,\n report: bool = False) -> None:\n dsets = self._combine_sets()\n config = [\n (dsets['ask'], dsets['ask']),\n (dsets['sim'], dsets['sim']),\n (dsets['cnv'], dsets['cnv']),\n (dsets['cns'], dsets['cns']),\n (dsets['ask'], dsets['sim']),\n (dsets['sim'], dsets['ask']),\n (dsets['ask'], dsets['don']),\n (dsets['sim'], 
dsets['don']),\n (dsets['ask'], dsets['cnv']),\n (dsets['ask'], dsets['cns']),\n (dsets['sim'], dsets['cnv']),\n (dsets['sim'], dsets['cns']),\n (dsets['cnv'], dsets['ask']),\n (dsets['cnv'], dsets['sim']),\n (dsets['cnv'], dsets['don']),\n (dsets['cnv'], dsets['cns']),\n (dsets['cns'], dsets['cnv']),\n (dsets['cns'], dsets['ask']),\n (dsets['cns'], dsets['sim']),\n (dsets['cns'], dsets['don']),\n (merge_datasets([dsets['ask'], dsets['sim']]), dsets['ask']),\n (merge_datasets([dsets['ask'], dsets['sim']]), dsets['sim']),\n (merge_datasets([dsets['ask'], dsets['sim']]), dsets['don']),\n (merge_datasets([dsets['ask'], dsets['sim']]), dsets['cnv']),\n (merge_datasets([dsets['ask'], dsets['sim']]), dsets['cns']),\n (merge_datasets([dsets['ask'], dsets['sim'], dsets['cnv'],\n dsets['cns']]), dsets['ask']),\n (merge_datasets([dsets['ask'], dsets['sim'], dsets['cnv'],\n dsets['cns']]), dsets['sim']),\n (merge_datasets([dsets['ask'], dsets['sim'], dsets['cnv'],\n dsets['cns']]), dsets['don']),\n (merge_datasets([dsets['ask'], dsets['sim'], dsets['cnv'],\n dsets['cns']]), dsets['cnv']),\n (merge_datasets([dsets['ask'], dsets['sim'], dsets['cnv'],\n dsets['cns']]), dsets['cns'])\n ]\n for train, test in config:\n self.eval.score([train], test=[test], nest=nest, df=False)\n\n\ndef select_model(key: str) -> Pipeline:\n \"\"\"Select the model to use based on argparse input.\"\"\"\n\n # NOTE: all these if statements don't look particularly nice, but we also\n # don't want to load a bunch of models we're not gonna use now, do we?\n\n # Simple Default Test\n if key == 'debug':\n return {\n ('vect', TfidfVectorizer(ngram_range=(1, 2), min_df=3,\n max_df=0.9, use_idf=1, smooth_idf=1,\n sublinear_tf=1)): {},\n ('nbf', BayesFeatures()): {},\n ('lr', LogisticRegression(dual=True, random_state=42,\n class_weight=\"balanced\")): {}\n }\n\n # FINAL BINARY SVM BASELINE\n elif key == 'baseline':\n return {\n ('vect', CountVectorizer(binary=True)): {\n 'vect__ngram_range': [(1, 1), (1, 2), (1, 3)]\n },\n ('svc', LinearSVC(random_state=42)): {\n 'svc__C': [1e-3, 1e-2, 1e-1, 1e-0, 1e1, 1e2, 1e3],\n 'svc__loss': ['hinge', 'squared_hinge'],\n 'svc__class_weight': [None, \"balanced\"]\n }\n }\n\n elif key == 'debug-baseline':\n return {\n ('vect', CountVectorizer(binary=True)): {},\n ('svc', LinearSVC(random_state=42)): {}\n }\n\n # NB-SVM Model\n elif key == 'nbsvm':\n return {\n ('vect', TfidfVectorizer(ngram_range=(1, 2), min_df=3, max_df=0.9,\n use_idf=1, smooth_idf=1,\n sublinear_tf=1)): {},\n ('nbf', BayesFeatures()): {},\n ('lr', LogisticRegression(dual=True, solver='liblinear',\n random_state=42,\n class_weight=\"balanced\")): {\n 'lr__C': [1, 2, 3, 4, 5, 10, 25, 50, 100, 200, 500]\n }\n }\n\n elif key == 'debug-nbsvm':\n return {\n ('vect', TfidfVectorizer()): {},\n ('nbf', BayesFeatures()): {},\n ('lr', LogisticRegression()): {}\n }\n\n # LR over Embeddings\n elif key == 'w2v':\n return {\n # NOTE: cow.nl.kv for Dutch\n ('vct', WordEmbeddings(pre_trained=\"w2v.kv\")): {},\n ('lr', LogisticRegression(class_weight=\"balanced\",\n solver='liblinear', random_state=42)): {\n 'lr__C': [1, 2, 3, 4, 5, 10, 25, 50, 100, 200, 500],\n }\n }\n\n elif key == 'debug-w2v':\n return {\n # NOTE: cow.nl.kv for Dutch\n ('vct', WordEmbeddings(pre_trained=\"w2v.kv\")): {},\n ('lr', LogisticRegression()): {}\n }\n\n # LR over DistilBert\n elif key == 'bert':\n return {\n ('vect', BertFeatures()): {},\n ('lr', LogisticRegression(class_weight=\"balanced\",\n solver='liblinear', random_state=42)): {\n 'lr__C': [1, 2, 3, 4, 5, 10, 25, 
50, 100, 200, 500],\n }\n }\n\n elif key == 'debug-bert':\n return {\n ('vect', BertFeatures()): {},\n ('lr', LogisticRegression()): {}\n }\n\n # Reproduction B-LSTM\n elif key == 'blstm':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='blstm', inp_dim=128, num_classes=2, learn_rate=0.01,\n batch_size=128, epochs=10, embed_size=50)): {}\n }\n\n elif key == 'debug-blstm':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='blstm', inp_dim=128, num_classes=2, learn_rate=1,\n batch_size=32, epochs=1, embed_size=50)): {}\n }\n\n # Reproduction CNN\n elif key == 'cnn':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='cnn', inp_dim=128, num_classes=2, learn_rate=0.01,\n batch_size=128, epochs=10, embed_size=50)): {}\n }\n\n elif key == 'debug-cnn':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='cnn', inp_dim=128, num_classes=2, learn_rate=1,\n batch_size=32, epochs=1, embed_size=50)): {}\n }\n\n # Own NN Grid\n elif key == 'nn':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='clstm', inp_dim=128, num_classes=2, learn_rate=0.01,\n batch_size=128, epochs=10, embed_size=50, character_level=True,\n early_stop=3)\n ): {\n # NOTE: roughly optimal params: 128 batch / 100, 50 embeddings\n 'neur__batch_size': [32, 64, 128, 256],\n 'neur__embed_size': [50, 100, 200, 300],\n 'neur__learn_rate': [0.1, 0.01, 0.05, 0.001, 0.005]\n }\n }\n\n elif key == 'debug-nn':\n from neural import ReproductionNeuralNetwork\n return {\n ('neur', ReproductionNeuralNetwork(\n m_type='clstm', inp_dim=128, num_classes=2, learn_rate=1,\n batch_size=32, epochs=1, embed_size=50)): {}\n }\n\n else:\n raise(KeyError(f\"Sorry, `{key}` is not a valid --model name.\"))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Cyberbullying detection replication environment.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('model',\n help=\"\"\"debug | baseline | nbsvm | w2v | bert |\n blstm | cnn | nn -- Debug will run all possible\n configurations!\"\"\", type=str)\n parser.add_argument('--language', default='en', type=str, help=\"\"\"Run on\n English (en) or Dutch (nl) data.\"\"\")\n parser.add_argument('--preprocessing', default='clean', type=str,\n help=\"none | clean | preprocess\")\n parser.add_argument('--merge', default=False, type=bool, help=\"\"\"Merge all\n training sets (D_All in paper).\"\"\")\n parser.add_argument('--nest', default=False, type=bool, help=\"\"\"Report\n nested cross-validation scores (only relevant when\n using GridSearch).\"\"\")\n parser.add_argument('--single_domain', default=False, type=bool,\n help=\"Don't run eval cross-domain.\")\n parser.add_argument('--multi_read', default=0, help=\"\"\"Number of cores the\n _reader_ should use for multi-threading.\"\"\")\n parser.add_argument('--store', default=False, type=bool, help=\"\"\"Save the\n best model in a pickle file under /results.\"\"\")\n parser.add_argument('--report', default=False, type=bool, help=\"\"\"Report\n the most important features for SVM/LR models.\"\"\")\n args = parser.parse_args()\n\n Experiment = EnglishCompare if args.language == 'en' else DutchCompare\n if args.model == 'debug':\n debug_tests(args, EnglishCompare, select_model)\n else:\n Experiment(pipeline=select_model(args.model), 
merge=args.merge,\n datasets=None, cross=args.single_domain,\n neural=args.model in ['blstm', 'cnn', 'nn'],\n clean='clean' in args.preprocessing,\n preprocess='preprocess' in args.preprocessing,\n multi_read=args.multi_read).run(nest=args.nest,\n store=args.store,\n report=args.report)\n","repo_name":"cmry/amica","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":13653,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
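The select_model() dicts map (step_name, estimator) pairs to per-step grids; the Evaluation class is not shown here, but a plausible reading is that they fold into a Pipeline plus GridSearchCV like this (sketch, under that assumption):

from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

def build_search(model_dict, cv=5):
    steps = list(model_dict.keys())       # [(step_name, estimator), ...]
    param_grid = {}
    for params in model_dict.values():
        param_grid.update(params)         # keys already use 'step__param' form
    return GridSearchCV(Pipeline(steps), param_grid, cv=cv, scoring='f1')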
+{"seq_id":"41192601053","text":"import FWCore.ParameterSet.Config as cms\n\nhgcalMultiClusters = cms.EDProducer('HGCalMultiClusterProducer',\n HGCLayerClusters = cms.InputTag('hgcalLayerClusters'),\n verbosity = cms.untracked.uint32(3),\n doSharing = cms.bool(False),\n HGCEEInput = cms.InputTag('HGCalRecHit', 'HGCEERecHits'),\n HGCFHInput = cms.InputTag('HGCalRecHit', 'HGCHEFRecHits'),\n multiclusterRadii = cms.vdouble(\n 2,\n 5,\n 5\n ),\n HGCBHInput = cms.InputTag('HGCalRecHit', 'HGCHEBRecHits'),\n HGCLayerClustersSharing = cms.InputTag('hgcalLayerClusters', 'sharing'),\n minClusters = cms.uint32(3),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"RecoLocalCalo/HGCalRecProducers/hgcalMultiClusters_cfi.py","file_name":"hgcalMultiClusters_cfi.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"3586780282","text":"from .historyitem_subcomponent import historyitem_module as HistoryItem\nfrom ....common_components.datetime_datatypes import datetime_module as DateTime\nfrom ....common_components.datetime_datatypes import eras_module as EraFunctions\nfrom .graphing_subcomponent import graphing_module as Graphing\n\n\nclass DefineHistory:\n\n\tdef __init__(self):\n\n\t\t# An array of historic monitor history\n\t\tself.monitorhistory = []\n\n\t\t# Defines the granularity of display of monitor data\n\t\tself.erasize = 4 # Ten minute intervals\n\t\tself.longerasize = 5 # Hour intervals\n\n\t\t# Graphing module\n\t\tself.graphs = Graphing.creategraphing(self.erasize, self.longerasize)\n\n# =========================================================================================\n\n\tdef addhistoryentry(self, colourcounts, networkstatus, uploadedtotal, temperature):\n\n\t\tcurrentdatetime = DateTime.getnow()\n\t\tnewhistoryitem = HistoryItem.createhistoryitem(currentdatetime, colourcounts, networkstatus, uploadedtotal,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttemperature)\n\t\tself.monitorhistory.append(newhistoryitem)\n\t\tself.clearuphistory(currentdatetime)\n\t\t#print(\"NEW MONITOR HISTORY ITEM: \", newhistoryitem.getsavedata())\n\t\treturn newhistoryitem.getsavedata()\n\n\n\n\tdef restorehistory(self, saveddatalist):\n\n\t\tfor dataitem in saveddatalist:\n\t\t\tself.monitorhistory.append(HistoryItem.createfromfile(dataitem))\n\n\n\n\tdef gethistorygraphics(self, historyperiod):\n\n\t\tif historyperiod == \"Latest\":\n\t\t\toutput = self.graphs.drawgraphs(False, self.monitorhistory)\n\t\telif historyperiod == \"Recent\":\n\t\t\toutput = self.graphs.drawgraphs(True, self.getlonghistory())\n\t\telse:\n\t\t\toutput = {}\n\n\t\treturn output\n\n\n\n\tdef clearuphistory(self, currentdatetime):\n\n\t\tif currentdatetime.gettimevalue() < 600:\n\t\t\tprint(\"Before clean up: \", len(self.monitorhistory))\n\t\t\tthreshold = DateTime.createfromobject(currentdatetime)\n\t\t\tthreshold.adjustdays(-11)\n\t\t\tnewhistorylist = []\n\t\t\tfor historyitem in self.monitorhistory:\n\t\t\t\tif DateTime.isfirstlaterthansecond(historyitem.getdatetime(), threshold) == True:\n\t\t\t\t\tnewhistorylist.append(historyitem)\n\n\t\t\tself.monitorhistory = newhistorylist.copy()\n\t\t\tprint(\"After clean up: \", len(self.monitorhistory))\n\n\n\n\n\tdef getlonghistory(self):\n\n\t\toutcome = []\n\t\tcurrentlonghistoryitem = HistoryItem.createblank(DateTime.createfromiso(\"20100101000000\"))\n\t\tfor historyitem in self.monitorhistory:\n\t\t\tnewhour = historyitem.getdatetime()\n\t\t\tif EraFunctions.compareeras(newhour, currentlonghistoryitem.getdatetime(), 5) == True:\n\t\t\t\tcurrentlonghistoryitem.cumulate(historyitem)\n\t\t\telse:\n\t\t\t\toutcome.append(currentlonghistoryitem)\n\t\t\t\tcurrentlonghistoryitem = HistoryItem.createblank(EraFunctions.geteraasobject(newhour, 5))\n\t\t\t\tcurrentlonghistoryitem.cumulate(historyitem)\n\t\tif EraFunctions.compareeras(currentlonghistoryitem.getdatetime(), DateTime.getnow(), 5) == False:\n\t\t\toutcome.append(currentlonghistoryitem)\n\t\treturn outcome\n\n\n\n","repo_name":"johnpcole/Download-Manager","sub_path":"codebase/manager_component/monitoring_subcomponent/history_subcomponent/history_class.py","file_name":"history_class.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9161716571","text":"#! /usr/bin/env python\n\"\"\"\nSetup for Robbie\n\"\"\"\nimport os\nfrom setuptools import setup\nfrom subprocess import check_output\n\n\n# The following two functions were taken from the repo: https://github.com/pyfidelity/setuptools-git-version/blob/master/setuptools_git_version.py\n\ndef format_version(version, fmt=\"{tag}_{gitsha}\"):\n parts = version.split(\"-\")\n if len(parts) < 4:\n return parts[0]\n assert len(parts) in (3, 4)\n dirty = len(parts) == 4\n tag, count, sha = parts[:3]\n if count == \"0\" and not dirty:\n return tag\n return fmt.format(tag=tag, commitcount=count, gitsha=sha.lstrip(\"g\"))\n\n\ndef get_git_version():\n git_version = check_output(\"git describe --tags --long --dirty --always\".split()).decode('utf-8').strip()\n return format_version(version=git_version)\n\n\nrobbie_version = get_git_version()\n#m ake a temporary version file to be installed then delete it\nwith open(\"robbie_version.sh\", \"a\") as the_file:\n the_file.write(f\"#!/bin/bash -l\\necho {robbie_version}\")\n\nsetup(\n name=\"Robbie\",\n version=robbie_version,\n description=\"A batch processing work-flow for the detection of radio transients and variables\",\n url=\"https://github.com/PaulHancock/Robbie\",\n python_requires=\">=3.6\",\n packages=['robbie'],\n scripts=[\n \"robbie_version.sh\",\n # python\n \"scripts/auto_corr.py\",\n \"scripts/calc_var.py\",\n \"scripts/collect_transients.py\",\n \"scripts/filter_transients.py\",\n \"scripts/get_epoch.py\",\n \"scripts/get_lc_from_vot.py\",\n \"scripts/join_catalogues.py\",\n \"scripts/plot_variables.py\",\n \"scripts/make_weights.py\",\n \"scripts/reprojection.py\",\n \"scripts/convol_common_resolution.py\",\n # nextflow\n \"robbie.nf\",\n \"nextflow.config\",\n ],\n)\nos.remove(\"robbie_version.sh\")\n","repo_name":"PaulHancock/Robbie","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"8627165092","text":"upp, low, dig, pct = 0, 0, 0, 0\n\npswd = input('암호 입력: ')\nif pswd.isalnum() == False : pct = 1 # 특수문자 있으면 pct = 1\nfor k in pswd:\n if k.isupper() : upp = 1 # 대문자 있으면 upp = 1\n elif k.islower() : low = 1 # 소문자 있으면 low = 1\n elif k.isdigit() : dig = 1 # 숫자 있으면 dig = 1\n\nif low + upp + dig + pct >= 3 :\n print('사용 가능')\nelse : print('!!불가능한 암호!!')","repo_name":"hellen1221/2023pythonclass","sub_path":"class6/passw.py","file_name":"passw.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"19053414160","text":"#coding:utf-8\n\"\"\"\nWrite a Python program to find the roots of a quadratic function. Go to the editor\n\nExpected Output :\n\nQuadratic function : (a * x^2) + b*x + c \na: 25 \nb: 64 \nc: 36 \nThere are 2 roots: -0.834579 and -1.725421\n\"\"\"\nfrom math import sqrt\ndef roots_quadratic():\n a = float(input(\"a: \"))\n b = float(input(\"b: \"))\n c = float(input(\"c: \"))\n\n discriminant=b**2 - 4*a*c\n if discriminant > 0:\n num_roots = 2\n x1 = (((-b) + sqrt(discriminant))/(2*a)) \n x2 = (((-b) - sqrt(discriminant))/(2*a))\n print(\"There are 2 roots: %f and %f\" % (x1, x2))\n elif discriminant == 0:\n num_roots = 1\n x = (-b) / 2*a\n print(\"There is one root: \", x)\n else:\n num_roots = 0\n print(\"No roots, discriminant < 0.\")\n exit()\n\nprint(roots_quadratic())","repo_name":"DonaFidele/PythonExercices","sub_path":"math/exo_30.py","file_name":"exo_30.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"8059203934","text":"from ...models import Move\nfrom ...models import Game\nfrom rest_framework import serializers\nfrom django.http import Http404\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom ...utils import get_neighbors\n\n\ndef tile_coordinates(value):\n valid_values = range(0, 9)\n if value not in valid_values:\n raise serializers.ValidationError('Not a valid value.')\n\n\ndef sudoku_value(value):\n valid_values = [None]\n valid_values.extend(range(1, 10))\n if value not in valid_values:\n raise serializers.ValidationError('Not a valid value.')\n\n\nclass MoveSerializer(serializers.ModelSerializer):\n x = serializers.IntegerField(validators=[tile_coordinates])\n y = serializers.IntegerField(validators=[tile_coordinates])\n value = serializers.IntegerField(validators=[sudoku_value], allow_null=True)\n\n class Meta:\n model = Move\n fields = ('id', 'game', 'previous_move', 'x', 'y', 'value')\n read_only_fields = ('id', 'previous_move',)\n\n\nclass MoveAPIListViewV1(APIView):\n def get(self, request, format=None):\n game_id = request.GET.get('game', None)\n try:\n game_id = int(game_id)\n except (TypeError, ValueError):\n return Response({'detail': 'The query parameter game must be a positive integer.'}, status.HTTP_400_BAD_REQUEST)\n\n try:\n game = Game.objects.get(id=game_id)\n except Game.DoesNotExist:\n raise Http404\n\n moves = Move.objects.filter(game=game)\n serializer = MoveSerializer(moves, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = MoveSerializer(data=request.data)\n if serializer.is_valid():\n game = serializer.validated_data.get('game')\n\n # Ensure the tile can be edited\n if not game.is_tile_editable(serializer.validated_data.get('x'), serializer.validated_data.get('y')):\n return Response({'detail': 'The given coordinate is not editable.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Ensure the tile doesn't conflict with neighbors\n rendered_game = game.render_game()\n if serializer.validated_data['value'] in get_neighbors(serializer.validated_data.get('y'), serializer.validated_data.get('x'), rendered_game):\n return Response({'detail': 'Bad move! Conflict with our neighbors.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Determine the last move dynamically\n previous_move = Move.objects.filter(game=request.data.get('game')).order_by('-id').first()\n serializer.save(previous_move=previous_move)\n\n # Update the game state\n game.user_input[serializer.validated_data['y']][serializer.validated_data['x']] = serializer.validated_data['value']\n game.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"ryanisnan/sudoku_be","sub_path":"src/sudoku/sudoku/api/v1/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23426202901","text":"\r\nout_file = open('b_out.txt', 'w')\r\n\r\ndef out(case_idx, num):\r\n\tprint(\"Case #%d: %.7f\" % (case_idx + 1, num), file=out_file)\r\n\r\ndef get_time(target, rate):\r\n\treturn target / rate\r\n\r\nwith open('B-large.in') as f:\r\n\tnum_cases = int(f.readline())\r\n\tfor i in range(num_cases):\r\n\t\t[cost, increase, target] = [float(x) for x in f.readline().split(\" \")]\r\n\r\n\t\tcur_rate = 2\r\n\t\tbest_time = get_time(target, cur_rate)\r\n\t\ttime_to_add = 0.0\r\n\t\twhile True:\r\n\t\t\ttime_to_add += get_time(cost, cur_rate)\r\n\t\t\tcur_rate += increase\r\n\t\t\talt_time = get_time(target, cur_rate) + time_to_add\r\n\r\n\t\t\tif alt_time < best_time:\r\n\t\t\t\tbest_time = alt_time\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\r\n\t\talt_time = get_time(target, 2 + increase) + cost\r\n\r\n\t\tout(i, best_time)\r\n\r\n\t\t\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2878.py","file_name":"2878.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37802762647","text":"from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport json\n\n# access tokens for twitter\naccess_token = \"981309028490981376-YoTeZT7xidSEg1euxFbIzYshY16sVpI\"\naccess_token_secret = \"BK9P4xUY3zPgVZw6wpQxJkmjsmH4NsM8wJ2rEVOkH5Mv3\"\nconsumer_key = \"fg4pBbU44JUMRnDRbjtOmKCRR\"\nconsumer_secret = \"iN3k4V7MuA7M12rwZOvmyAfJudd4qm4mnt68kSTJuOSkfP0K7X\"\n\n\ntweets_file = None\ntoxic_key_words_file = \"../../../../resources/toxic-keywords.txt\"\nnon_toxic_key_words_file = \"../../../../resources/non-toxic-keywords.txt\"\n\n\"\"\"\n Returns toxic keywords\n\"\"\"\n\n\ndef keywords(key_words_file):\n fp = open(key_words_file, \"r\")\n words = [w.strip() for w in fp.readlines() if w.strip() is not None and len(w.strip()) > 0]\n return words\n\n\n\"\"\"\nThis is a basic listener that just prints received tweets to stdout.\n\"\"\"\n\n\nclass StdOutListener(StreamListener):\n def on_status(self, status):\n with open(tweets_file,'a',encoding=\"utf-8\") as tf:\n tf.write('\\n')\n if hasattr(status, 'retweeted_status'):\n try:\n tweet = status.retweeted_status.extended_tweet['full_text']\n tf.write(tweet.strip().replace('\\n', \" \").replace('\\r', ''))\n except:\n tweet = status.retweeted_status.text\n tf.write(tweet.strip().replace('\\n', \" \").replace('\\r', ''))\n else:\n try:\n tweet = status.extended_tweet[\"full_text\"]\n tf.write(tweet.strip().replace('\\n', \" \").replace('\\r', ''))\n except:\n tweet = status.text\n tf.write(tweet.strip().replace('\\n', \" \").replace('\\r', ''))\n return True\n\n def on_error(self, status):\n print(status)\n\n\nif __name__ == '__main__':\n # This handles Twitter authentication and the connection to Twitter Streaming API\n listener = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, listener)\n\n # This line filter Twitter Streams to capture data\n # change file name (keywords and output file) to track the respective\n tweets_file = '../../../../resources/toxic-tweets.txt'\n stream.filter(languages=[\"en\"], track=keywords(toxic_key_words_file))\n\n","repo_name":"KavyaJampani/toxic_tweet","sub_path":"src/edu/utdallas/dc/tweetCollect.py","file_name":"tweetCollect.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39501535181","text":"class Solution:\n \"\"\"\n @param matrix, a list of lists of integers\n @param target, an integer\n @return a boolean, indicate whether matrix contains target\n \"\"\"\n def searchMatrix(self, matrix, target):\n if not matrix or not matrix[0]:\n return False\n n, m = len(matrix), len(matrix[0])\n left, right = 0, n - 1\n while left + 1 < right:\n mid = left + (right - left) / 2\n if matrix[mid][0] > target:\n right = mid - 1\n else:\n left = mid\n row = right\n if matrix[right][0] > target:\n row = left\n\n left, right = 0, m - 1\n while left + 1 < right:\n mid = left + (right - left) / 2\n if matrix[row][mid] > target:\n right = mid - 1\n else:\n left = mid\n if matrix[row][right] == target or matrix[row][left] == target:\n return True\n return False\n\n","repo_name":"jwyx3/practices","sub_path":"python/search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23552033031","text":"# Leon Xueliang Liu 2017\r\n\r\nwith open('B-large.in', 'r') as f:\r\n\tcontent = f.readlines()\r\n\r\nT = int(content[0]) # # of cases\r\ndata = [line.split()[0] for line in content[1:]]\r\n\r\nresult = [] # list of results\r\n\r\nfor n in range(T):\r\n\tans = []\r\n\tN = str(data[n])\r\n\tfor char in N:\r\n\t\tans.append(char)\r\n\tL = len(ans)\r\n\r\n\t# forward pass, if next # is smaller, decrement current, set all later to 9\r\n\tfor i in range(L-1):\r\n\t\tif int(ans[i]) > int(ans[i+1]):\r\n\t\t\tans[i] = str(int(ans[i])-1)\r\n\t\t\tfor j in range(i+1, L):\r\n\t\t\t\tans[j] = '9'\r\n\t\t\tbreak\r\n\r\n\t# reverse pass, if next # is bigger, decrement next, set current to 9\r\n\tfor i in range(L-1):\r\n\t\tm = int(ans[L-1-i])\r\n\t\tn = int(ans[L-2-i])\r\n\t\tif m < n:\r\n\t\t\tans[L-1-i] = '9'\r\n\t\t\tans[L-2-i] = str(n-1)\r\n\r\n\tans = int(\"\".join(ans))\r\n\tresult.append(ans)\t\t\r\n\r\n#write to output\r\nwith open('B-large.txt','w+') as f:\r\n\tfor count, ans in enumerate(result):\r\n\t\tf.write(\"Case #{}: {}\\n\".format(count+1, ans))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1243.py","file_name":"1243.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34211402822","text":"import uproot\nimport awkward as ak\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# read the ttree\ntree = uproot.open(\"myNanoProdMc2016_NANO_1.root\")[\"Events\"]\n# read the pf candidates information into akward arrays\npfcands = tree.arrays(tree.keys('PF_*') + ['nPF'], entry_start=0, entry_stop=1000)\n\n# plot the number of PF candidates per event\nplt.figure()\nplt.hist( ak.to_numpy(pfcands['nPF']), bins=50, range=(0,3000), histtype='step')\nplt.savefig(\"nPF.png\")\n\n# get the unique pdgIds\npdgIds = ak.to_numpy(ak.flatten(pfcands['PF_pdgId']))\nprint(\"unique pdgIds: \", np.unique(pdgIds))\nprint(\"unique abs(pdgIds): \", np.unique(abs(pdgIds)))\n\n# calculate the average of abs(PF_dxy) for charged pf candidates \ndxyavg_PV = ak.to_numpy(ak.mean(abs(pfcands['PF_dxy']),weight=((pfcands['PF_charge']!=0) * (pfcands['PF_puppiWeight']==1)), axis=1), allow_missing=False)\ndxyavg_PU = ak.to_numpy(ak.mean(abs(pfcands['PF_dxy']),weight=((pfcands['PF_charge']!=0) * (pfcands['PF_puppiWeight']==0)), axis=1), allow_missing=False)\n\nplt.figure()\nplt.hist(dxyavg_PU, bins=50, range=(0,1.0), histtype='step', label='Charged PU')\nplt.hist(dxyavg_PV, bins=50, range=(0,1.0), histtype='step', label='Charged PV')\nplt.legend(loc=\"upper right\")\nplt.xlabel('Average |PF_dxy| for charged particles [cm]')\nplt.savefig('PF_absdxy_charged.png')\n","repo_name":"yongbinfeng/PFCands","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73849201473","text":"from libqtile import bar\nfrom libqtile.config import Screen\nfrom qtile_extras import widget\nfrom modules.widgets import *\nfrom utils.settings import colors, two_monitors, wallpaper_main, wallpaper_sec\n\ncolor_alert = '#ee9900'\ncolor_frame = '#808080'\n\nwidget_defaults = dict(\n font=\"FiraCode Nerd Font\",\n fontsize=14,\n padding=2,\n)\n\nextension_defaults = widget_defaults.copy()\n\ndef create_bar(extra_bar = False):\n \"\"\"Create top bar, defined as function to allow duplication in other monitors\"\"\"\n return bar.Bar(\n [\n gen_separator(25,50),\n w_bar_icon,\n w_window_name,\n gen_spacer(),\n *gen_groupbox(),\n gen_spacer(),\n *((w_systray,) if not extra_bar else ()),\n gen_separator(15,50),\n vol_icon, w_vol,\n gen_separator(5,50),\n network_icon, w_network,\n gen_separator(11,50),\n clock_icon, w_clock,\n gen_separator(25,50),\n ],\n 36,\n margin=[0, 0, 4, 0],\n background=\"#000000\", opacity=0.8,\n )\n\nmain_screen_bar = create_bar()\nif two_monitors:\n secondary_screen_bar = create_bar(True)\n\nscreens = [\n Screen(\n wallpaper=wallpaper_main,\n wallpaper_mode=\"fill\",\n top=main_screen_bar,\n bottom=bar.Gap(4),\n left=bar.Gap(4),\n right=bar.Gap(4),\n ),\n]\n\nif two_monitors:\n screens.append(\n Screen(\n wallpaper=wallpaper_sec,\n wallpaper_mode=\"fill\",\n top=secondary_screen_bar,\n bottom=bar.Gap(4),\n left=bar.Gap(4),\n right=bar.Gap(4),\n ),\n )\n","repo_name":"fantasy0x1/dotfiles","sub_path":".config/qtile/modules/screens.py","file_name":"screens.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"12044794007","text":"# Parser class that will consume a list of token to build the grammar\nfrom lox.token import LoxToken\nfrom lox.tokentype import TokensDic as Tk\nfrom lox.constants import LoxConstant\nfrom lox.stmt import *\nfrom lox.expr import *\nfrom lox.visitor import Visitor\nfrom lox.error import ParserError, LoxError\nfrom lox.functiontypes import FunctionType\n\nfrom typing import List\n\n\nclass Parser:\n #\n # The parser is initialized with the list of tokens to parse\n def __init__(self, tokens: LoxToken):\n self.tokens = tokens\n self.current = 0\n\n #\n # get the current token without moving forward\n def peek(self) -> LoxToken:\n return self.tokens[self.current]\n\n #\n # Get previous token\n def previous(self) -> LoxToken:\n if self.current > 0:\n return self.tokens[self.current-1]\n else:\n return None\n\n #\n # Get next token\n def next(self) -> LoxToken:\n if self.is_at_end():\n raise ParserError(\n self.peek(), \"reached end of file, cannot get next token.\")\n return self.tokens[self.current+1]\n\n #\n # Check we did not reach the end of the file\n def is_at_end(self) -> bool:\n return self.peek().type == Tk.EOF\n\n #\n # Check the token type whithout moving forward\n def check(self, ttype: str) -> bool:\n if self.is_at_end():\n return False\n return self.peek().type == ttype\n\n #\n # Check the next token type whithout moving forward\n def checknext(self, ttype: str) -> bool:\n if self.is_at_end():\n return False\n return self.next().type == ttype\n\n #\n # To check if the current token type is any of the input list.\n # If so consume the token (advance) that can be retrieved with previous()\n def match(self, *tokentypes: List[str]) -> bool:\n for t in tokentypes:\n if self.check(t):\n self.advance()\n return True\n return False\n\n #\n # Consume and return token\n def advance(self) -> LoxToken:\n if not self.is_at_end():\n self.current += 1\n return self.previous()\n\n #\n # Check if the next token is of the specified type and return it, otherwise return an error\n # moves forward if so.\n def consume(self, ttype: str, message: str) -> LoxToken:\n # print(\"--> in parserconsume: Token:\",\n # self.tokens[self.current].type, \" to compare with: \", ttype)\n if self.check(ttype):\n return self.advance()\n raise ParserError(self.peek(), message)\n\n #\n # -------------------------------------------------\n # Statement functions\n # -------------------------------------------------\n #\n def declaration(self) -> Stmt:\n try:\n # variable declaration statement\n if self.match(Tk.VAR):\n return self.vardeclaration()\n # function declaration statement\n elif self.check(Tk.FUN): # and self.checknext(Tk.IDENTIFIER):\n return self.fundeclaration()\n # class declaration statement\n elif self.match(Tk.CLASS):\n return self.classdeclaration()\n # generic statement\n else:\n return self.statement()\n\n except ParserError as err:\n print(\"error in parsing at token \" +\n str(self.previous()) + \" \" + err.message)\n return None\n # self.synchronize()\n\n def vardeclaration(self) -> Stmt:\n # Let's get the identifier\n tk_varname = self.consume(\n Tk.IDENTIFIER, \"Expect a variable identifier\")\n # Retrieve the initializing expression\n initializer = None\n if self.match(Tk.EQUAL):\n initializer = self.expression()\n # Make sure the statement has a termination statement\n self.consume(Tk.SEMICOLON, \"Expect ';' after variable declaration.\")\n return Var(tk_varname, initializer)\n\n def fundeclaration(self) -> Stmt:\n self.consume(Tk.FUN, \"Expect 'fun' for 
function statement.\")\n # a function statement should not be anonymous and is not a lambda\n return Function(self.functionbody(FunctionType.FUNCTION))\n\n def classdeclaration(self) -> Stmt:\n superclass = None\n name = self.consume(\n Tk.IDENTIFIER, \"Expect a class name after 'class'.\")\n if self.match(Tk.LESS):\n self.consume(\n Tk.IDENTIFIER, \"Expect a super class name after '<'.\")\n superclass = Variable(self.previous())\n self.consume(Tk.LEFT_BRACE, \"Expect a '{' name after class name.\")\n methods = []\n while not self.check(Tk.RIGHT_BRACE) and not self.is_at_end():\n methods.append(self.functionbody(FunctionType.METHOD))\n self.consume(Tk.RIGHT_BRACE, \"Expect '}' after class body.\")\n return Class(name, superclass, methods)\n\n def statement(self) -> Stmt:\n if self.match(Tk.PRINT):\n return self.printstatement()\n if self.match(Tk.LEFT_BRACE):\n return Block(self.blockstatement())\n if self.match(Tk.IF):\n return self.ifstatement()\n if self.match(Tk.WHILE):\n return self.whilestatement()\n if self.match(Tk.FOR):\n return self.forstatement()\n if self.match(Tk.BREAK):\n return self.breakstatement()\n if self.match(Tk.RETURN):\n return self.returnstatement()\n return self.expressionstatement()\n\n def ifstatement(self) -> Stmt:\n self.consume(Tk.LEFT_PAREN,\n \"expect a ( at the start of the if condition.\")\n condition = self.expression()\n self.consume(Tk.RIGHT_PAREN,\n \"expect a ) at the end of the if condition.\")\n thenbranch = self.statement()\n elsebranch = None\n if self.match(Tk.ELSE):\n elsebranch = self.statement\n return If(condition, thenbranch, elsebranch)\n\n def whilestatement(self) -> Stmt:\n self.consume(Tk.LEFT_PAREN,\n \"expect a ( at the start of the 'while' condition.\")\n condition = self.expression()\n self.consume(Tk.RIGHT_PAREN,\n \"expect a ) at the end of the 'while' condition.\")\n body = self.statement()\n return While(condition, body)\n\n def forstatement(self) -> Stmt:\n self.consume(Tk.LEFT_PAREN, \"expect a ( after a 'for'.\")\n initializer = None\n if self.match(Tk.VAR):\n initializer = self.vardeclaration()\n elif not self.match(Tk.SEMICOLON):\n initializer = self.expressionstatement()\n condition = None\n if not self.check(Tk.SEMICOLON):\n condition = self.expression()\n self.consume(Tk.SEMICOLON, \"expect a ; after 'for' condition.\")\n increment = None\n if not self.check(Tk.RIGHT_PAREN):\n increment = self.expression()\n self.consume(Tk.RIGHT_PAREN,\n \"expect a ) at the end of the 'for' increment.\")\n body = self.statement()\n # Build the equivalent while loop\n # --> first create a while loop with condition + (body, increment)\n body = Block([body, Expression(increment)])\n if condition is None:\n condition = Literal(True)\n body = While(condition, body)\n # --> add the initializer\n if initializer is not None:\n body = Block([initializer, body])\n return body\n\n def blockstatement(self) -> List[Stmt]:\n statements = []\n while not self.is_at_end() and not self.check(Tk.RIGHT_BRACE):\n statements.append(self.declaration())\n self.consume(Tk.RIGHT_BRACE, \"expect } at the end of block.\")\n return statements\n\n def breakstatement(self) -> Stmt:\n self.consume(Tk.SEMICOLON, \"expect a ';' after a 'break'.\")\n return Break(self.previous())\n\n # def varstatement(self) -> Stmt:\n # try:\n # if self.match(Tk.VAR):\n # return self.vardeclaration()\n # return self.statement()\n # except ParserError as err:\n # pass\n # # self.synchronize(err)\n\n def printstatement(self) -> Stmt:\n value = self.expression()\n # consume the semicolon 
that should be at the end of the statement\n self.consume(Tk.SEMICOLON, \"expecting a ';' at the end of the line.\")\n return Print(value)\n\n def returnstatement(self) -> Stmt:\n keyword = self.previous()\n value = None\n if not self.check(Tk.SEMICOLON):\n value = self.expression()\n self.consume(\n Tk.SEMICOLON, \"expect a ';' at the end of the 'return' statement.\")\n return Return(keyword, value)\n\n def expressionstatement(self) -> Stmt:\n # Retrieve the expression in the statement\n expr = self.expression()\n # Consume the ; token\n self.consume(Tk.SEMICOLON, \"expecting a ; at the end of the line\")\n return Expression(expr)\n #\n # --------------------------------------\n # Expressions functions\n # --------------------------------------\n #\n\n def expression(self) -> Expr:\n return self.assignment()\n\n def assignment(self) -> Expr:\n # Consider the possible left hand side of the assignment as any expression\n expr = self.logic_or()\n # if we match the = we know it is a candidate for assignment\n if self.match(Tk.EQUAL):\n tk_equals = self.previous()\n # assignment are right associative, so we neeed to parse them right recursively\n right = self.assignment()\n # if the l-value is a possible variable, return the assignment\n if isinstance(expr, Variable):\n return Assign(expr.name, right)\n # we can also assign instances property\n elif isinstance(expr, Get):\n return Set(expr.getobject, expr.name, right)\n # we do not have a variable like, error\n raise ParserError(tk_equals, \"invalid assignment target\")\n # the expression is not an assignment\n return expr\n\n def logic_or(self) -> Expr:\n expr = self.logic_and()\n while (self.match(Tk.OR)):\n logic_op = self.previous()\n right = self.logic_and()\n expr = Logical(expr, logic_op, right)\n return expr\n\n def logic_and(self) -> Expr:\n expr = self.equality()\n while (self.match(Tk.AND)):\n logic_op = self.previous()\n right = self.equality()\n expr = Logical(expr, logic_op, right)\n return expr\n\n def equality(self) -> Expr:\n expr = self.comparison()\n\n while self.match(Tk.EQUAL_EQUAL, Tk.BANG_EQUAL):\n operator = self.previous()\n right = self.comparison()\n expr = Binary(expr, operator, right)\n return expr\n\n def comparison(self) -> Expr:\n expr = self.addition()\n\n while self.match(Tk.GREATER_EQUAL, Tk.GREATER,\n Tk.LESS, Tk.LESS_EQUAL):\n operator = self.previous()\n right = self.addition()\n expr = Binary(expr, operator, right)\n return expr\n\n def addition(self) -> Expr:\n expr = self.multiplication()\n\n while self.match(Tk.PLUS, Tk.MINUS):\n operator = self.previous()\n right = self.multiplication()\n expr = Binary(expr, operator, right)\n return expr\n\n def multiplication(self) -> Expr:\n expr = self.unary()\n\n while self.match(Tk.SLASH, Tk.STAR):\n operator = self.previous()\n right = self.unary()\n expr = Binary(expr, operator, right)\n return expr\n\n def unary(self) -> Expr:\n # this time we have a right associative operator\n while self.match(Tk.MINUS, Tk.BANG):\n operator = self.previous()\n right = self.unary()\n # we loop until all operators are consumed\n return Unary(operator, right)\n # no more operator, we have a call expr\n return self.call()\n\n def call(self) -> Expr:\n expr = self.primary()\n # Let's parse function call (passing the callee) as long as we have paranthesis\n while True:\n if self.match(Tk.LEFT_PAREN):\n expr = self.finishcall(expr)\n elif self.match(Tk.DOT):\n name = self.consume(\n Tk.IDENTIFIER, \"Expect a property name after the '.'\")\n expr = Get(expr, name)\n else:\n break\n 
return expr\n\n def finishcall(self, callee: Expr) -> Expr:\n arguments = []\n if not self.check(Tk.RIGHT_PAREN):\n while \"let's parse arguments as long as we have commas\":\n arguments.append(self.expression())\n if not self.match(Tk.COMMA):\n break\n if len(arguments) > LoxConstant.max_param:\n LoxError.error(self.peek(),\n \"function cannot have more than 8 arguments\")\n call_left_paren = self.consume(\n Tk.RIGHT_PAREN, \"expect a ')' at the end of a function call.\")\n return Call(callee, call_left_paren, arguments)\n\n def functionbody(self, kind: FunctionType):\n funcid = None\n functiontype = kind\n if kind is FunctionType.FUNCTION:\n funcid = self.consume(\n Tk.IDENTIFIER, \"Expect a function name after 'fun'.\")\n self.consume(Tk.LEFT_PAREN, \"expect a '(' after function name.\")\n elif kind is FunctionType.LAMBDA:\n self.consume(Tk.LEFT_PAREN, \"expect a '(' after 'fun' for lambda.\")\n elif kind is FunctionType.METHOD:\n funcid = self.consume(Tk.IDENTIFIER, \"Expect a method name.\")\n if funcid is LoxConstant.init_method:\n functiontype = FunctionType.INIT\n self.consume(Tk.LEFT_PAREN, \"expect a '(' after method name.\")\n else:\n raise ParserError(\n self.previous(), \"Unexpected function type in parser.\")\n parameters = []\n # All parameters should be identifiers\n if not self.check(Tk.RIGHT_PAREN):\n while \"we have comma after the parameter\":\n parameters.append(\n self.consume(Tk.IDENTIFIER, \"expect identifiers as function parameters.\"))\n if len(parameters) > LoxConstant.max_param:\n LoxError.error(self.previous(\n ), \"function can take at most \" + LoxConstant.max_param + \" parameters.\")\n if not self.match(Tk.COMMA):\n break\n self.consume(Tk.RIGHT_PAREN,\n \"expect a ) at the end of the function parameters.\")\n # Parse the body of the function\n self.consume(\n Tk.LEFT_BRACE, \"expect a { after function parameters list.\")\n body = self.blockstatement()\n return FunctionExp(funcid, parameters, body, functiontype)\n\n def primary(self) -> Expr:\n if self.match(Tk.FALSE):\n return Literal(False)\n\n if self.match(Tk.TRUE):\n return Literal(True)\n\n if self.match(Tk.NIL):\n return Literal(None)\n\n if self.match(Tk.NUMBER, Tk.STRING):\n return Literal(self.previous().literal)\n\n if self.match(Tk.IDENTIFIER):\n return Variable(self.previous())\n\n # I consider function definition in expression as lambda. Open to discussion\n if self.match(Tk.FUN):\n return self.functionbody(FunctionType.LAMBDA)\n\n if self.match(Tk.THIS):\n return This(self.previous())\n\n if self.match(Tk.SUPER):\n keyword = self.previous()\n self.consume(Tk.DOT, \"Expect a '.' after 'super'.\")\n method = self.consume(\n Tk.IDENTIFIER, \"Expect superclass method name after 'super.'.\")\n return Super(keyword, method)\n\n if self.match(Tk.LEFT_PAREN):\n expr = self.expression()\n self.consume(Tk.RIGHT_PAREN, \"Expect ')' after expr.\")\n return Grouping(expr)\n\n raise ParserError(self.peek(), \"expecting an expr.\")\n #\n # Parsing the list of statements\n\n def parse(self) -> List[Stmt]:\n try:\n statements = []\n while not self.is_at_end():\n statements.append(self.declaration())\n\n return statements\n except ParserError as e:\n print(e.message)\n","repo_name":"marcjourneux/pylox","sub_path":"lox/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":16570,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
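+# The expression chain above (assignment -> logic_or -> ... -> unary -> call ->
+# primary) is textbook recursive descent: each method parses its own operators
+# and delegates tighter-binding ones downward. A self-contained toy version of
+# just the addition/multiplication levels (tokens reduced to single-character
+# strings; none of this is from the parser above):
+def parse_expr(tokens):
+    # expr := term (('+' | '-') term)*
+    node, pos = parse_term(tokens, 0)
+    while pos < len(tokens) and tokens[pos] in '+-':
+        op = tokens[pos]
+        rhs, pos = parse_term(tokens, pos + 1)
+        node = (op, node, rhs)
+    return node
+
+def parse_term(tokens, pos):
+    # term := NUMBER ('*' NUMBER)*
+    node = tokens[pos]
+    pos += 1
+    while pos < len(tokens) and tokens[pos] == '*':
+        node = ('*', node, tokens[pos + 1])
+        pos += 2
+    return node, pos
+
+# parse_expr(['1', '+', '2', '*', '3']) -> ('+', '1', ('*', '2', '3')):
+# multiplication binds tighter because the lower level consumes it first.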
+{"seq_id":"43144010380","text":"'''\nLicensed under the Apache License, Version 2.0. See License.txt in the project root for license information.\n\nTests some different ways to Bootstrap nodes. \n(Note: The APITest does some of this type also!)\n\nCreated on Oct 17, 2014\n\n@author: dfleck\n'''\n\nfrom twisted.trial import unittest\nfrom twisted.internet import task, defer, reactor\nfrom twisted.python import log, failure\n\n\nfrom gmu.chord import NetworkUtils, Config\nfrom gmu.chord.NodeLocation import NodeLocation\nfrom gmu.chord.MetricsMessageObserver import MetricsMessageObserver\n\nfrom gmu.netclient.classChordNetworkChord import classChordNetworkChord\n\n# Testing Modules\nfrom ConnectivityCounter import ConnectivityCounter\nfrom SampleClient import SampleClient\nimport TestUtils\nimport sys, random\nfrom TestMessageObserver import TestMessageObserver\n\n\nnumNodes = 5 # Number of nodes per enclave\nnumMessages=5 # Total number of messages to send to each node\nstartingPort = 12350\n\n\nclass BootstrapTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(BootstrapTests, cls).setUpClass()\n BootstrapTests.logObs = log.startLogging(sys.stdout)\n \n \n @classmethod\n def tearDownClass(cls):\n super(BootstrapTests, cls).tearDownClass()\n if BootstrapTests.logObs is not None:\n BootstrapTests.logObs.stop()\n \n def setUp(self):\n '''Start the reactor so we don't have to do it in the nodes.'''\n\n # Turn ON warning for this test!\n Config.WARN_NO_MESSAGE_AUTHENTICATOR = True\n Config.ALLOW_NO_AUTHENTICATOR = False\n\n # This is the IP of the node. Note: This MUST be \n # an external ID or the code won't work!\n self.myIP = NetworkUtils.getNonLoopbackIP (None, None)\n log.msg('Got IP: %s:%s' % (self.myIP, type(self.myIP)))\n\n return\n \n \n \n\n\n \n \n @defer.inlineCallbacks\n def testReBootstrap(self):\n '''Create a BS node, then a client node, then kill the BS Node, wait, restart a BS node and \n check for re-Bootstrap\n '''\n global startingPort\n \n # Create Bootstrap\n port = 12345\n bootstrapNodeLocation = NodeLocation(None, self.myIP, port)\n self.allNodes = []\n self.allMetricsObservers = []\n self.allTestObservers = []\n\n # Build the BS node\n log.msg(\"building BS node...\")\n (status, bsClientAPI, bsNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port, None, 'theEnclave', False, True)\n self.assertTrue(status, 'Could not build bootstrap node')\n \n # Build the client node\n log.msg(\"building client node...\")\n (status, clClientAPI, clNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port+1, bootstrapNodeLocation, 'theEnclave', False, False)\n self.assertTrue(status, 'Could not build client node')\n \n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n self.assertTrue(connected, \"Client did not connect to the bootstrap node in testReBootstrap!\")\n \n # Now kill the BS node\n yield bsNetworkAPI.disconnect()\n bsNetworkAPI = None\n bsClientAPI = None\n \n # Gotta wait for disconnect to really finish\n yield TestUtils.waitForConnectionCache()\n \n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n self.assertTrue(not connected, \"Client remains connected to the bootstrap node in testReBootstrap after killing BS node!\")\n \n # Now startup another bootstrap node\n log.msg(\"building BS node...\")\n (status, bsClientAPI, bsNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port, None, 'theEnclave', False, True)\n 
self.assertTrue(status, 'Could not build second bootstrap node')\n \n # Wait for it to connect or fail -- basically waiting for the\n # maintenance call to run correctly.\n for _ in range(10):\n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n if connected:\n break\n yield TestUtils.wait(1)\n \n self.assertTrue(connected, \"Client could not re-bootstrap!\")\n\n \n # Now shut everything down\n \n log.msg(\"\\n\\ntestReBootstrap: Shutting down now...\\n\\n\")\n yield clNetworkAPI.disconnect()\n yield bsNetworkAPI.disconnect()\n\n if Config.USE_CONNECTION_CACHE:\n yield TestUtils.waitForConnectionCache()\n else: \n yield TestUtils.wait(5)\n\n defer.returnValue(True)\n \n \n @defer.inlineCallbacks\n def testReBootstrapNewNodeLoc(self):\n '''Create a BS node, then a client node, then kill the BS Node, wait, restart a BS node at a different location and \n check for re-Bootstrap\n '''\n global startingPort\n \n # Create Bootstrap\n port = 12345\n bootstrapNodeLocation = NodeLocation(None, self.myIP, port)\n self.allNodes = []\n self.allMetricsObservers = []\n self.allTestObservers = []\n\n # Build the BS node\n log.msg(\"building BS node...\")\n (status, bsClientAPI, bsNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port, None, 'theEnclave', False, True)\n self.assertTrue(status, 'Could not build bootstrap node')\n \n # Build the client node\n log.msg(\"building client node...\")\n (status, clClientAPI, clNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port+1, bootstrapNodeLocation, 'theEnclave', True, False)\n self.assertTrue(status, 'Could not build client node')\n \n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n self.assertTrue(connected, \"Client did not connect to the bootstrap node in testReBootstrap!\")\n \n # Now kill the BS node\n yield bsNetworkAPI.disconnect()\n bsNetworkAPI = None\n bsClientAPI = None\n \n \n # Gotta wait for network timeout disconnect to really finish.\n yield TestUtils.wait(30)\n \n \n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n self.assertTrue(not connected, \"Client remains connected to the bootstrap node in testReBootstrap after killing BS node!\")\n \n # Now startup another bootstrap node\n log.msg(\"building BS node...\")\n (status, bsClientAPI, bsNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port+3, None, 'theEnclave', False, True)\n self.assertTrue(status, 'Could not build second bootstrap node')\n \n # Wait for it to connect or fail -- basically waiting for the\n # maintenance call to run correctly.\n for _ in range(30):\n # Check that the client is connected to something\n connected = yield clNetworkAPI.isConnected()\n if connected:\n break\n yield TestUtils.wait(1)\n \n\n \n # Now shut everything down\n \n log.msg(\"\\n\\ntestReBootstrap: Shutting down now...\\n\\n\")\n yield clNetworkAPI.disconnect()\n yield bsNetworkAPI.disconnect()\n \n if Config.USE_CONNECTION_CACHE:\n yield TestUtils.waitForConnectionCache()\n else: \n yield TestUtils.wait(5)\n \n \n # This is the final question\n self.assertTrue(connected, \"Client could not re-bootstrap!\")\n\n defer.returnValue(True)\n \n \n @defer.inlineCallbacks\n def testDisconnectedBootstraps(self):\n '''Create a BS node and some clients. Create another bootstrap node and some clients (so we essentially have two rings). 
\n        Verify that the bootstrap nodes autodiscover each other and connect together \n        '''\n        global startingPort\n        \n        # Create Bootstrap\n        port = 12345\n        bootstrapNodeLocation = NodeLocation(None, self.myIP, port)\n        bootstrapNodeLocation2 = NodeLocation(None, self.myIP, port+1)\n        self.allNodes = []\n        self.allMetricsObservers = []\n        self.allTestObservers = []\n\n        # Build the BS node\n        (status, bsClientAPI, bsNetworkAPI) = yield TestUtils.startNodeUsingAPI(bootstrapNodeLocation.ip, bootstrapNodeLocation.port, None, 'theEnclave', True, True)\n        self.allMetricsObservers.append(MetricsMessageObserver(bsNetworkAPI.chordNode))\n        self.assertTrue(status, 'Could not build bootstrap node')\n\n        # Build second BS node\n        (status, bsClientAPI2, bsNetworkAPI2) = yield TestUtils.startNodeUsingAPI(bootstrapNodeLocation2.ip, bootstrapNodeLocation2.port, None, 'theEnclave', True, True)\n        self.allMetricsObservers.append(MetricsMessageObserver(bsNetworkAPI2.chordNode))\n        self.assertTrue(status, 'Could not build bootstrap node 2')\n\n        \n        # Build the first client node\n        (status, clClientAPI, clNetworkAPI) = yield TestUtils.startNodeUsingAPI(self.myIP, port+2, bootstrapNodeLocation, 'theEnclave', False, False)\n        self.allMetricsObservers.append(MetricsMessageObserver(clNetworkAPI.chordNode)) \n        self.assertTrue(status, 'Could not build client node')\n        \n        # Build the second client node\n        (status, clClientAPI2, clNetworkAPI2) = yield TestUtils.startNodeUsingAPI(self.myIP, port+3, bootstrapNodeLocation2, 'theEnclave', False, False)\n        self.allMetricsObservers.append(MetricsMessageObserver(clNetworkAPI2.chordNode))\n        self.assertTrue(status, 'Could not build client node')\n\n\n        # Wait for flooding to reach all the nodes\n        waiter = ConnectivityCounter()\n        yield waiter.waitForConnectivity(3, clNetworkAPI.chordNode) # Does not count clNode itself.\n        \n        \n        # Now shut everything down\n        yield clNetworkAPI.disconnect()\n        yield clNetworkAPI2.disconnect()\n        yield bsNetworkAPI.disconnect()\n        yield bsNetworkAPI2.disconnect()\n        \n        if Config.USE_CONNECTION_CACHE:\n            yield TestUtils.waitForConnectionCache()\n        else: \n            yield TestUtils.wait(5)\n        \n        defer.returnValue(True)\n","repo_name":"danfleck/Class-Chord","sub_path":"network-client/src/tests/BootstrapTests.py","file_name":"BootstrapTests.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
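+# The tests above repeat the same "poll isConnected() once per second until it
+# flips" loop. A helper in the same inlineCallbacks style could factor it out
+# (a sketch only -- TestUtils.wait and isConnected() are assumed to behave as
+# they do in the suite above):
+from twisted.internet import defer
+
+@defer.inlineCallbacks
+def wait_until_connected(networkAPI, attempts=30):
+    """Poll networkAPI.isConnected() up to `attempts` times, 1s apart."""
+    connected = False
+    for _ in range(attempts):
+        connected = yield networkAPI.isConnected()
+        if connected:
+            break
+        yield TestUtils.wait(1)  # TestUtils comes from the surrounding suite
+    defer.returnValue(connected)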
+{"seq_id":"28533466495","text":"import sqlite3\nimport time\nfrom flask import Flask, jsonify, request, abort\nfrom argparse import ArgumentParser\n\n\nDB = 'db.sqlite'\n\n \napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"\"\"It works!
\"\"\"\n\n@app.route('/api/login', methods=['POST'])\ndef login():\n if not request.json:\n abort(404)\n \n user_id = fetch_user_id(request.json[\"email\"], request.json[\"password\"])\n\n session_id = int(round(time.time() * 1000))\n changeData(\"\"\"\n INSERT INTO session (id, user_id)\n VALUES(?,?)\n \"\"\", (session_id, user_id))\n\n return jsonify({\n \"matching_user_id\": user_id,\n \"session_id\": session_id\n }), 200\n\n@app.route('/api/logout', methods=['POST'])\ndef logout():\n if not request.json:\n abort(404)\n \n return jsonify(changeData(\"\"\"\n DELETE FROM session\n WHERE id = ?\n \"\"\", (request.json[\"session_id\"],))\n ) , 200\n\n\n@app.route('/api/admin/see_table/', methods=['GET'])\ndef admin_get_table(table):\n parser = {\n 'user': parseUser,\n 'task': parseTask,\n 'reminder': parseReminder\n }\n data = fetchData(parser[table], f'SELECT * FROM {table}')\n return jsonify(data), 200\n\n \n@app.route('/api/task/', methods=['GET'])\ndef retrieve_task(session_id):\n user_id = fetch_user_id_using_session(session_id)\n tasks = fetch_task(user_id)\n for task in tasks:\n task[\"reminder\"] = fetch_reminder(task[\"id\"])\n return jsonify(tasks), 200\n\n@app.route('/api/task', methods=['POST'])\ndef create_task():\n if not request.json:\n abort(404)\n \n user_id = fetch_user_id_using_session(request.json[\"session_id\"])\n\n new_task = (\n user_id,\n request.json['title'],\n request.json['content'],\n request.json['pinned']\n )\n \n response = changeData(\"\"\"\n INSERT INTO task (user_id,title,content,pinned)\n VALUES(?,?,?,?)\n \"\"\", new_task)\n\n task_id = response[\"id\"]\n\n # Insert reminders\n reminders = request.json[\"reminders\"]\n for r in reminders:\n changeData(\"\"\"\n INSERT INTO reminder(task_id, date)\n VALUES(?,?)\n \"\"\", (task_id, r[\"date\"]))\n\n return jsonify(response), 200 \n\n@app.route('/api/task', methods=['DELETE'])\ndef delete_task():\n if not request.json:\n abort(404)\n \n if not fetch_user_id_using_session(request.json[\"session_id\"]):\n abort(404)\n\n response = changeData(\"\"\"\n DELETE FROM task WHERE id=?\n \"\"\", (request.json['task_id'],))\n \n return jsonify(response), 200\n\n@app.route('/api/task', methods=['PUT'])\ndef update_task():\n print(request.json[\"session_id\"])\n if not fetch_user_id_using_session(request.json[\"session_id\"]):\n abort(404)\n\n new_task = (\n request.json['title'],\n request.json['content'],\n request.json['pinned'],\n request.json['task_id'],\n )\n response = changeData(\"\"\"\n UPDATE task \n SET\n title = ?,\n content = ?,\n pinned = ?\n WHERE id = ?\n \"\"\", new_task)\n\n\n task_id = request.json['task_id']\n\n # Delete related reminders\n changeData(\"\"\"\n DELETE FROM reminder WHERE task_id=?\n \"\"\", (task_id,))\n\n # Update reminders\n reminders = request.json[\"reminders\"]\n\n for r in reminders:\n changeData(\"\"\"\n INSERT INTO reminder(task_id, date)\n VALUES(?,?)\n \"\"\", (task_id, r[\"date\"]))\n\n return jsonify(response), 200 \n\n\ndef fetch_reminder(task_id):\n return fetchData(parseReminder, \n \"\"\"\n SELECT * FROM reminder\n WHERE task_id = ?\n \"\"\", (task_id,))\n \ndef fetch_task(user_id):\n return fetchData(parseTask, \n \"\"\"\n SELECT * FROM task\n WHERE user_id = ?\n \"\"\", (user_id,))\n\ndef fetch_user_id_using_session(session_id):\n result = fetchData(parseSession,\n \"\"\"\n SELECT * FROM session\n WHERE id = ?\n \"\"\", (session_id,))\n\n if len(result) > 0:\n return result[0][\"user_id\"]\n else:\n return None\n\ndef fetch_user_id(email, password):\n result = 
fetchData(parseUser, \n \"\"\"\n SELECT * FROM user\n WHERE email = ?\n AND password = ?\n \"\"\", (email,password))\n \n if len(result) > 0:\n return result[0][\"id\"]\n else:\n return None\n\n\ndef parseSession(row):\n return {\n 'id': row[0],\n 'user_id': row[1],\n }\n\ndef parseUser(row):\n return {\n 'id': row[0],\n 'email': row[1],\n }\n \ndef parseTask(row):\n return {\n 'id': row[0],\n 'user_id': row[1],\n 'title': row[2],\n 'content': row[3],\n 'pinned': row[4],\n }\n\ndef parseReminder(row):\n return {\n 'id': row[0],\n 'task_id': row[1],\n 'date': row[2],\n }\n \ndef fetchData(parser, query, queryParam=None):\n db = sqlite3.connect(DB)\n cursor = db.cursor()\n if queryParam:\n cursor.execute(query, queryParam)\n else:\n cursor.execute(query)\n rows = cursor.fetchall()\n db.close()\n result = []\n for row in rows:\n result.append(parser(row))\n return result\n\n# Change means : INSERT, UPDATE or DELETE\ndef changeData(query, queryParam):\n db = sqlite3.connect(DB)\n cursor = db.cursor()\n cursor.execute(query, queryParam)\n id = cursor.lastrowid\n db.commit()\n response = {\n 'id': id,\n 'affected': db.total_changes,\n }\n db.close()\n return response\n \n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')\n args = parser.parse_args()\n port = args.port\n\n app.run(host='0.0.0.0', port=port)\n","repo_name":"wongjiahau/wireless-app-assignment-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
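+# End-to-end usage sketch of the API above (the base URL and credentials are
+# placeholders; the endpoint paths and JSON field names mirror what the
+# handlers read from request.json):
+import requests
+
+BASE = 'http://localhost:5000'
+
+session = requests.post(BASE + '/api/login',
+                        json={'email': 'user@example.com', 'password': 'secret'}).json()
+requests.post(BASE + '/api/task', json={
+    'session_id': session['session_id'],
+    'title': 'demo', 'content': 'hello', 'pinned': 0,
+    'reminders': [{'date': '2020-01-01'}],
+})
+print(requests.get(BASE + '/api/task/' + str(session['session_id'])).json())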
+{"seq_id":"29495187222","text":"import unittest\nfrom rnamake import sqlite_library, settings, motif, residue_type, util, motif_state_tree\nfrom rnamake.unittests import build\n\n\nclass BasicLibrariesUnittests(unittest.TestCase):\n\n def setUp(self):\n self.rts = residue_type.ResidueTypeSet()\n\n path = settings.RESOURCES_PATH + \"/motifs/base.motif\"\n self.base_motif = motif.file_to_motif(path)\n self.added_motif = motif.file_to_motif(path)\n\n self.base_ms = self.base_motif.get_state()\n self.added_ms = self.added_motif.get_state()\n\n def _test_correct_build(self, mlib, ms_lib):\n\n for m in mlib.all():\n m1 = motif.get_aligned_motif(self.base_motif.ends[1], m.ends[0], m)\n m2 = motif.get_aligned_motif(m1.ends[1],\n self.added_motif.ends[0],\n self.added_motif)\n\n ms = ms_lib.get(name=m.name,\n end_name=m.ends[0].name(),\n end_id=m.end_ids[0])\n\n ms1 = motif.get_aligned_motif_state_single(self.base_ms.end_states[1], ms)\n ms2 = motif.get_aligned_motif_state_single(ms1.end_states[1], self.added_ms)\n\n #print m2.ends[1].d()\n #print self.added_ms.end_states[1].d\n diff = ms2.end_states[1].diff(m2.ends[1].state())\n if diff > 0.01:\n self.fail(m.name + \" did not give the same answer as its state\")\n\n\n def test_correct_build_twoway(self):\n mlib = sqlite_library.MotifSqliteLibrary(\"twoway\")\n mlib.load_all()\n ms_lib = sqlite_library.MotifStateSqliteLibrary(\"twoway\")\n ms_lib.load_all()\n self._test_correct_build(mlib, ms_lib)\n\n def test_correct_build_nway(self):\n mlib = sqlite_library.MotifSqliteLibrary(\"nway\")\n mlib.load_all()\n ms_lib = sqlite_library.MotifStateSqliteLibrary(\"nway\")\n ms_lib.load_all()\n self._test_correct_build(mlib, ms_lib)\n\n\nclass LargeBuildUnittests(unittest.TestCase):\n\n def test_large_random_builds(self):\n for i in range(100):\n builder = build.BuildMotifTree()\n mt = builder.build(10)\n\n mst = motif_state_tree.MotifStateTree(mt=mt)\n mt_end= mt.last_node().data.ends[1].state()\n mst_end = mst.last_node().data.cur_state.end_states[1]\n\n diff = mst_end.diff(mt_end)\n if diff > 0.1:\n self.fail(\" did not give the same answer as its state\")\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","repo_name":"zhuoyuzhang/RNAMake","sub_path":"rnamake/unittests/intergration/motif_compared_to_ms.py","file_name":"motif_compared_to_ms.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20055595116","text":"from DB import DataBase\nfrom tkinter import END, ACTIVE, filedialog\nfrom copy import copy\nfrom PIL import ImageTk, Image\nfrom sqlite3 import IntegrityError\nfrom Tagger import Tagger\nfrom collections.abc import Iterable\nfrom os.path import isdir, join\nfrom threading import Thread\n\n\nclass Intermediary:\n\n def __init__(self, ui):\n\n self.ui = ui\n self.any = True\n self.queue = None\n self.curr_img = 0\n\n def confirmed(self, event):\n\n #Listbox items\n tags = self.ui.builder.get_object(\"ListSelected\").get(0, END)\n# Unique images' ids\n res = set()\n for tg in tags:\n ids = DataBase.get_tagged_items(tg)\n #TODO maybe sth better if nth found\n if len(ids) < 1:\n return\n# Add else intersection\n if self.any:\n res |= set(ids)\n else:\n res &= set(ids)\n\n lt = list()\n for r in res:\n lt.append(DataBase.get_path(r))\n self.queue_images(lt)\n\n def clear(self, event):\n \"\"\"Clears tags listbox\"\"\"\n\n #Listbox clearing\n self.ui.builder.get_object(\"ListSelected\").delete(0, END)\n\n def list_tag(self, event):\n \"\"\"Add tag to listbox ListSelected\"\"\"\n\n eadd = self.ui.builder.get_object(\"EAdd\")\n val = eadd.get()\n eadd.delete(0, END)\n\n self.ui.builder.get_object(\"ListSelected\").insert(END, val)\n\n def remove_tag(self, event):\n \"\"\"Remove tag from listbox ListSelected\"\"\"\n\n event.widget.delete(ACTIVE)\n\n def rany(self, event):\n \"\"\"Changes search method\"\"\"\n\n var = self.ui.builder.get_variable(\"VarAny\").get()\n if var != \"Any\":\n self.any = True\n else:\n self.any = False\n\n def queue_images(self, ids):\n \"\"\"Queue images to display\"\"\"\n\n# Converting to list from different types of arguments\n if isinstance(ids, str):\n ids = [ids]\n elif isinstance(ids, Iterable):\n ids = list(ids)\n\n self.queue = copy(ids)\n self.curr_img = -1\n self.list_queue()\n self.show_image()\n\n def show_image(self, pth=None):\n \"\"\"\n Display image, if no argument is present get from queue. Called automatically when queue changes\n :arg pth: Path to image\n \"\"\"\n\n# Next image from queue\n if pth is None:\n self.curr_img += 1\n self.curr_img %= len(self.queue)\n pth = self.queue[self.curr_img]\n else:\n self.queue_images(list(pth))\n\n# Wrong path\n if pth is None:\n return\n\n# Prepare image\n img = Image.open(pth)\n\n if img.width > 800 or img.height > 450:\n factor = min(800/img.width, 450/img.height)\n img = img.resize((int(img.width*factor), int(img.height*factor)))\n\n img = ImageTk.PhotoImage(img)\n\n# Display image\n label = self.ui.builder.get_object(\"LImage\")\n label.config(image=img)\n label.image = img\n\n# Mark current image in listbox\n lb = self.ui.builder.get_object(\"ListResults\")\n lb.selection_clear(0, END)\n lb.selection_set(self.curr_img)\n\n# List tags\n self.list_image_tags()\n\n def list_queue(self):\n \"\"\"Called by queue_images. Lists queued paths in ListResults listbox\"\"\"\n\n lb = self.ui.builder.get_object(\"ListResults\")\n self.clear_results()\n for pth in self.queue:\n lb.insert(END, pth.split(sep=\"\\\\\")[-1])\n\n def list_image_tags(self):\n \"\"\"Called by show image. 
Adds current tags to ListTags listbox\"\"\"\n\n tags = DataBase.get_image_tags(pth=self.queue[self.curr_img])\n lt = self.ui.builder.get_object(\"ListTags\")\n self.clear_tags()\n for tag in tags:\n lt.insert(END, tag)\n\n def clear_results(self):\n\n self.ui.builder.get_object(\"ListResults\").delete(0, END)\n\n def clear_tags(self):\n\n self.ui.builder.get_object(\"ListTags\").delete(0, END)\n\n def path_input(self, pth):\n \"\"\"Handle request for new input file. If new tag and display else display\"\"\"\n\n self.clear_results()\n tfiles = None\n\n# Single image path\n if not isdir(pth):\n new = True\n# SQL exception if path is not unique\n try:\n DataBase.add_image(pth)\n except IntegrityError:\n new = False\n\n# Tag new\n if new:\n tags = Tagger.tag_file(pth)\n for tag in tags:\n DataBase.tag_image(tag, pth=pth)\n# Directory path\n else:\n tags, tfiles = Tagger.tag_dir(pth)\n for i in range(len(tfiles)):\n f = tfiles[i]\n# Full path to image\n fpth = join(pth, f)\n tfiles[i] = fpth\n# Continue if already present\n if DataBase.exists(pth=fpth):\n continue\n else:\n DataBase.add_image(fpth)\n# Tuple results\n if not isinstance(tags[i], str):\n for t in tags[i]:\n DataBase.tag_image(t, pth=fpth)\n# String result\n else:\n DataBase.tag_image(tags[i], pth=fpth)\n\n L = 1\n# Display\n if tfiles is None:\n self.queue_images(pth)\n else:\n #Number of listbox for results length\n L = len(tfiles)\n nlb = max(3, L)\n nlb = min(nlb, 12)\n self.ui.builder.get_object(\"ListResults\").config(height=nlb)\n\n self.queue_images(tfiles)\n\n self.update_info(\"Processed \" + str(L) + \" images\")\n\n def choose_dir(self, event):\n\n directory = filedialog.askdirectory()\n self.path_input(directory)\n\n def choose_file(self, event):\n\n file = filedialog.askopenfilename(title=\"Select file\", filetypes=((\"jpeg files\", \"*.jpg\"),\n (\"jpeg files\", \"*.jpeg\")))\n self.path_input(file)\n\n def next_image(self, event):\n\n self.show_image()\n\n def prev_image(self, event):\n\n self.curr_img -= 2\n if self.curr_img < 0:\n self.curr_img = -1\n self.show_image()\n\n def listbox_image(self, event):\n\n idx = event.widget.index(ACTIVE)\n self.curr_img = idx - 1\n self.show_image()\n\n def update_info(self, info=\" \"):\n\n linfo = self.ui.builder.get_object(\"LInfo\")\n linfo.configure(text=info)\n","repo_name":"Maciej-R/CV_Object_Detection","sub_path":"Intermediary.py","file_name":"Intermediary.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
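+# Side note on show_image's manual min(800/w, 450/h) scaling: PIL ships an
+# equivalent helper, img.thumbnail((800, 450)), which resizes in place,
+# preserves the aspect ratio, and never upscales -- matching the
+# "only shrink oversized images" intent of the branch above.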
+{"seq_id":"15650192941","text":"# otvoriti romeo.txt file\nfhand = open('romeo.txt')\n\n# ucitati sve rijeci u dict i prikazati koliko puta se rijec ponavlja\ncounts = dict()\n\nfor line in fhand:\n words = line.split()\n # varijabla words je objekt list sastavljen od stringova iz te linije\n for word in words:\n counts[word] = counts.get(word, 1) + 1\n\n# da bi vrijednosti u dictionary sortirali, moramo od njega napraviti listu\n\nprint ( sorted( [ (v,k) for k,v in counts.items() ] ) )\n'''\n[(1, 'b'), (10,'b'), (22, 'c')]\n'''\n","repo_name":"thecodereule/exercises","sub_path":"Tuples2.py","file_name":"Tuples2.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"11775942028","text":"import numpy as np\nimport util\nimport sys\nfrom random import random\n\nsys.path.append('../linearclass')\n\n# NOTE : You need to complete logreg implementation first!\n\nfrom logreg_loop_version import LogisticRegression\n\n# Character to replace with sub-problem letter in plot_path/save_path\nWILDCARD = 'X'\n# Ratio of class 0 to class 1\nkappa = 0.1\n\n\ndef main(train_path, validation_path, save_path):\n \"\"\"Problem 2: Logistic regression for imbalanced labels.\n\n Run under the following conditions:\n 1. naive logistic regression\n 2. upsampling minority class\n\n Args:\n train_path: Path to CSV file containing training set.\n validation_path: Path to CSV file containing validation set.\n save_path: Path to save predictions.\n \"\"\"\n output_path_naive = save_path.replace(WILDCARD, 'naive')\n output_path_upsampling = save_path.replace(WILDCARD, 'upsampling')\n\n # *** START CODE HERE ***\n # Part (b): Vanilla logistic regression\n # Make sure to save predicted probabilities to output_path_naive using np.savetxt()\n\n x_train, y_train = util.load_dataset(train_path, add_intercept=True)\n x_valid, y_valid = util.load_dataset(validation_path, add_intercept=True)\n y_train = np.expand_dims(y_train, 1)\n #\"\"\"\n vanilla = LogisticRegression()\n vanilla.fit(x_train, y_train)\n y_prediction = vanilla.predict(x_valid)\n np.savetxt(output_path_naive, y_prediction)\n util.plot(x_valid, y_valid, vanilla.theta, '/Users/cindyxu/Documents/cs229/assignments/ps1/src/imbalanced/vanilla.png')\n return confusionMatrix(y_valid, y_prediction)\n #\"\"\"\n # Part (d): Upsampling minority class\n # Make sure to save predicted probabilities to output_path_upsampling using np.savetxt()\n # Repeat minority examples 1 / kappa times\n\n \"\"\"\n m = x_train.shape[0]\n for i in range(m):\n if y_train[i, 0] == 1.0:\n for j in range(int(1/kappa)):\n x_train = np.vstack((x_train, x_train[i, :]))\n y_train = np.vstack((y_train, y_train[i, :]))\n\n upsampling = LogisticRegression()\n upsampling.fit(x_train, y_train)\n y_prediction_up = upsampling.predict(x_valid)\n np.savetxt(output_path_upsampling, y_prediction_up)\n util.plot(x_valid, y_valid, upsampling.theta, '/Users/cindyxu/Documents/cs229/assignments/ps1/src/imbalanced/upsampling.png')\n return confusionMatrix(y_valid, y_prediction_up)\n \"\"\"\n # *** END CODE HERE\n\n\ndef confusionMatrix(y_true, y_pred):\n TN, FP, FN, TP = 0, 0, 0, 0\n for i in range(y_pred.shape[0]):\n if y_pred[i, 0] > 0.5:\n y_pred[i, 0] = 1.0\n if y_pred[i, 0] < 0.5:\n y_pred[i, 0] = 0.0\n\n if y_pred[i, 0] == y_true[i]:\n if y_pred[i, 0] == 0.0:\n TN += 1\n if y_pred[i, 0] == 1.0:\n TP += 1\n if y_pred[i, 0] - y_true[i] == 1.0:\n FP += 1\n if y_pred[i, 0] - y_true[i] == -1.0:\n FN += 1\n\n A = (TP + TN) / y_true.shape[0]\n A0, A1 = TN / (TN + FP), TP / (TP + FN)\n A_balanced = 1 / 2 * (A0 + A1)\n print(\"A: \", A)\n print(\"A0: \", A0)\n print(\"A1: \", A1)\n print(\"A_balanced: \", A_balanced)\n return A, A_balanced, A0, A1\n\n\nif __name__ == '__main__':\n main(train_path='/Users/cindyxu/Documents/cs229/assignments/ps1/src/imbalanced/train.csv',\n validation_path='/Users/cindyxu/Documents/cs229/assignments/ps1/src/imbalanced/validation.csv',\n 
save_path='/Users/cindyxu/Documents/cs229/assignments/ps1/src/imbalanced/imbalanced_X_pred.txt')\n","repo_name":"xingzix/cs229hw","sub_path":"ps1/src/imbalanced/imbalanced.py","file_name":"imbalanced.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
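+# Tiny worked example of why A_balanced matters under the kappa = 0.1 skew
+# (numbers are illustrative, not from the assignment's dataset): with 100
+# validation examples, 90 negative and 10 positive, a classifier that predicts
+# everything negative gets TN=90, TP=0, FP=0, FN=10, so
+#   A          = (90 + 0) / 100   = 0.90   (looks great)
+#   A0         = 90 / (90 + 0)    = 1.00
+#   A1         = 0 / (0 + 10)     = 0.00
+#   A_balanced = (1.00 + 0.00)/2  = 0.50   (no better than chance)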
+{"seq_id":"35380592369","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\n# img_path = 'C:/Users/Sam_She/Desktop/dinasaur.jpg'\r\nimg_path = './yun.png'\r\n\r\n\r\n# img = np.reshape(cv.imread(img_path), (80, 80, 3))\r\nimg = cv.imread(img_path, -1) \r\nmBackground = img[:,:,3] == 0\r\nimg[mBackground] = (255,255,255,255)\r\n\r\nimg = img[:,:,0:3]\r\n\r\n\r\n# fix rgba color, refer to stackoverflow.com/questions/3803888/how-to-load-png-images-with-4-channels\r\nh, w = img.shape[0:2]\r\nh = h//2\r\nw = w//2\r\n\r\n# img = np.reshape(img, (h//2, w//2, 3))\r\n# h, w = img.shape[0:2]\r\n# img = np.reshape(img, (h//2, w//2, 3))\r\n# h, w = img.shape[0:2]\r\n\r\nresult_array = [0 for i in range(h*w)]\r\ndemo = np.zeros([h,w,3],dtype='uint8')\r\n\r\n\r\nfor i in range(h*w):\r\n y = (i//w) * 2\r\n x = (i%w) * 2\r\n bgr = img[y, x]\r\n u16 = (int(bgr[2]/255*31)<<11) + (int(bgr[1]/255*63)<<5) +( int(bgr[0]/255*31))\r\n result_array[i] = u16 # draw point\r\n demo[i//w, i%w] = bgr\r\n # result_array[x+y*80] = u16\r\n # print(y,x)\r\nresult_array = [str(hex(i)) for i in result_array]\r\nresult = ','.join(result_array)\r\nprint(result)\r\n\r\n\r\ncv.imwrite('img.png', demo)\r\nfor i in range(h):\r\n\tfor j in range(w):\r\n\t\tif (result_array[j + w * i]) == result_array[0]:\r\n\t\t\tprint(\"-\", end=\"\")\r\n\t\telse:\r\n\t\t\tprint(\"0\", end=\"\")\r\n\tprint(\"\")\r\ncv.imwrite('img.png', demo)\r\n\r\n# for i in range(img.shape[0]):\r\n# \tfor j in range(img.shape[1]):\r\n# \t\tif (img[i,j,1]) >= 100:\r\n# \t\t\tprint(\"-\", end=\"\")\r\n# \t\telse:\r\n# \t\t\tprint(\"0\", end=\"\")\r\n# \tprint(\"\")\r\n","repo_name":"wangsy503/Dinosaur-Game","sub_path":"ref_pics/imgProcess.py","file_name":"imgProcess.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71026378114","text":"import os\nimport subprocess \nimport json\nimport smtplib\nimport datetime\nimport time\nfrom threading import Thread\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n#Extract properties \ndef getProperties():\n with open('configP2P.json', 'r') as file:\n properties = json.load(file)\n return properties\n\ndef run_torrent(torrentName):\n os.system('deluged')\n time.sleep(5)\n os.system('deluge-console add ' + torrentName)\n\ndef unlink_torrent(fileName):\n os.system('deluge-console rm ' + fileName)\n os.system('rm /home/maro96leon/Downloads/*')\n\ndef check_status():\n status = subprocess.check_output(\"deluge-console info\", shell=True).decode()\n if \"Seeding\" not in status:\n return False\n return True\n\ndef gitUpdate():\n os.system('git pull')\n\ndef logStartNetstat(i, n):\n os.system(\"netstat -s | grep segments >> Logs/Netstat_T\" + str(n) + \"_C\" + str(i) + \"_Start.log\")\n\ndef logEndNetstat(i, n):\n os.system(\"netstat -s | grep segments >> Logs/Netstat_T\" + str(n) + \"_C\" + str(i) + \"_End.log\")\n\ndef handleIfTop(t, i, n):\n os.system(\"sudo iftop -t -s \" + str(t) + \" >> Logs/P2P_T\" + str(n) + \"_C\" + str(i) + \"_traffic.log\")\n\ndef makeDirFile():\n os.system('rm -rf Logs')\n os.system('mkdir Logs')\n\ndef sout(l):\n log.write(l + '\\n')\n log.flush()\n print(l)\n\ndef getId():\n with open('id','r') as f:\n nId = json.load(f)['id']\n return nId\n\ndef send_mail_gmail(username,password,sender,toaddrs_list,msg_text,subject,attachment_path_list):\n s = smtplib.SMTP('smtp.gmail.com:587')\n s.starttls()\n s.login(username, password)\n #s.set_debuglevel(1)\n msg = MIMEMultipart()\n recipients = toaddrs_list\n msg['Subject'] = subject\n msg['From'] = sender\n msg['To'] = \", \".join(recipients)\n if attachment_path_list is not None:\n for each_file_path in attachment_path_list:\n try:\n file_name=each_file_path.split(\"/\")[-1]\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(each_file_path, \"rb\").read())\n\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment' ,filename=file_name)\n msg.attach(part)\n except:\n print(\"could not attach file\")\n msg.attach(MIMEText(msg_text,'html'))\n s.sendmail(sender, recipients, msg.as_string())\n\ngitUpdate()\nmakeDirFile()\nproperties = getProperties()\ni = getId()\nprint(\"Running client #\" + i)\nt = properties['runtime']\nn = properties['numberClients']\nunlink_torrent(properties['fileName'])\nthread = Thread(target=handleIfTop, args=[t, i, n])\nwith open('Logs/P2P_T' + str(n) + \"_C\" + str(i) + \".log\", 'w') as log:\n thread.start()\n logStartNetstat(i, n)\n run_torrent(properties['torrentName'])\n tStart = datetime.datetime.now()\n done = False\n while not done:\n done = check_status()\n summary = str(datetime.datetime.now() - tStart) + \"s\"\n logEndNetstat(i, n)\n sout(\"C: Transfered in \" + summary)\nthread.join()\nunlink_torrent(properties['fileName'])\nlogs = ['Logs/P2P_T' + str(n) + \"_C\" + str(i) + \".log\",\n\"Logs/P2P_T\" + str(n) + \"_C\" + str(i) + \"_traffic.log\",\n\"Logs/Netstat_T\" + str(n) + \"_C\" + str(i) + \"_End.log\",\n\"Logs/Netstat_T\" + str(n) + \"_C\" + str(i) + \"_Start.log\"]\nsend_mail_gmail(properties['email'], properties['passwd'],properties['email'],properties['dest'],\n\"Working OK\", \"Logs for T\" + str(n) + \" C\" + str(i) + \" \" + properties['fileName'], 
logs)\n","repo_name":"JkRuiz/RedesLab4","sub_path":"P2P/p2pRunner.py","file_name":"p2pRunner.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
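+# Shape of configP2P.json implied by the keys the script reads (the values
+# here are placeholders I made up; only the key names come from the code):
+#   {"runtime": 300, "numberClients": 4,
+#    "fileName": "payload.bin", "torrentName": "payload.torrent",
+#    "email": "sender@gmail.com", "passwd": "app-password",
+#    "dest": ["receiver@example.com"]}
+# The separate 'id' file is read the same way, as {"id": <client number>}.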
+{"seq_id":"39032700583","text":"#!/usr/bin/env python\nimport os\nimport sqlite3\nimport xml.etree.ElementTree as ET # for xml\n\n# 读取数据库并存入 whitelist_dics {{{\n# whitelist_dics = {table_name_list[i]:{key_domain:(prefix, suffix, score, description)}}\n\nconn = sqlite3.connect('whitelists.db') # 如果文件不存在,会自动在当前目录创建:\ncursor = conn.cursor() # 创建一个Cursor:\n\n\n# get all table_name from database and save it to the variable {table_name_list} {{{\ntable_name_list = []\n\ncursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\ntable_names = cursor.fetchall() \n# print(table_names) # [('wiki',), ('blogs',), ('library',), ('software',), ('video',), ('repository',), ('bbs',)]\nfor _ in table_names:\n table_name_list.append(_[0])\n\n#print(table_name_list)\n# }}}\n\n\nwhitelist_dics = {} # {table_name_list[i]:{key_domain:(prefix, suffix, score, description)}}\n\nfor i in range(len(table_name_list)):\n sq = f\"select * from {table_name_list[i]};\"\n cursor.execute(sq)\n data_all = cursor.fetchall()\n \n #print(data_all) #[(1,2,3,4,5),(1,2,3,4,5),]\n\n # 存入 whitelist_dics {{{\n tmp_dic = {}\n\n key_domain = ''\n tmp_lis = []\n\n\n for data in data_all:\n key_domain = data[0]\n tmp_lis = data[1:]\n\n tmp_dic[key_domain] = tmp_lis\n\n whitelist_dics[table_name_list[i]] = tmp_dic\n\n # }}}\n \n \n\n#print(whitelist_dics)\n\n\ncursor.close() # 关闭Cursor:\nconn.commit() # 提交事务:\nconn.close() # 关闭Connection:\n\n# }}}\n\nlis = [] # 临时列表\nlis_total = [] # 总列表,递增添加所有名单,不减少\n\n\n# generate urls_list {{{\n\ndef gen_urls_list(whitelist_dic, startwith_at=False):\n if not startwith_at : # @ 符号是 uBlacklist 的白名单的前缀\n '''\n uBlacklist whitelist rule:\n with prefix\n @*://*.prefix.domain_name.suffix/*\n no prefix\n @*://*.domain_name.suffix/*\n no prefix and no domain name, only the suffix\n @*://*.suffix/*\n '''\n # k,v 即 domain:[prefix, suffix, score, description]\n # 将内容存到临时列表 lis = [url, url,,,], url='@*://prefix.domain_name.suffix'\n for k,v in whitelist_dic.items(): \n url = '@*://*.'\n # 加前缀\n if v[0] != '':\n if v[0].startswith('http://') or v[0].startswith('https://'):\n url = '@'+v[0]+'.' # uBlacklist suppot rules like \"@https://www.cnblogs.com/*\"\n \n elif v[0].startswith('www'): # www.cnblogs.com/*\n # 在 cse.google.com 中,\"*.www.cnblogs.com/*\" 不会匹配 https://www.cnblogs.com/*\n # 但是 \"*.my.oschina.net/*\" 能匹配到 https://my.oschina.net/*\n url = \"@https://\"+v[0]+'.'\n else:\n url+=v[0] + '.'\n # 加域名\n if k != '':\n url += k.lower() # uBlacklist 对域名区分大小写 @*://*.stackoverflow.com/* 与 @*://*.StackOverflow.com/* 拦截效果不同\n else:\n url = url[:-1] # change url('@*://*.') to '@*://*', 为了添加指定后缀的域名,如 @*://*.edu\n\n # 加后缀\n if v[1] != '': \n # @*://*.docin.com/p-* , 后缀以 \"p-\" 开头,如 “https://www.docin.com/p-1706944942.html”\n if '/' in v[1]:\n url+='.'+v[1]+'*' # 这个可以取代下面的写法\n # 添加完全后缀, @*://*.mathsisfun.com/*\n else: \n url+='.'+v[1]+'/*'\n\n #print(url)\n lis.append(url)\n lis_total.append(url)\n else:\n # for google cse annotations\n # lis = [[url,score,description], [url,score,description],,, ]\n '''\n cse whitelist rule:\n with prefix\n *.prefix.domain_name.suffix/*\n https://prefix.domain_name.suffix/*\n\n no prefix\n *.domain_name.suffix/*\n no prefix and no domain name, only the suffix\n *.suffix/*\n '''\n for k,v in whitelist_dic.items(): \n url = '*.'\n # 加前缀\n if v[0] != '':\n if v[0].startswith('http://') or v[0].startswith('https://'): # http(s)://www\n url = v[0]+'.' 
\n elif v[0].startswith('www'): # www.cnblogs.com/*\n # 在 cse.google.com 中,\"*.www.cnblogs.com/*\" 不会匹配 https://www.cnblogs.com/*\n # 但是 \"*.my.oschina.net/*\" 能匹配到 https://my.oschina.net/*\n url = \"https://\"+v[0]+'.'\n else:\n url+=v[0] + '.'\n # 加域名\n if k != '':\n url += k.lower()\n else:\n url = url[:-1] # 为了添加指定后缀的域名,如 *.edu\n\n # 加后缀, *.docin.com/p-* , 后缀以 \"p-\" 开头,如 “https://www.docin.com/p-1706944942.html”\n if v[1] != '': \n if '/' in v[1]:\n url+='.'+v[1]+'*'\n # 添加完全后缀, *.mathsisfun.com/*\n else: \n url+='.'+v[1]+'/*'\n\n #print(url)\n # annotations.xml 中的 score 是字符串格式: https://developers.google.com/custom-search/docs/annotations\n lis.append([url, str(v[2]), v[3]]) # lis = [[url,str(score),description], [url,str(score),description],,, ]\n lis_total.append(url)\n ...\n# }}}\n\n# uBlacklist txt subscription txt {{{\ndef gen_subscription_txt():\n # generate whitelist rule text\n with open( output + '/whitelist.txt', 'w') as f:\n f.write(r'*://*/*')\n\n for k,v in whitelist_dics.items():\n #print(k, v)\n gen_urls_list(v)\n filename = output + '/' + k + '.txt'\n with open(filename, 'w') as f:\n for each in lis:\n f.write(each+'\\n')\n\n lis.clear()\n# }}}\n\n# 汇总 txt {{{\n# 汇总列表,for uBlacklist\ndef gen_subscription_combined_txt():\n with open(output + '/whitelists_combined.txt', 'w') as f:\n for each in lis_total:\n f.write(each+'\\n')\n\n# 汇总域名列表,for other ways:\"cse.google.com\",油猴插件\ndef gen_domain_name_txt():\n with open(output + '/domain_name.txt', 'w') as f:\n for each in lis_total:\n if each.startswith('@http'): # @http(s)://www.cnblogs.com/*\n f.write(each[1:]+'\\n') # http(s)://www.cnblogs.com/*\n else:\n f.write(each[5:]+'\\n') # *.prefix.domain_name.suffix\n #print(each[5:])\n\n# }}}\n\n# 增加换行符 {{{\n# https://vae-0118.github.io/2017/11/06/Python%E4%B8%ADXML%E7%9A%84%E8%AF%BB%E5%86%99%E6%80%BB%E7%BB%93/\ndef __indent(elem, level=0):\n i = \"\\n\" + level*\"\\t\"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \"\\t\"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n __indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n# }}}\n\n# facet_items {{{\n# weight from -1.0 to 1.0\nfacet_items = {\n 'wiki':{'Label_name':'wiki',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.9',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_','/m/05z1_','/m/01mkq','/m/04rjg']},\n 'bbs':{'Label_name':'bbs',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.8',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_','/m/05z1_','/m/01mkq','/m/04rjg']},\n 'repository':{'Label_name':'repository',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.8',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_','/m/05z1_','/m/01mkq','/m/04rjg']},\n 'blogs':{'Label_name':'blogs',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.7',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_','/m/05z1_','/m/01mkq','/m/04rjg']},\n 'library':{'Label_name':'library',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.4',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_','/m/05z1_','/m/01mkq','/m/04rjg']},\n 'software':{'Label_name':'software',\n 'Label_mode':'FILTER',\n 'Label_weight':'0.5',\n 'Label_enable_for_facet_search':'false',\n 'Rewrite':'',\n 'entities':[]},\n 'pdf':{'Label_name':'pdf',\n 
'Label_mode':'BOOST',\n 'Label_weight':'0.5',\n 'Label_enable_for_facet_search':'false',\n 'Rewrite':'filetype:pdf',\n 'entities':[]},\n 'video':{'Label_name':'video',\n 'Label_mode':'FILTER',\n 'Label_weight':'0',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'',\n 'entities':['/m/01mf_']},\n 'edu':{'Label_name':'edu',\n 'Label_mode':'BOOST',\n 'Label_weight':'0.1',\n 'Label_enable_for_facet_search':'true',\n 'Rewrite':'site:.edu',\n 'entities':[]},\n}\n# }}}\n\n# 这个文件或许手修改更方便, 所以只生成该文件中的标签部分 {{{\ndef gen_cse_xml():\n root = ET.Element('Facet') # 创建根节点\n tree = ET.ElementTree(root) # 创建文档\n\n\n for facet in list(facet_items.values()):\n FacetItem = ET.Element('FacetItem') # 子节点\n\n Label = ET.SubElement(FacetItem, 'Label')\n Label.set('name', facet['Label_name'])\n Label.set('mode', facet['Label_mode'])\n Label.set('weight',facet['Label_weight'])\n Label.set('enable_for_facet_search',facet['Label_enable_for_facet_search'])\n\n Rewrite = ET.SubElement(Label, 'Rewrite')\n rewrite_text = facet['Rewrite']\n if rewrite_text != '':\n Rewrite.text = rewrite_text\n\n entities = ET.SubElement(Label, 'entities')\n for mid in facet['entities']:\n entity = ET.SubElement(entities, 'entity')\n entity.set('mid', mid)\n\n Title = ET.SubElement(FacetItem, 'Title')\n Title.text = facet['Label_name']\n\n root.append(FacetItem) # 放到根节点下\n\n __indent(root) # 增加换行符\n tree.write(output + '/cse_FacetLabels.xml', encoding='utf-8', xml_declaration=True)\n ...\n\n# }}}\n\n# generate annotations.xml {{{\n# https://vae-0118.github.io/2017/11/06/Python%E4%B8%ADXML%E7%9A%84%E8%AF%BB%E5%86%99%E6%80%BB%E7%BB%93/\ndef gen_annotations_xml():\n total_length = 0\n\n root = ET.Element('Annotations') # 创建根节点\n tree = ET.ElementTree(root) # 创建文档\n\n # add Annotation {{{\n\n # whitelists 总字典 { 'bbs':{'':[],'':[]}, 'blogs':{} }\n for k,v in whitelist_dics.items():\n #print('k = {}, v={}'.format(k, v)) # 'wiki':{'domain':(prefix,suffix,score,description)}\n # 将 k 对应的字典转为存储到临时列表 lis\n gen_urls_list(v, True)\n # print(lis) # [[url, score, description], ['*.stackoverflow.com/*', '0.8', description]]\n\n # 遍历列表\n for each in lis:\n # each 的属性,权重\n element = ET.Element('Annotation') # 子节点\n element.set('about', each[0]) # about 存 url pattern\n element.set('score', each[1]) # str(score)\n\n Label = ET.SubElement(element, 'Label')\n Label.set('name', '_include_')\n Comment = ET.SubElement(element, 'Comment')\n Comment.text = each[2] # description\n\n # 添加标签,如 \n Label = ET.SubElement(element, 'Label')\n Label.set('name', facet_items[k]['Label_name'])\n\n root.append(element) # 放到根节点下\n\n\n \n # 每次处理一张表, total_length 是递增的\n #print(lis, len(lis), end='\\n')\n total_length += len(lis)\n lis.clear()\n\n # }}}\n\n # Annotations 的三个非必要属性\n root.set('start', '0')\n root.set('num', str(total_length))\n root.set('total', str(total_length))\n\n __indent(root) # 增加换行符\n tree.write(output + '/annotations.xml', encoding='utf-8', xml_declaration=True)\n# }}} \n\n\ndef main():\n # txt, xml 的输出目录\n global output\n output = './whitelists'\n if not os.path.exists(output):\n os.mkdir(output)\n\n gen_subscription_txt()\n gen_subscription_combined_txt()\n gen_domain_name_txt()\n # BackgroundLabels 下的 Lables 只能有两个,且是两种(_include_ 和 _exclude_)\n gen_cse_xml()\n gen_annotations_xml()\n\n\nif __name__ == '__main__':\n\n 
main()\n\n","repo_name":"bcaso/Computer-Science-Whitelist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13171,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"61"}
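`gen_urls_list` above interleaves the uBlacklist and CSE rule formats with database plumbing, so the core string construction is easiest to see in isolation. The following is a standalone sketch of the uBlacklist branch; the helper name and signature are mine, mirroring the three cases listed in the docstring, not code from main.py:

```python
# Builds one uBlacklist whitelist rule from a (domain, prefix, suffix) row,
# following the same three cases as gen_urls_list above.
def make_ublacklist_rule(domain, prefix="", suffix=""):
    if prefix.startswith(("http://", "https://")):
        url = "@" + prefix + "."            # e.g. "@https://www.cnblogs.com."
    elif prefix.startswith("www"):
        url = "@https://" + prefix + "."
    elif prefix:
        url = "@*://*." + prefix + "."
    else:
        url = "@*://*."
    if domain:
        url += domain.lower()
    else:
        url = url[:-1]                      # "@*://*." -> "@*://*" for suffix-only rules
    if suffix:
        url += "." + suffix + ("*" if "/" in suffix else "/*")
    return url

assert make_ublacklist_rule("stackoverflow", suffix="com") == "@*://*.stackoverflow.com/*"
assert make_ublacklist_rule("", suffix="edu") == "@*://*.edu/*"
assert make_ublacklist_rule("docin", suffix="com/p-") == "@*://*.docin.com/p-*"
```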
+{"seq_id":"26354660579","text":"import minqlx\nfrom pydbus import SystemBus\nfrom gi.repository import GLib\n\nbus = SystemBus()\nloop = GLib.MainLoop()\nsignal = bus.get('org.asamk.Signal', '/org/asamk/Signal/_')\n\nclass signal_read(minqlx.Plugin):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tglobal signal\n\t\tsignal.onMessageReceived = self.msgRcv\n\t\tself.add_hook(\"unload\", self.handle_unload)\n\t\tself.start_loop()\n\n\tdef handle_unload(self, plugin):\n\t\tif plugin == self.__class__.__name__:\n\t\t\tloop.quit()\n\t\t\tglobal signal\n\t\t\tsignal.onMessageReceived = None\n\n\t@minqlx.thread\n\tdef start_loop(self):\n\t\tloop.run()\n\n\tdef msgRcv(self, timestamp, source, groupID, message, attachments):\n\t\tif groupID == []:\n\t\t\treturn\n\t\ttry:\n\t\t\tgroup_name = signal.getGroupName(groupID)\n\t\texcept:\n\t\t\tminqlx.log_exception()\n\t\t\treturn\n\t\tif group_name != \" ken[i][kenhigh]:\r\n naomiscore[i] = naomiscore[i] + 1\r\n kenlow = kenlow + 1\r\n naomihigh = naomihigh - 1\r\n else:\r\n kenhigh = kenhigh - 1\r\n naomihigh = naomihigh -1\r\nscore1 = naomiscore\r\nnaomiscore =[0]*casenum\r\nfor i in range(casenum):\r\n kenlow=0\r\n naomilow=0\r\n naomihigh = num[i] - 1\r\n kenhigh=naomihigh;\r\n for j in range(num[i]):\r\n if naomi[i][naomilow] > ken[i][kenlow]:\r\n kenlow = kenlow + 1\r\n naomilow = naomilow +1\r\n naomiscore[i] = naomiscore[i] + 1\r\n else:\r\n kenhigh = kenhigh - 1\r\n naomilow = naomilow + 1\r\nscore2 = naomiscore\r\nfor i in range(casenum):\r\n print('Case #{}:'.format(i+1),score2[i],score1[i])\r\n\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1844.py","file_name":"1844.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29704786644","text":"# Phoenix has n coins with weights 2^1,2^2,…,2^n. He knows that n is even.\n# He wants to split the coins into two piles such that each pile has exactly\n# n2 coins and the difference of weights between the two piles is minimized.\n# Formally, let a denote the sum of weights in the first pile,\n# and b denote the sum of weights in the second pile. Help Phoenix minimize |a−b|,\n# the absolute value of a−b.\n\n\nt = int(input())\nfor i in range(t):\n n = int(input())\n a, b = 2**n, 0\n\n print(a)\n print(b)\n k = int(n//2)\n i = 1\n while i < k:\n a += (2**i)\n i += 1\n while i < n:\n b += (2**i)\n i += 1\n print(a-b)\n\n# t = int(input())\n# p = 0\n# for i in range(t):\n# n = int(input())\n# l = int((n/2) + 1)\n# p = 2**l\n# print(p-2)\n","repo_name":"TahminaTania/Code-Forces-Practices","sub_path":"nuber series balance.py","file_name":"nuber series balance.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43305136001","text":"from django.core.management.base import BaseCommand, CommandError, make_option\nfrom collections.abc import MutableMapping\nfrom extract.models import *\nfrom web.models import *\nfrom web.views import SLIDER_MAX, SLIDER_MIN\nfrom bs4 import BeautifulSoup\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom random import randint\nfrom . import readability\nimport math\nimport os\nimport sys\nimport re\n\n\nclass Metadata(MutableMapping):\n\n \"\"\"Store metadata of an article and perserve global min/max values for\n normalization.\n \"\"\"\n\n max_values = dict()\n min_values = dict()\n\n def __init__(self, *args, **kwargs):\n self.store = dict()\n self.update(dict(*args, **kwargs))\n\n def __getitem__(self, key):\n return self.store[key]\n\n def __setitem__(self, key, value):\n self.store[key] = value\n Metadata.min_values[key] = min(Metadata.min_values.get(key, sys.maxsize), value)\n Metadata.max_values[key] = max(Metadata.max_values.get(key, 0), value)\n\n def __delitem__(self, key):\n raise NotImplementedError()\n\n def __iter__(self):\n return iter(self.store)\n\n def __len__(self):\n return len(self.store)\n\n def norm(self, key):\n a = self.store[key] - self.min_values[key]\n b = self.max_values[key] - self.min_values[key]\n return 0 if b == 0 else a / b\n\n def normint(self, key, scale_min=SLIDER_MIN, scale_max=SLIDER_MAX):\n return int(self.norm(key) * (scale_max - scale_min)) + scale_min\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = \"Run data analysis\"\n option_list = BaseCommand.option_list + (\n make_option(\"--sim\", action=\"store_true\", dest=\"sim\",\n default=False, help=\"Run similarity computation.\"),\n )\n\n def extract_words(self, text):\n \"\"\"Tokenize a string using NLTK and return a set of lowercased words.\"\"\"\n content = text.lower()\n tokens = self.tokenizer.tokenize(content)\n return set(w for w in tokens if w not in self.stopwords)\n\n def get_article_text(self, article):\n \"\"\"Return the text content of an article without HTML tags.\"\"\"\n soup = BeautifulSoup(article.content)\n return soup.get_text()\n\n def handle(self, *args, **options):\n self.stdout.write(\"Current NLTK data path: \" + os.environ[\"NLTK_DATA\"])\n\n self.stdout.write(\"Computing stats...\")\n result = self.compute_stats()\n for article, stats in result.items():\n try:\n aa = ArticleAttr.objects.get(article=article)\n except ArticleAttr.DoesNotExist:\n aa = ArticleAttr(article=article)\n aa.length = stats.normint(\"word_count\")\n aa.media = stats.media\n aa.care = stats.care\n aa.reading = stats.normint(\"reading\")\n aa.is_local = article.source in (\"BHC\")\n aa.is_video = article.source in (\"BHCYT\")\n aa.save()\n\n if options[\"sim\"]:\n self.stdout.write(\"Computing similarity values...\")\n result = self.compute_similarity()\n for a, b, stats in result:\n try:\n row = ArticleSimilarity.objects.get(a=a, b=b)\n except ArticleSimilarity.DoesNotExist:\n row = ArticleSimilarity(a=a, b=b)\n row.similarity = stats.normint(\"co_word_count\")\n if stats.linked_flag:\n row.similarity = min(SLIDER_MAX, row.similarity + 10)\n row.save()\n\n def compute_stats(self):\n re_care = re.compile(r\"\\b(care|caring|manage|managing|management|family)\\b\",\n flags=re.IGNORECASE)\n re_cond = re.compile(r\"\\b(condition[s]?|treatment[s]?)\\b\",\n flags=re.IGNORECASE)\n\n d = dict()\n qs = Article.objects.all().prefetch_related(\"image_set\")\n for article in qs:\n md = Metadata()\n d[article] = md\n\n # Compute number of 
images.\n md[\"image_count\"] = article.image_set.count()\n\n # Compute length.\n content = self.get_article_text(article)\n md[\"word_count\"] = len(content.split(\" \"))\n\n # Compute the media dimension based on above two numbers.\n md.media = 11 - md.normint(\"word_count\", 1, 10)\n if md.get(\"image_count\"):\n md.media += md.normint(\"image_count\", 1, 10)\n\n # Compute reading level.\n md[\"reading\"] = readability.grade_level(content)\n\n # Compute article nature: caring <-> conditions\n t, c = article.title, article.category\n if re_care.search(t) or re_care.search(c):\n md.care = randint(1, 7)\n elif re_cond.search(t) or re_cond.search(c):\n md.care = randint(13, 20)\n else:\n md.care = randint(8, 12)\n\n return d\n\n def compute_similarity(self):\n l = list()\n c = 0\n qs1 = Article.objects.order_by(\"id\").prefetch_related(\"outlink_set\")\n for a in qs1:\n qs2 = Article.objects.filter(id__gt=a.id).order_by(\"id\")\n a_links = set(x.alt.lower() for x in a.outlink_set.all())\n a_words = self.extract_words(self.get_article_text(a))\n for b in qs2:\n assert a.id < b.id\n b_words = self.extract_words(self.get_article_text(b))\n md = Stats()\n md[\"co_word_count\"] = len(a_words & b_words)\n md.linked_flag = b.title.lower() in a_links\n l.append((a.id, b.id, md))\n c += 1\n if c % 100 == 0:\n self.stdout.write(\"{0} records written.\".format(c))\n return l\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.tokenizer = RegexpTokenizer(r'[a-z]+')\n self.stopwords = set(stopwords.words(\"english\"))\n","repo_name":"cipang/better-health-explorer","sub_path":"web/management/commands/da.py","file_name":"da.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
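The `Metadata` class above keeps its min/max bookkeeping at class level, so every instance's `norm`/`normint` is computed against the global range seen so far. A tiny illustration (the word counts are invented, and the snippet assumes the `Metadata` class above is importable):

```python
# Two articles' word counts: once both are stored, the class-level range is
# [200, 1200], so norm() maps them to the two ends of the scale.
m1, m2 = Metadata(), Metadata()
m1["word_count"] = 200
m2["word_count"] = 1200
assert m1.norm("word_count") == 0.0 and m2.norm("word_count") == 1.0
print(m1.normint("word_count", 1, 10), m2.normint("word_count", 1, 10))  # 1 10
```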
+{"seq_id":"6712390936","text":"# Declaration of Depedencies\nimport numpy as np\nimport sqlalchemy\nimport datetime as dt\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n# Database setup\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# Reflect databse into new model\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nStation = Base.classes.station\nMeasurement = Base.classes.measurement\n\n# Fill the Flask\napp = Flask(__name__)\n\n# Plan some routes with da flask\n# Homepage\n@app.route(\"/\")\ndef homepage():\n return (\n f\"Welcome! \"\n f\"Looking to vacation in Hawaii? B/c same :( \"\n f\"Checkout these Available Routes so we can see what we're missing out on together: \"\n f\"/api/v1.0/precipitation \"\n f\"/api/v1.0/stations \"\n f\"/api/v1.0/tobs \"\n f\"/api/v1.0/start \"\n f\"/api/v1.0/start/end\"\n )\n\n# Precipitaton \n# (assuming that \"convert query results\" means use same dates as OG query?)\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n\n #Find dates so we can have same results as og query\n latest_date = session.query(Measurement.date)\\\n .order_by(Measurement.date.desc()).first()\n latest_date = (dt.datetime.strptime(latest_date[0], '%Y-%m-%d'))\n year_ago = (latest_date - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n \n # QueryQueryQuery\n results = session.query(Measurement.date, Measurement.prcp)\\\n .filter(Measurement.date >= year_ago).all()\n\n session.close()\n\n prcp_data = []\n for date, prcp in results:\n prcp_dict = {}\n prcp_dict[date] = prcp\n prcp_data.append(prcp_dict)\n \n return jsonify(prcp_data)\n\n# Stations \n# (kinda unclear on what info is wanted from stations, so hope this is enough and rip me if not :o )\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n results = session.query(Station.station).all()\n\n session.close()\n\n station_list = list(np.ravel(results))\n\n return jsonify(station_list)\n\n# Temperature \n# (also p unclear on this one, does previous year = last year of data? 
hope so)\n# (also hope you only want temperatures returned as the list, unsure if instructions implied we should also get the date)\n@app.route(\"/api/v1.0/tobs\")\ndef temperature():\n    session = Session(engine)\n\n    # Find dates again woooo\n    latest_date = session.query(Measurement.date)\\\n        .order_by(Measurement.date.desc()).first()\n    latest_date = (dt.datetime.strptime(latest_date[0], '%Y-%m-%d'))\n    year_ago = (latest_date - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n\n    # Query the last year of temperature observations for the most active station\n    results = session.query(Measurement.tobs)\\\n        .filter(Measurement.date >= year_ago)\\\n        .filter(Measurement.station == \"USC00519281\").all()\n\n    session.close()\n\n    temp_list = list(np.ravel(results))\n\n    return jsonify(temp_list)\n\n# Start\n@app.route(\"/api/v1.0/<start>\")\ndef start(start):\n    session = Session(engine)\n\n    results = session.query(func.min(Measurement.tobs), \n                            func.max(Measurement.tobs), \n                            func.avg(Measurement.tobs))\\\n        .filter(Measurement.date >= start).all() \n    session.close()\n\n    data = []\n    for min, max, avg in results:\n        data_dict = {}\n        data_dict[\"Min\"] = min\n        data_dict[\"Max\"] = max\n        data_dict[\"Avg\"] = avg\n        data.append(data_dict)\n\n    return jsonify(data)\n\n# Start-End\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef dates(start, end):\n    session = Session(engine)\n\n    results = session.query(func.min(Measurement.tobs), \n                            func.max(Measurement.tobs), \n                            func.avg(Measurement.tobs))\\\n        .filter(Measurement.date >= start)\\\n        .filter(Measurement.date <= end).all() \n    session.close()\n\n    data = []\n    for min, max, avg in results:\n        data_dict = {}\n        data_dict[\"Min\"] = min\n        data_dict[\"Max\"] = max\n        data_dict[\"Avg\"] = avg\n        data.append(data_dict)\n\n    return jsonify(data)\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"julialeonoff/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
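Both the `/precipitation` and `/tobs` routes above derive the same one-year window from the most recent measurement date. A small helper like the one below (hypothetical, not part of app.py) would factor that computation out:

```python
# Hypothetical helper for the repeated "one year back from the latest
# measurement" computation used by the precipitation() and temperature()
# routes above.
import datetime as dt

def year_window(latest_date_str):
    """Return the date string 365 days before latest_date_str."""
    latest = dt.datetime.strptime(latest_date_str, "%Y-%m-%d")
    return (latest - dt.timedelta(days=365)).strftime("%Y-%m-%d")

assert year_window("2017-08-23") == "2016-08-23"
```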
+{"seq_id":"72222094913","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom Apps.Productos.models import Categoria,Producto \nfrom Apps.Productos.forms import FormularioCategoria,FormularioProducto\nimport json \nfrom django.http import JsonResponse\n#-------------------------------------ss-\n#--------------------------------------\n# Create your views here.\n\ndef Man(request):\n\t\n\treturn render(request, 'Inicio/UrbanMan.html')\n\ndef Contactanos(request):\n\t\n\treturn render(request, 'Inicio/Contactanos.html')\n\ndef Productos(request):\n\t\n\treturn render(request, 'Inicio/Productos.html')\n\ndef Tienda(request):\n\tListaProductos=Producto.objects.all()\n\tListaCategoria= Categoria.objects.all()\n\tcontexto={\n\t\t\t'ListaProductos':ListaProductos,\n\t\t\t'ListaCategoria':ListaCategoria,\n\t\t\t}\n\treturn render(request, 'Productos/Ropa.html',contexto)\n\tif request.is_ajax():\n\t\tProductos = Producto.objects.filter(nombre__startswith= request.GET['nombre'] ).values('nombre', 'id')\n\t\treturn HttpResponse( json.dumps( list(Productos)), content_type='application/json' ) \n\telse:\n\t \treturn HttpResponse(\"Solo Ajax\")\n\ndef TiendaCategoria(request, id_Categoria1):\n\tCategoriaid = Categoria.objects.get(id= id_Categoria1)\n\tNombreCategoria = request.GET.get('Nombre_Categoria', None)\n\tNombre= Categoriaid.Nombre\n\tListaProductos=Producto.objects.filter(Categoria1 = Nombre )\n\tListaCategoria= Categoria.objects.all()\n\tcontexto={\n\t\t\t'ListaProductos':ListaProductos,\n\t\t\t'ListaCategoria':ListaCategoria,\n\t\t\t}\n\treturn render(request, 'Productos/Ropa.html',contexto)\n\n \ndef Busqueda(request):\n\tNombre1 = request.GET.get('nombre',None)\n\tListaProductos = Producto.objects.filter(Nombre=Nombre1 )\n\tListaCategoria= Categoria.objects.all()\n\tcontexto= {\n\t\t'ListaProductos':ListaProductos,\n\t\t'ListaCategoria':ListaCategoria,\n\t}\n\treturn render(request, 'Productos/Ropa.html',contexto) ","repo_name":"DavidJMS/UrbanMan","sub_path":"Apps/Man/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11879921289","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom nets.Modules.DCNV2 import DeformableConv2d\nfrom nets.Backbones.MobileNetV2 import mobilenetv2_xbn\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass MobileNetV2_XBN(nn.Module):\n def __init__(self, downsample_factor=8, pretrained=True):\n super(MobileNetV2_XBN, self).__init__()\n from functools import partial\n \n model = mobilenetv2_xbn(pretrained)\n self.features = model.features[:-1]\n\n self.total_idx = len(self.features)\n self.down_idx = [2, 4, 7, 14]\n\n if downsample_factor == 8:\n for i in range(self.down_idx[-2], self.down_idx[-1]):\n self.features[i].apply(\n partial(self._nostride_dilate, dilate=2)\n )\n for i in range(self.down_idx[-1], self.total_idx):\n self.features[i].apply(\n partial(self._nostride_dilate, dilate=4)\n )\n elif downsample_factor == 16:\n for i in range(self.down_idx[-1], self.total_idx):\n self.features[i].apply(\n partial(self._nostride_dilate, dilate=2)\n )\n \n def _nostride_dilate(self, m, dilate):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n if m.stride == (2, 2):\n m.stride = (1, 1)\n if m.kernel_size == (3, 3):\n m.dilation = (dilate//2, dilate//2)\n m.padding = (dilate//2, dilate//2)\n else:\n if m.kernel_size == (3, 3):\n m.dilation = (dilate, dilate)\n m.padding = (dilate, dilate)\n\n def forward(self, x):\n l1 = self.features[:4](x) \n l2 = self.features[4:7](l1)\n l4 = self.features[7:](l2) \n return l1, l2, l4 \n\n\n\nclass ASPP(nn.Module):\n\tdef __init__(self, dim_in, dim_out, rate=1, bn_mom=0.1):\n\t\tsuper(ASPP, self).__init__()\n\t\tself.branch1 = nn.Sequential(\n\t\t\t\tnn.Conv2d(dim_in, dim_out, 1, 1, padding=0, dilation=rate,bias=True),\n\t\t\t\tnn.BatchNorm2d(dim_out, momentum=bn_mom),\n\t\t\t\tnn.ReLU(inplace=True),\n\t\t)\n\t\tself.branch2 = nn.Sequential(\n\t\t\t\tnn.Conv2d(dim_in, dim_out, 3, 1, padding=6*rate, dilation=6*rate, bias=True),\n\t\t\t\tnn.BatchNorm2d(dim_out, momentum=bn_mom),\n\t\t\t\tnn.ReLU(inplace=True),\t\n\t\t)\n\t\tself.branch3 = nn.Sequential(\n\t\t\t\tnn.Conv2d(dim_in, dim_out, 3, 1, padding=12*rate, dilation=12*rate, bias=True),\n\t\t\t\tnn.BatchNorm2d(dim_out, momentum=bn_mom),\n\t\t\t\tnn.ReLU(inplace=True),\t\n\t\t)\n\t\tself.branch4 = nn.Sequential(\n\t\t\t\tnn.Conv2d(dim_in, dim_out, 3, 1, padding=18*rate, dilation=18*rate, bias=True),\n\t\t\t\tnn.BatchNorm2d(dim_out, momentum=bn_mom),\n\t\t\t\tnn.ReLU(inplace=True),\t\n\t\t)\n\t\tself.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0,bias=True)\n\t\tself.branch5_bn = nn.BatchNorm2d(dim_out, momentum=bn_mom)\n\t\tself.branch5_relu = nn.ReLU(inplace=True)\n\n\t\tself.conv_cat = nn.Sequential(\n\t\t\t\tnn.Conv2d(dim_out*5, dim_out, 1, 1, padding=0,bias=True),\n\t\t\t\tnn.BatchNorm2d(dim_out, momentum=bn_mom),\n\t\t\t\tnn.ReLU(inplace=True),\t\t\n\t\t)\n\n\tdef forward(self, x):\n\t\t[b, c, row, col] = x.size()\n\n\t\tconv1x1 = self.branch1(x)\n\t\tconv3x3_1 = self.branch2(x)\n\t\tconv3x3_2 = self.branch3(x)\n\t\tconv3x3_3 = self.branch4(x)\n\n\t\tglobal_feature = torch.mean(x,2,True)\n\t\tglobal_feature = torch.mean(global_feature,3,True)\n\t\tglobal_feature = self.branch5_conv(global_feature)\n\t\tglobal_feature = self.branch5_bn(global_feature)\n\t\tglobal_feature = self.branch5_relu(global_feature)\n\t\tglobal_feature = F.interpolate(global_feature, (row, col), None, 'bilinear', True)\n\t\t\n\t\tfeature_cat = torch.cat([conv1x1, conv3x3_1, conv3x3_2, conv3x3_3, global_feature], dim=1)\n\t\tresult = 
self.conv_cat(feature_cat)\n\t\treturn result\n\nclass MSLNet(nn.Module):\n    def __init__(self, num_classes, backbone=\"mobilenetv2_xbn\", pretrained=True, downsample_factor=8, Use_DCN=True):\n        super(MSLNet, self).__init__()\n        if backbone==\"mobilenetv2_xbn\":\n            self.backbone = MobileNetV2_XBN(downsample_factor=downsample_factor, pretrained=pretrained)\n        else:\n            raise ValueError('Unsupported backbone - `{}`!!!'.format(backbone))\n\n        l1_channels = 24\n        l2_channels = 32\n        l4_channels = 320\n\n        self.aspp = ASPP(dim_in=l4_channels, dim_out=256, rate=16//downsample_factor)\n\n        self.cat_convUp1 = nn.Sequential(\n            nn.Conv2d(64+256, 256, 3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(inplace=True),\n            nn.Dropout(0.5),\n        )\n        self.cat_convUp2 = nn.Sequential(\n            nn.Conv2d(256+48, 256, 3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(inplace=True),\n            nn.Dropout(0.5),\n        )\n\n        self.shortcut_convl1 = nn.Sequential(\n            nn.Conv2d(l1_channels, 48, 1),\n            nn.BatchNorm2d(48),\n            nn.ReLU(inplace=True)\n        )\n        self.shortcut_convl2 = nn.Sequential(\n            nn.Conv2d(l2_channels, 64, 1),\n            nn.BatchNorm2d(64),\n            nn.ReLU(inplace=True)\n        )\n        self.conv33 = nn.Sequential(\n            DeformableConv2d(256, 256, 3, stride=1, padding=1) if Use_DCN else nn.Conv2d(256, 256, 3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(inplace=True),\n            nn.Dropout(0.1),\n        )\n        self.cls_conv = nn.Conv2d(256, num_classes, 1, stride=1)\n\n    def forward(self, x):\n        H, W = x.size(2), x.size(3)\n        # extract the three initial feature levels\n        # l1: 24 128 128\n        # l2: 32 64 64\n        # l4: 320 64 64\n        l1, l2, l4 = self.backbone(x) \n        # ASPP\n        up1 = self.aspp(l4) # 320 64 64 -> 256,64,64\n        # adjust the channel count\n        l2 = self.shortcut_convl2(l2) # 64,64,64\n        # feature fusion\n        up2 = self.cat_convUp1(torch.cat((up1, l2), dim=1)) # 256, 64, 64\n        # adjust the channel count\n        l1 = self.shortcut_convl1(l1) #->24 128 128 -> 48 128 128\n        # resize\n        up2 = F.interpolate(up2, size=(l1.size(2), l1.size(3)), mode='bilinear', align_corners=True) #256, 128, 128\n        # feature fusion\n        x = self.cat_convUp2(torch.cat((up2, l1), dim=1)) # 256, 128, 128\n        # deformable convolution\n        x = self.conv33(x) # 256 128 128\n        # project the channels down to the number of classes (2)\n        x = self.cls_conv(x) # 2 128 128\n        # upsample back to the input image size\n        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) # 2,512,512\n        return x\n","repo_name":"ParkourX/MSLNet","sub_path":"nets/MSLNet.py","file_name":"MSLNet.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
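A quick way to sanity-check the ASPP block defined above is a shape test: the five parallel branches (one 1x1 conv, three dilated 3x3 convs, and a global-pooling branch) are concatenated and projected back to `dim_out` channels while preserving spatial size. A minimal smoke test, assuming `torch` and the `ASPP` class above are importable (the tensor sizes are arbitrary):

```python
# Minimal shape check for ASPP: with downsample_factor=8 the model above
# instantiates ASPP(dim_in=320, dim_out=256, rate=2).
import torch

x = torch.randn(1, 320, 64, 64)
aspp = ASPP(dim_in=320, dim_out=256, rate=2)
assert aspp(x).shape == (1, 256, 64, 64)
```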
+{"seq_id":"14403582376","text":"def check(prefix):\n\tsame = 11 - 3\n\tminus1 = min(int(prefix[3]) + 1, 8)\n\tplus1 = min(10 - int(prefix[3]), 8)\n\n\tcntplus = 0\n\tcntminus = 0\n\tcntsame = 0\n\tfor i in [2, 1, 0]:\n\t\tif (cntsame + i == 2) & (int(prefix[i]) == int(prefix[i+1])): cntsame+=1\n\t\tif (cntplus + i == 2) & (int(prefix[i]) == int(prefix[i+1])-1): cntplus+=1\n\t\tif (cntminus + i == 2) & (int(prefix[i]) == int(prefix[i+1])+1): cntminus+=1\n\n\tsame += cntsame\n\tplus1 += cntplus\n\tminus1 += cntminus * 10 ** (8 - minus1)\n\n\tquantity = [1, 10 ** (8 - plus1 + cntplus), 10 ** (8 - minus1 + cntminus)]\n\n\tvars = [same, plus1, minus1]\n\tleader = max(vars) \n\tres = 0\n\tfor i in range(3):\n\t\tif vars[i] == leader: res += quantity[i]\n\treturn res\n\nprefix = input()\n\nprint(check(prefix))\n\n","repo_name":"nikulinleo/seminars","sub_path":"task1MTS.py","file_name":"task1MTS.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9272646732","text":"# Databricks notebook source\n# MAGIC %md-sandbox\n# MAGIC\n# MAGIC \n# MAGIC
\n# MAGIC
\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # ストリーミング重複排除\n# MAGIC\n# MAGIC このノートブックでは、Structured Streaming と Delta Lake を使って重複レコードを削除する方法を学びます。Spark Structured Streaming は正確に一度だけの処理を保証しますが、多くのソースシステムでは重複レコードが発生します。\n# MAGIC\n# MAGIC ## 学習目標\n# MAGIC このレッスンの終わりまでに、以下のことができるようになります:\n# MAGIC - **`dropDuplicates`** をストリーミングデータに適用する\n# MAGIC - Watermarkを使用して状態情報を管理する\n# MAGIC - デルタテーブルに重複レコードを挿入しないように、挿入のみのマージを記述する\n# MAGIC - **`foreachBatch`** を使用して、ストリーミング・アップサートを実行する。\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## セットアップ\n# MAGIC データベースを宣言し、すべてのパス変数を設定します。\n\n# COMMAND ----------\n\n# MAGIC %run ../Includes/Classroom-Setup-4.1\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 重複レコードの特定\n# MAGIC\n# MAGIC Kafkaはデータ配信においてat-least-once保証を提供するため、すべてのKafkaコンシューマーは重複レコードを処理する準備をする必要があります。\n# MAGIC\n# MAGIC ここで紹介する重複排除の方法は、Delta Lake アプリケーションの他の部分でも必要に応じて適用することができます。\n# MAGIC\n# MAGIC まず、ブロンズテーブルの **`bpm`** トピックで重複レコードの数を特定することから始めましょう。\n\n# COMMAND ----------\n\ntotal = (spark.read\n .table(\"bronze\")\n .filter(\"topic = 'bpm'\")\n .count())\n\nprint(f\"Total: {total:,}\")\n\n# COMMAND ----------\n\nfrom pyspark.sql import functions as F\n\njson_schema = \"device_id LONG, time TIMESTAMP, heartrate DOUBLE\"\n\nold_total = (spark.read\n .table(\"bronze\")\n .filter(\"topic = 'bpm'\")\n .select(F.from_json(F.col(\"value\").cast(\"string\"), json_schema).alias(\"v\"))\n .select(\"v.*\")\n .dropDuplicates([\"device_id\", \"time\"])\n .count())\n\nprint(f\"Old Total: {old_total:,}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC レコードの約10-20%が重複しているようだ。ここでは、ブロンズレベルではなくシルバーレベルで重複排除を適用することにしている。重複レコードを保存している一方で、ブロンズ・テーブルはストリーミング・ソースの真の状態 の履歴を保持し、すべてのレコードを到着したときの状態で表示する(いくつかの追加メタデータが記録されている)。これにより、必要であればダウンストリーム・システムのあらゆる状態を再作成することができ、最初の取り込み時に過度に積極的な品質管理が行われることによる潜在的なデータ損失を防ぐだけでなく、データ取り込みの待ち時間を最小限に抑えることができます。\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## ブロンズBPMレコードのストリーミングリードを定義する\n# MAGIC\n# MAGIC ここで、前回のノートにあった最後のロジックを復活させる。\n\n# COMMAND ----------\n\njson_schema = \"device_id LONG, time TIMESTAMP, heartrate DOUBLE\"\n\nbpm_df = (spark.readStream\n .table(\"bronze\")\n .filter(\"topic = 'bpm'\")\n .select(F.from_json(F.col(\"value\").cast(\"string\"), json_schema).alias(\"v\"))\n .select(\"v.*\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC ストリーミング重複排除を扱う場合、静的データと比較して複雑なレベルがある。\n# MAGIC\n# MAGIC 各マイクロバッチが処理されるとき、以下のことを確認する必要がある:\n# MAGIC - マイクロバッチに重複レコードが存在しない。\n# MAGIC - 挿入されるレコードがターゲットテーブルに既に存在しない\n# MAGIC\n# MAGIC Spark Structured Streamingは、マイクロバッチ内やマイクロバッチ間で重複レコードが存在しないように、uniqueキーの状態情報を追跡できます。時間の経過とともに、この状態情報はすべての履歴を表すようにスケールする。適切な期間のWatermarkを適用することで、レコードが遅延する可能性があると合理的に予想される時間のウィンドウの状態情報だけを追跡することができる。ここでは、このWatermarkを30秒と定義する。\n# MAGIC\n# MAGIC 以下のセルは、前回のクエリを更新したものである。\n\n# COMMAND ----------\n\njson_schema = \"device_id LONG, time TIMESTAMP, heartrate DOUBLE\"\n\ndeduped_df = (spark.readStream\n .table(\"bronze\")\n .filter(\"topic = 'bpm'\")\n .select(F.from_json(F.col(\"value\").cast(\"string\"), json_schema).alias(\"v\"))\n .select(\"v.*\")\n .withWatermark(\"time\", \"30 seconds\")\n .dropDuplicates([\"device_id\", \"time\"]))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## 挿入のみのマージ\n# MAGIC Delta Lake は、挿入のみのマージに最適化された機能を持っています。この操作は重複排除に理想的です。一意のキーでマッチするロジックを定義し、まだ存在しないキーのレコードだけを��入します。\n# MAGIC\n# MAGIC このアプリケーションでは、同じマッチングキーを持つ2つのレコードが同じ情報を表していることが分かっているため、この方法で処理を進めることに注意されたい。後に到着したレコードが既存のレコードに必要な変更を示していた場合、 **`WHEN MATCHED`** 句を含むようにロジックを変更する必要があります。\n# MAGIC\n# MAGIC 以下のSQLでは、 **`stream_updates`** というタイトルのビューに対するマージクエリを定義しています。\n\n# COMMAND 
----------\n\nsql_query = \"\"\"\n MERGE INTO heart_rate_silver a\n USING stream_updates b\n ON a.device_id=b.device_id AND a.time=b.time\n WHEN NOT MATCHED THEN INSERT *\n\"\"\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## **`foreachBatch`** 用マイクロバッチ関数の定義\n# MAGIC\n# MAGIC Spark Structured Streaming の **`foreachBatch`** メソッドでは、書き込み時にカスタムロジックを定義できます。\n# MAGIC\n# MAGIC **`foreachBatch`** で適用されるロジックは、現在のマイクロバッチを(ストリーミングではなく)バッチデータであるかのように扱います。\n# MAGIC\n# MAGIC 以下のセルで定義されているクラスは、構造化ストリーミング書き込みで使用するために、任意の SQL **`MERGE INTO`** クエリを登録できるようにする単純なロジックを定義しています。\n\n# COMMAND ----------\n\nclass Upsert:\n def __init__(self, sql_query, update_temp=\"stream_updates\"):\n self.sql_query = sql_query\n self.update_temp = update_temp \n \n def upsert_to_delta(self, microBatchDF, batch):\n microBatchDF.createOrReplaceTempView(self.update_temp)\n microBatchDF._jdf.sparkSession().sql(self.sql_query)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC SQLを使ってデルタ・テーブルに書き込むので、始める前にこのテーブルが存在することを確認する必要がある。\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC\n# MAGIC CREATE TABLE IF NOT EXISTS heart_rate_silver \n# MAGIC (device_id LONG, time TIMESTAMP, heartrate DOUBLE)\n# MAGIC USING DELTA\n# MAGIC LOCATION '${da.paths.user_db}/heart_rate_silver'\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ここで、先に定義した **`sql_query`** を **`Upsert`** クラスに渡します。\n\n# COMMAND ----------\n\nstreaming_merge = Upsert(sql_query)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC そして、このクラスを **`foreachBatch`** ロジックで使用する。\n\n# COMMAND ----------\n\nquery = (deduped_df.writeStream\n .foreachBatch(streaming_merge.upsert_to_delta)\n .outputMode(\"update\")\n .option(\"checkpointLocation\", f\"{DA.paths.checkpoints}/recordings\")\n .trigger(availableNow=True)\n .start())\n\nquery.awaitTermination()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **`heart_rate_silver`** テーブルに処理された一意なエントリの数が、上記のバッチ重複排除クエリと一致していることがわかる。\n\n# COMMAND ----------\n\nnew_total = spark.read.table(\"heart_rate_silver\").count()\n\nprint(f\"Old Total: {old_total:,}\")\nprint(f\"New Total: {new_total:,}\")\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Run the following cell to delete the tables and files associated with this lesson.\n\n# COMMAND ----------\n\nDA.cleanup()\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC © 2022 Databricks, Inc. All rights reserved. \n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation . \n# MAGIC \n# MAGIC Privacy Policy | Terms of Use | Support \n","repo_name":"skotani-db/partner-elevate","sub_path":"Advanced-Data-Engineering-with-Databricks-JA/03 - Promoting to Silver/ADE 3.1 - Streaming Deduplication.py","file_name":"ADE 3.1 - Streaming Deduplication.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"22907183418","text":"# PROGRAM UNTUK PERSEGI PANJANG\nprint(\"\\033c\") # To close all\n\n# LIBRARY YANG DIBUTUHKAN\nimport numpy as np\nimport matplotlib.pyplot as matplot\n\n# ==============================================================================================\n# ======================================= MEMBUAT FUNGSI =======================================\n# ==============================================================================================\n# Funsi untuk membuat titik\ndef create_dot(no_of_points, hd, cordinate_x, cornidate_y, screen):\n # MENGGAMBAR 4 TITIK DENGAN WARNA PUTIH (loop, condition, comparation)\n for k in range(1, no_of_points + 1):\n x = cordinate_x[k]\n y = cornidate_y[k]\n for i in range(x - hd, x + hd):\n for j in range(y - hd, y + hd):\n if ((i - x) ** 2 + (j - y) ** 2) < hd ** 2:\n screen[j, i, :] = 255 #225 adalah warna putih\n\ndef create_line(no_of_points, cordinate_x, cordinate_y, hw, screen):\n for k in range(1, no_of_points + 1):\n xa = cordinate_x[k];\txb = cordinate_x[k + 1]\n ya = cordinate_y[k];\tyb = cordinate_y[k + 1]\n x_min = min(xa, xb);\tx_max = max(xa, xb)\n y_min = min(ya, yb);\ty_max = max(ya, yb)\n dy = yb - ya;\tdx = xb - xa\n\n if abs(dy) <= abs(dx):\n line(dy, dx, x_min, x_max, xa, ya, hw, screen, \"column\")\n\n if abs(dx) < abs(dy):\n line(dx, dy, y_min, y_max, ya, xa, hw, screen, \"row\")\n\ndef line(d1, d2, min, max, a1, a2, diameter_w, screen, direction):\n my = d1 / d2\n for i in range(min, max):\n j = int(my * (i - a1) + a2) # MENCARI Y MENGGUNAKAN NILAI X\n # print('x, y =', i, ',', j)\n\n screen[j, i, 0] = 255\n screen[j, i, 1] = 0\n screen[j, i, 2] = 0\n\n if direction == \"column\":\n border_line(i, j, diameter_w, screen)\n else:\n # row\n border_line(j, i, diameter_w, screen)\n\ndef border_line(x, y, diameter_w, screen):\n for i in range(x - diameter_w, x + diameter_w): # MEMBUAT LINGKARAN DI SEKITAR (X,Y) DAN MEWARNAINYA DENGAN WARNA BIRU\n for j in range(y - diameter_w, y + diameter_w):\n if ((i - x) ** 2 + (j - y) ** 2) < diameter_w ** 2:\n #screen[j, i, 2] = 255\n screen[j, i, 0] = 255\n screen[j, i, 1] = 0\n screen[j, i, 2] = 0\n\n# ==============================================================================================\n# ======================================= PROGRAM UTAMA ========================================\n# ==============================================================================================\n\n# PENGATURAN UKURAN LAYAR\n# col = int(1920); row = int(1080)\ncol = int(800); row = int(800)\n\n# MENENRUKAN JUMLAH TITIK YANG DIGUNAKAN, UNTUK PERSEGI PANJANG ADALAH 4 TITIK.\n# TITIK JUGA BISA DIBUAT MENJADI INPUTAN OLEH USER\nno_of_points = 7\n\n#Structure of the array relative to the point-1 up to point-7:\n#The value of the last position is the same as x1\n#This is to anticipate the drawing of the line between p4 and p1\n# xn ADALAH KOORDINAT X DAN yn ADALAH KOORDINAT Y\n# XN DAN YN HARUS BERJUMLAH SAMA DAN PEMASANGANNYA BERDASARKAN NILAI INDEX,\n# CONTOH X PADA INDEX 1 DIPASANGKAN DENGAN Y PADA INDEX 1\ncordinate_y = [0, 400, 200, 200, 400, 600, 600, 400, 400]\ncordinate_x = [0, 169, 285, 515, 631, 515, 285, 169, 631]\n\n# PENGATURAN UNTUK DIAMETER TITIK DAN LEBAR BARIS\npd = int(20); lw = int(10)\n\n# MENGHITUNG DIAMTER SETENGAH TITIK DAN LEBAR SETENGAH GARIS\nhd = int(pd/2); hw = int(lw/2)\n\n# MEMPERSIAPKAN LAYAR DENGAN WARNA HITAM\nscreen = np.zeros(shape=(row, col, 3), dtype=np.uint16)\nscreen[:, :, :] = 255\n\n# MENGGAMBAR 4 TITIK DENGAN WARNA PUTIH (loop, 
condition, comparation)\n#create_dot(no_of_points, hd, cordinate_x, cordinate_y, screen)\n\n# MEMBUAT BARIS DIANTARA TITIK-1 DAN TITIK-2, DIANTARA TITIK-2 DAN TITIK LAINNYA\ncreate_line(no_of_points, cordinate_x, cordinate_y, hw, screen)\n\nmatplot.figure()\nmatplot.imshow(screen)\nmatplot.show()","repo_name":"aditya1-github/IFA515-UTS-Computer-Vision","sub_path":"002. Project/VisKomp/PraktikumSegiEnam.py","file_name":"PraktikumSegiEnam.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
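The `line` helper above walks along the axis with the larger extent and derives the other coordinate from the slope. A compact standalone restatement of that idea (the function name and return format are mine, and a non-degenerate segment is assumed):

```python
# Slope-stepping rasterization as used by create_line/line above: iterate
# along the major axis and compute the minor coordinate from
# j = m * (i - x0) + y0.
def raster_line(x0, y0, x1, y1):
    points = []
    if abs(x1 - x0) >= abs(y1 - y0):          # shallow line: step over columns
        m = (y1 - y0) / (x1 - x0)
        for i in range(min(x0, x1), max(x0, x1)):
            points.append((i, int(m * (i - x0) + y0)))
    else:                                      # steep line: step over rows
        m = (x1 - x0) / (y1 - y0)
        for j in range(min(y0, y1), max(y0, y1)):
            points.append((int(m * (j - y0) + x0), j))
    return points

print(raster_line(0, 0, 5, 2))  # [(0, 0), (1, 0), (2, 0), (3, 1), (4, 1)]
```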
+{"seq_id":"6471681662","text":"a=int(input())\nl=list(map(int,input().split()))\nc=[]\nd=0\nfor i in l:\n i=str(i)\n l1=list(i)\n l1.reverse()\n e=(''.join(l1))\n c.append(e)\nfor i in range(a):\n if l[i]==int(c[i]):\n d+=1\nprint(d)\n","repo_name":"21a91a04f5/codemind-python","sub_path":"Count_palindromes.py","file_name":"Count_palindromes.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29843353926","text":"import os\n\nclass Plugin:\n\n def __init__(self, *args, **kwargs): \n self.plugin_name = os.path.basename(__file__)\n super()\n \n\n def execute(self, args):\n print('request',self.plugin_name,args)\n\n if \"text\" in args:\n return {\n \"contents\": args['text'].upper()\n }\n else:\n return {\n \"contents\": ''\n }","repo_name":"cristianvasquez/obsidian-lab-py","sub_path":"examples/to_upper_case.py","file_name":"to_upper_case.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"61"}
+{"seq_id":"20259481036","text":"# UVa 514 - Rails\n# https://onlinejudge.org/external/5/514.pdf\n\ndef test_case(n):\n\twhile True:\n\t\tb = [int(x) for x in input().split()]\n\t\tif len(b) == 1 and b[0] == 0:\n\t\t\tbreak\n\t\tstack = []\n\t\tk = 0\n\t\tfor i in range(1, n + 1):\n\t\t\tstack.append(i)\n\t\t\twhile len(stack) > 0 and k < n and b[k] == stack[-1]:\n\t\t\t\tstack.pop()\n\t\t\t\tk += 1\n\t\tfor i in range(k, n):\n\t\t\tif b[i] == stack[-1]:\n\t\t\t\tstack.pop()\n\t\tprint(\"Yes\" if len(stack) == 0 else \"No\")\n\n\nif __name__ == \"__main__\":\n\twhile True:\n\t\tn = int(input())\n\t\tif n == 0:\n\t\t\tbreak\n\t\ttest_case(n)\n\t\tprint()\n","repo_name":"eloyhz/competitive-programming","sub_path":"cpbook/2_data_structures/514_rails.py","file_name":"514_rails.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23427249311","text":"txt = [line.strip() for line in open('input.txt').readlines()]\noutput = open('output.txt','w')\ncaseCount = int(txt[0])\n\ndef solver(costOfFarm,farmProd,numOfCookies):\n newTimeFin = numOfCookies/2.0 \n farms= 0.0\n rate = costOfFarm/2.0\n timeFin = float('inf') \n while newTimeFin < timeFin:\n timeFin = newTimeFin\n farms +=1\n newRate = farmProd*farms + 2.0 \n newTimeFin = numOfCookies/newRate + rate\n rate += costOfFarm/newRate\n return timeFin\n \n\nfor ind in range(caseCount):\n cost,fprod,target = txt[ind+1].split()\n out = str(solver(float(cost),float(fprod),float(target)))\n output.write('Case #'+ str(ind+1)+': '+out+'\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/3227.py","file_name":"3227.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33563639997","text":"import deck\n\nclass Player:\n players = []\n def __init__(self, name):\n self.name = name\n self.hand= []\n self.players.append(self)\n \n def deal(self, my_deck):\n for i in range(26):\n my_cards = deck.Deck.get_card(my_deck)\n self.hand.append(my_cards)\n print(self.hand)\n\n @classmethod\n def get_players(cls):\n for player in cls.players:\n print(f\"Player Name: {player.name}\")\n\n @staticmethod\n def is_valid(name):\n if type(name) == str: \n x = Player(name)\n print(\"Valid\")\n return x\n else:\n print(\"Invalid input\")\n","repo_name":"c-osornio/python_stack","sub_path":"fundamentals/oop/deck_of_cards/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"29678206623","text":"import sys\r\n\r\ndef main():\r\n \r\n aa={0:'G',1:'A',2:'V',3:'I',4:'L',5:'M',6:'F',7:'Y',8:'W',9:'S',10:'T',11:'N',12:'Q',13:'C',14:'P',15:'H',16:'R',17:'K',18:'D',19:'E'}\r\n aa2={'G':0,'A':1,'V':2,'I':3,'L':4,'M':5,'F':6,'Y':7,'W':8,'S':9,'T':10,'N':11,'Q':12,'C':13,'P':14,'H':15,'R':16,'K':17,'D':18,'E':19}\r\n\r\n f1 = sys.argv[1] # Input file (Counts file with NNN codons, instead of amino acids)\r\n f2 = sys.argv[2] # Output file name\r\n \r\n length = 0\r\n with open(f1) as f:\r\n for line in f:\r\n length = length + 1\r\n print(\"number of lines in the input: \" + str(length) + \" (674 for the input file provided)\")\n aaCounts = [[0 for i in range(20)] for j in range(length-1)]\n \r\n startaa = 35 ### Env mutagenesis starts at site 35\r\n \r\n with open(f1) as f:\r\n firstLine = f.readline().rstrip().split('\\t') # Need to keep this in order to skip first line\r\n firstLine = \"POSITION,WT,K,N,K,N,T,T,T,T,R,S,R,S,I,I,M,I,Q,H,Q,H,P,P,P,P,R,R,R,R,L,L,L,L,E,D,E,D,A,A,A,A,G,G,G,G,V,V,V,V,STOP,Y,STOP,Y,S,S,S,S,STOP,C,W,C,L,F,L,F\".split(',')\r\n ## Comment this out if not needed\r\n print(\"number of columns in the input: \" + str(len(firstLine)) + \" (66 for the input file provided)\") ## 2 fields (site, WT) + 64 codons\r\n for lineText in f:\r\n line = lineText.split(',')\r\n for pos in range(2,len(line)):\r\n if(firstLine[pos] != \"STOP\"):\r\n aaCounts[int(line[0])-startaa][aa2[firstLine[pos]]] += int(line[pos])\r\n #print(aaCounts[0]) \r\n with open(f2,'w') as f:\r\n for line in aaCounts:\r\n for pos in range(len(line)):\r\n f.write(str(line[pos]))\r\n if(pos int:\r\n \"\"\"map 2-space to 1-space\r\n \"\"\"\r\n x, y = two_tuple\r\n return width * x + y + 1\r\n\r\ndef one_to_two(n, width) -> (int, int):\r\n \"\"\"map 1-space to 2-space\r\n \"\"\"\r\n return (n//width - int(n % width == 0), (n - 1) % width)\r\n\r\ndef find_neighbors(index, width, height, pixels, reach) -> list:\r\n \"\"\"find all neighbors\r\n 1) map point in 1-space to 2-space\r\n 2) add all possible combinations of offsets <= reach\r\n to compute a pixels \"neighborhood\"\r\n 3) map the neighborhood back to 1-space and find the pixel value\r\n at each index.\r\n \"\"\"\r\n x, y = one_to_two(index, width)\r\n neighborhood = set() # lazy way to filter duplicates\r\n # generate neighborhood - set of all neighbors.\r\n for i in range(0, reach+1):\r\n for j in range(0, reach+1):\r\n neighborhood.add((x-i, y+j))\r\n neighborhood.add((x+i, y-j))\r\n neighborhood.add((x-i, y-j))\r\n neighborhood.add((x+i, y+j))\r\n # filter impossible neighbors\r\n neighborhood = [(i,j) for i,j in neighborhood if 0 <= i < height and 0 <= j < width]\r\n # map 2-tuples back to integers, subtract 1 to create valid indicies.\r\n neighborhood = [two_to_one((i,j), width) - 1 for i,j in neighborhood]\r\n # find pixel value of each neighbor\r\n return [pixels[i] for i in neighborhood]\r\n\r\ndef median(arr):\r\n \"\"\"sort array with mergesort, then calculate the median\r\n improvements: \r\n 1) for len(arr) <= 20 insertion sort might be faster; idk worth testing.\r\n for instance the reach parameter in denoise is greter than 2 maybe stick\r\n with merge.\r\n 2) the code given in the assignment arr[len(arr)//2] only computes the median\r\n for lists with odd lengths. 
The following function correctly \r\n calculates the median for even and odd length lists.\r\n \"\"\"\r\n merge_sort(arr)\r\n return (arr[len(arr)//2] + arr[len(arr)//2 - int(len(arr) % 2 == 0)])/2\r\n\r\ndef merge_sort(arr):\r\n if len(arr) > 1:\r\n \r\n # Finding middle of input list\r\n m = len(arr)//2\r\n \r\n # Bifurcate input list \r\n L, R = arr[:m], arr[m:]\r\n \r\n # Sort each half recursively\r\n merge_sort(L)\r\n merge_sort(R)\r\n \r\n i, j, k = 0, 0, 0\r\n \r\n # Copy elements from arr to L & R\r\n while i < len(L) and j < len(R):\r\n if L[i] < R[j]:\r\n arr[k] = L[i]\r\n i += 1\r\n else:\r\n arr[k] = R[j]\r\n j += 1\r\n k += 1\r\n \r\n # Final while loops check for leftover elements.\r\n while i < len(L):\r\n arr[k] = L[i]\r\n i += 1\r\n k += 1\r\n \r\n while j < len(R):\r\n arr[k] = R[j]\r\n j += 1\r\n k += 1\r\n\r\ndef denoise_image(pixels, width, height, reach = 2, beta = 0.2):\r\n \"\"\"if abs(original - median) / (original + 0.1) > beta, replace pixel\r\n ***\r\n this function assumes pixels has been transformed prior to invokation\r\n [i,i,i ...] -> [i, ...]\r\n remove redundancy in pixel list.\r\n \"\"\"\r\n from copy import deepcopy\r\n new_pixels = deepcopy(pixels)\r\n\r\n for i in range(len(pixels)):\r\n neighbors = find_neighbors(i, width, height, pixels, reach)\r\n med = median(neighbors)\r\n if abs(pixels[i] - med)/ (pixels[i]+ 0.1) > beta:\r\n new_pixels[i] = med\r\n return new_pixels\r\n\r\n\r\ndef create_pixels(read_this):\r\n \"\"\"\r\n improvements:\r\n - when opening a file use the with keyword.\r\n adds error handling & auto closes file.\r\n - no need to read all 3 integers from each line,\r\n because image in black & white the rgb channels\r\n all have the same value. Reading all 3 values \r\n triples your memory requirement.\r\n \"\"\"\r\n f = open(read_this, 'r')\r\n p3 = f.readline()\r\n pixel_list = []\r\n\r\n while True:\r\n line = f.readline()\r\n if line == '':\r\n break\r\n line_list = line.split()\r\n for pix_str in line_list:\r\n pixel_list.append(int(pix_str))\r\n f.close()\r\n return pixel_list\r\n\r\ndef test():\r\n name = \"cat.ppm\"\r\n data = create_pixels(name)\r\n width, height, maxval = data[0:3]\r\n pixels = data[3:]\r\n\r\n # reduce the pixel list, we only need 1 channel.\r\n r_pixels = [pixels[i] for i in range(0, len(pixels), 3)]\r\n print(width, height, maxval)\r\n new_pixels = denoise_image(r_pixels, width, height)\r\n print(\"new: \",new_pixels)\r\n\r\n\r\nif __name__ == \"__main__\": test()\r\n\r\n","repo_name":"nps01/jack","sub_path":"denoise/denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
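The docstring's point about even-length lists is easy to verify with the `median` defined above: for even n it averages the two middle elements instead of returning `arr[n//2]` alone. (Note that `median` sorts its argument in place.)

```python
# Quick check of the even/odd median behavior described in the docstring.
assert median([3, 1, 2]) == 2        # odd length: the middle element
assert median([4, 1, 3, 2]) == 2.5   # even length: (2 + 3) / 2
```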
+{"seq_id":"9365050919","text":"#\n# @lc app=leetcode.cn id=598 lang=python3\n#\n# [598] 范围求和 II\n#\n\n# @lc code=start\nclass Solution:\n def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:\n minc = minr = 2**31\n for op in ops:\n minc = min(op[0],minc)\n minr = min(op[1],minr)\n if minc == minr == 2**31:\n return m * n\n else:\n return minc * minr\n# @lc code=end\n\n","repo_name":"mqinbin/python_leetcode","sub_path":"598.范围求和-ii.py","file_name":"598.范围求和-ii.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3687327229","text":"#Quick Sort :D\n\ndef quicksort(arr, start, end):\n\tif start >= end:\n\t\treturn\n\tneedle = arr[start]\n\tplace = start\n\t\n\tlesserP = start+1\n\tgreaterP = end\n\t\n\twhile lesserP < greaterP:\n\t\tif (arr[lesserP] > needle and arr[greaterP] < needle):\n\t\t\tarr[lesserP], arr[greaterP] = arr[greaterP], arr[lesserP]\n\t\telif arr[lesserP] <= needle:\n\t\t\tlesserP += 1\n\t\telif arr[greaterP] >= needle:\n\t\t\tgreaterP -= 1\n\t\n\tif arr[lesserP] > needle:\n\t\tlesserP -= 1\n\t\n\tarr[lesserP], arr[place] = arr[place], arr[lesserP]\n\tplace = lesserP\n\t\n\tquicksort(arr, start, place - 1)\n\tquicksort(arr, place + 1, end)\n\t\n\t\n\t\narray = [7, 4, 4, 2, 4, 8, 9, 3, 1]\nquicksort(array, 0, len(array)-1)\nprint(array)\n\n","repo_name":"rasakereh/my_DataStructure_practice","sub_path":"qsort/qsort.py","file_name":"qsort.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6570670978","text":"# coding: utf-8\n\nimport os\nimport xarray as xr\nimport pandas as pd\nimport warnings\nfrom dateutil.relativedelta import relativedelta\n\n# todo select single or ensemble model run\n# todo overwrite if observation data is not available\n\n\nclass EnsembleAnalyses(object):\n\n def __init__(self, forecast_dir, grdc_dir):\n self.initialized = False\n # Initalize ensemble_analyses class\n # Open specified forecast directory based\n self.forecast_dir = forecast_dir\n # If not specified open the last created folder in subdirectory\n if forecast_dir == \"\":\n all_subdirs = [d for d in os.listdir('.') if os.path.isdir(d)]\n try:\n latest_subdir = max(all_subdirs, key=os.path.getmtime)\n except:\n # no subdirectories to take the latest of\n latest_subdir = os.path.abspath('.')\n self.forecast_dir = latest_subdir\n\n # Open specified GRDC station directory\n self.grdc_dir = grdc_dir\n\n def forecast_read(self):\n # Create empty dataset and list\n forecast_ds = xr.Dataset()\n forecast_ds_stats = xr.Dataset()\n\n # Append all ensemble member in directory to xarray dataset\n for file in os.listdir(self.forecast_dir):\n # break up name and extension\n fName, fExt = os.path.splitext(file)\n\n # only use netcdf, ignore 'dischargeEns.nc file\n if fExt == '.nc' and fName != 'dischargeEns':\n fpath = os.path.join(self.forecast_dir, file)\n var_name = fName[:8]\n forecast_ds[var_name] = xr.open_dataarray(fpath)\n\n # Append forecast ensemble statistics to xarray dataset\n if fName == 'dischargeEns':\n fpath = os.path.join(self.forecast_dir, file)\n forecast_ds_stats = xr.open_dataset(fpath)\n\n self.forecast_ds = forecast_ds\n self.forecast_ds_stats = forecast_ds_stats\n\n self.initialized = True\n return self.forecast_ds, self.forecast_ds_stats\n\n def grdc_read(self, grdc_station_id, lat=None, lon=None):\n if not self.initialized:\n raise NotImplementedError(\n 'Run def forecast_read before def grdc_read')\n\n # Set grdc_station filename based on grdc_station_ID\n grdc_station_filename = str(grdc_station_id) + \\\n '.day'\n self.grdc_station_path = os.path.join(self.grdc_dir,\n grdc_station_filename)\n self.grdc_station_id = grdc_station_id\n\n # Read all GRDC station metadata with grdc_metadata_reader function\n if not os.path.isfile(self.grdc_station_path):\n raise NotImplementedError('Could not find file',\n self.grdc_station_path)\n self.metadata = grdc_metadata_reader(self.grdc_station_path)\n\n # Overwrite GRDC metadata lat/lon with specified lat/lon when present\n if lat is not None:\n self.metadata[\"grdc_latitude_in_arc_degree\"] = lat\n if lon is not None:\n self.metadata[\"grdc_longitude_in_arc_degree\"] = lon\n\n\n # Import GRDC data into dataframe and modify dataframe format\n grdc_station_df = pd.read_table(self.grdc_station_path, skiprows=40,\n delimiter=';')\n grdc_station_df = grdc_station_df.rename(columns={'YYYY-MM-DD': 'date',\n ' Original': 'discharge'})\n grdc_station_df = grdc_station_df.reset_index().set_index(\n pd.DatetimeIndex(grdc_station_df['date']))\n grdc_station_df = grdc_station_df.drop(columns=['hh:mm',\n ' Calculated', ' Flag', 'index', 'date'])\n\n # Select GRDC station data that matches the forecast results Date\n grdc_station_select = grdc_station_df.loc[pd.to_datetime(\n str(self.forecast_ds.time.min().values)).strftime(\"%Y-%m-%d\"):pd.\n to_datetime(str(self.forecast_ds.time.max().values)).\n strftime(\"%Y-%m-%d\")]\n\n # Raise warning and use 10 year old data when data mismatch between\n # forecast results and GRDC station 
observation occurs\n if grdc_station_select.empty:\n warnings.warn('GRDC station does not contain observations for \\\n forecast date. From the forecast date 10 years are subtracted in \\\n order to find observation data')\n # todo Change hardcoded - 10 years Remove or give alternative\n # year statement\n tstart = pd.to_datetime(self.forecast_ds.time.min().values) - \\\n relativedelta(years=10)\n tstart = tstart.strftime(\"%Y-%m-%d\")\n tend = pd.to_datetime(self.forecast_ds.time.max().values) - \\\n relativedelta(years=10)\n tend = tend.strftime(\"%Y-%m-%d\")\n\n grdc_station_select = grdc_station_df.loc[str(tstart):str(tend)]\n\n self.grdc_station_select = grdc_station_select\n\n return self.metadata, self.grdc_station_select\n\n\ndef grdc_metadata_reader(grdc_station_path):\n \"\"\"\n # Initiating a dictionary that will contain all GRDC attributes.\n # This function is based on earlier work by Rolf Hut.\n # https://github.com/RolfHut/GRDC2NetCDF/blob/master/GRDC2NetCDF.py\n # DOI: 10.5281/zenodo.19695\n # that function was based on earlier work by Edwin Sutanudjaja\n # from Utrecht University.\n # https://github.com/edwinkost/discharge_analysis_IWMI\n # Modified by Susan Branchett\n \"\"\"\n\n # initiating a dictionary that will contain all GRDC attributes:\n attributeGRDC = {}\n\n # read the file\n f = open(grdc_station_path)\n allLines = f.read()\n f.close()\n\n # split the content of the file into several lines\n allLines = allLines.replace(\"\\r\", \"\")\n allLines = allLines.split(\"\\n\")\n\n # get grdc ids (from files) and check their consistency with their\n # file names\n id_from_file_name = int(os.path.basename(grdc_station_path).split(\".\")[0])\n id_from_grdc = None\n if id_from_file_name == int(allLines[8].split(\":\")[1].strip()):\n id_from_grdc = int(allLines[8].split(\":\")[1].strip())\n else:\n print(\"GRDC station \" + str(id_from_file_name) + \" (\" +\n str(grdc_station_path) + \") is NOT used.\")\n\n if id_from_grdc is not None:\n\n attributeGRDC[\"grdc_file_name\"] = grdc_station_path\n attributeGRDC[\"id_from_grdc\"] = id_from_grdc\n\n try:\n attributeGRDC[\"file_generation_date\"] = \\\n str(allLines[6].split(\":\")[1].strip())\n except:\n attributeGRDC[\"file_generation_date\"] = \"NA\"\n\n try:\n attributeGRDC[\"river_name\"] = \\\n str(allLines[9].split(\":\")[1].strip())\n except:\n attributeGRDC[\"river_name\"] = \"NA\"\n\n try:\n attributeGRDC[\"station_name\"] = \\\n str(allLines[10].split(\":\")[1].strip())\n except:\n attributeGRDC[\"station_name\"] = \"NA\"\n\n try:\n attributeGRDC[\"country_code\"] = \\\n str(allLines[11].split(\":\")[1].strip())\n except:\n attributeGRDC[\"country_code\"] = \"NA\"\n\n try:\n attributeGRDC[\"grdc_latitude_in_arc_degree\"] = \\\n float(allLines[12].split(\":\")[1].strip())\n except:\n attributeGRDC[\"grdc_latitude_in_arc_degree\"] = \"NA\"\n\n try:\n attributeGRDC[\"grdc_longitude_in_arc_degree\"] = \\\n float(allLines[13].split(\":\")[1].strip())\n except:\n attributeGRDC[\"grdc_longitude_in_arc_degree\"] = \"NA\"\n\n try:\n attributeGRDC[\"grdc_catchment_area_in_km2\"] = \\\n float(allLines[14].split(\":\")[1].strip())\n if attributeGRDC[\"grdc_catchment_area_in_km2\"] <= 0.0:\n attributeGRDC[\"grdc_catchment_area_in_km2\"] = \"NA\"\n except:\n attributeGRDC[\"grdc_catchment_area_in_km2\"] = \"NA\"\n\n try:\n attributeGRDC[\"altitude_masl\"] = \\\n float(allLines[15].split(\":\")[1].strip())\n except:\n attributeGRDC[\"altitude_masl\"] = \"NA\"\n\n try:\n attributeGRDC[\"dataSetContent\"] = \\\n 
str(allLines[20].split(\":\")[1].strip())\n except:\n attributeGRDC[\"dataSetContent\"] = \"NA\"\n\n try:\n attributeGRDC[\"units\"] = str(allLines[22].split(\":\")[1].strip())\n except:\n attributeGRDC[\"units\"] = \"NA\"\n\n try:\n attributeGRDC[\"time_series\"] = \\\n str(allLines[23].split(\":\")[1].strip())\n except:\n attributeGRDC[\"time_series\"] = \"NA\"\n\n try:\n attributeGRDC[\"no_of_years\"] = \\\n int(allLines[24].split(\":\")[1].strip())\n except:\n attributeGRDC[\"no_of_years\"] = \"NA\"\n\n try:\n attributeGRDC[\"last_update\"] = \\\n str(allLines[25].split(\":\")[1].strip())\n except:\n attributeGRDC[\"last_update\"] = \"NA\"\n\n try:\n attributeGRDC[\"nrMeasurements\"] = \\\n int(str(allLines[38].split(\":\")[1].strip()))\n except:\n attributeGRDC[\"nrMeasurements\"] = \"NA\"\n\n return attributeGRDC\n","repo_name":"eWaterCycle/hydro-analyses","sub_path":"eosc_pilot/Ensemble_Analyses.py","file_name":"Ensemble_Analyses.py","file_ext":"py","file_size_in_byte":9273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74221704194","text":"import pandas as pd\nimport psycopg2 \n\ndef count_nans(row, df, nan_count):\n count = 0\n for col in df.columns:\n if pd.isna(row[col]):\n if col == \"icu_patients\" or col == \"hosp_patients\":\n return False\n count += 1\n return count <= nan_count\n\ndef insert_row(cols: list, conn, cursor):\n dataset_df = pd.read_csv(\"setup/dataset.csv\")\n dataset_df = dataset_df.loc[:,cols].drop_duplicates()\n dataset_df = dataset_df[dataset_df.iso_code.apply(lambda row : row.split(\"_\")[0]!=\"OWID\")]\n dataset_df = dataset_df[dataset_df.apply(lambda row: count_nans(row,dataset_df,2), axis=1)]\n\n # Sütun sayısı kadar %s ekle\n query = \"\"\"INSERT INTO Hospital_AND_ICU(location_id,icu_patients ,icu_patients_per_million,hosp_patients ,hosp_patients_per_million ,weekly_icu_admissions ,weekly_icu_admissions_per_million ,weekly_hosp_admissions ,weekly_hosp_admissions_per_million , date_time) VALUES(%(iso_code)s,%(icu_patients)s,\n %(icu_patients_per_million)s,%(hosp_patients)s,%(hosp_patients_per_million)s, %(weekly_icu_admissions)s, %(weekly_icu_admissions_per_million)s, %(weekly_hosp_admissions)s, %(weekly_hosp_admissions_per_million)s, %(date)s);\"\"\"\n for idx, row in dataset_df.iterrows():\n insert_dict = dict()\n for col in cols:\n if pd.isna(row[col]):\n insert_dict[col] = None\n else:\n insert_dict[col] = row[col]\n cursor.execute(query, insert_dict)\n conn.commit()\n\n\n\n\n\nconn = psycopg2.connect(database=\"postgres\",\n host=\"localhost\",\n user=\"postgres\",\n password=\"1234\",\n port=\"5432\")\n\nqueryTable = \"\"\"DROP TABLE IF EXISTS Hospital_AND_ICU;\"\"\"\ncursor = conn.cursor()\ncursor.execute(queryTable)\nconn.commit()\n\ncursor = conn.cursor()\n\nqueryTable = \"\"\"CREATE TABLE Hospital_AND_ICU (\n ID SERIAL primary key,\n location_id varchar(80) references locations (location_id),\n icu_patients integer,\n icu_patients_per_million numeric,\n hosp_patients integer,\n hosp_patients_per_million numeric,\n weekly_icu_admissions integer,\n weekly_icu_admissions_per_million numeric,\n weekly_hosp_admissions integer,\n weekly_hosp_admissions_per_million numeric,\n date_time date\n);\"\"\"\n\ncursor.execute(queryTable)\nconn.commit()\ninsert_row([\"iso_code\",\"icu_patients\",\"icu_patients_per_million\",\"hosp_patients\",\"hosp_patients_per_million\", \"weekly_icu_admissions\", \"weekly_icu_admissions_per_million\", \"weekly_hosp_admissions\", \"weekly_hosp_admissions_per_million\", \"date\"], conn, cursor)\nconn.close()\n","repo_name":"shintalha/Covid-19-Data-Management-System","sub_path":"setup/setupCelal.py","file_name":"setupCelal.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32407130990","text":"#!/usr/bin/env python\n\"\"\"\n_plusone_\n\nrepo/cirrus independent util for flagging PRs in github as plusone'd\n\n\"\"\"\nimport os\nimport git\nimport sys\nimport json\nimport argparse\nimport requests\nfrom cirrus.configuration import get_github_auth\n\n\nclass GitHubHelper(object):\n \"\"\"\n GitHubHelper\n\n Lightweight helper for this command to set up\n a GH session\n\n \"\"\"\n def __init__(self):\n self.user, self.token = get_github_auth()\n self.auth_headers = {\n 'Authorization': 'token {0}'.format(self.token),\n 'Content-Type': 'application/json'\n }\n self.session = requests.Session()\n self.session.headers.update(self.auth_headers)\n\n def get_pr(self, org, repo, pr_id):\n \"\"\"\n _get_pr_\n\n grab the PR details\n \"\"\"\n url = \"https://api.github.com/repos/{org}/{repo}/pulls/{id}\".format(\n org=org,\n repo=repo,\n id=pr_id\n )\n resp = self.session.get(url)\n resp.raise_for_status()\n data = resp.json()\n return data\n\n def set_branch_state(self, org, repo, context, repo_dir=None, branch=None):\n \"\"\"\n _current_branch_mark_status_\n\n Mark the CI status of the current branch.\n\n :param state: state of the last test run, such as \"success\" or \"failure\"\n :param context: The GH context string to use for the state, eg\n \"continuous-integration/travis-ci\"\n\n :param branch: Optional branch name or sha to set state on,\n defaults to current active branch\n\n \"\"\"\n if repo_dir is None:\n repo_dir = os.getcwd()\n git_repo = git.Repo(repo_dir)\n if branch is None:\n branch = git_repo.active_branch.name\n\n # pull branch_from from remote\n ref = \"refs/heads/{0}:refs/remotes/origin/{0}\".format(branch)\n git_repo.remotes.origin.pull(ref)\n\n sha = git_repo.head.commit.hexsha\n url = \"https://api.github.com/repos/{org}/{repo}/statuses/{sha}\".format(\n org=org,\n repo=repo,\n sha=sha\n )\n data = json.dumps(\n {\n \"state\": 'success',\n \"description\": \"State after cirrus check.\",\n \"context\": context\n }\n )\n resp = self.session.post(url, data=data)\n resp.raise_for_status()\n\n def plus_one(self, org, repo, sha, context, issue_url):\n \"\"\"\n _plus_one_\n\n Set the status for the given context to success on the\n provided sha\n \"\"\"\n url = \"https://api.github.com/repos/{org}/{repo}/statuses/{sha}\".format(\n org=org,\n repo=repo,\n sha=sha\n )\n data = json.dumps(\n {\n \"state\": 'success',\n \"description\": \"{} set to success by {}\".format(\n context, self.user\n ),\n \"context\": context\n }\n )\n resp = self.session.post(url, data=data)\n resp.raise_for_status()\n\n comment = \"+1 added by {}\".format(self.user)\n comment_url = \"{}/comments\".format(issue_url)\n comment_data = {\n \"body\": comment,\n }\n resp = self.session.post(comment_url, data=json.dumps(comment_data))\n resp.raise_for_status()\n\n\ndef build_parser():\n \"\"\"\n construct a CLI parser and process args\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description='plusone command for adding +1 to a github PR'\n )\n\n parser.add_argument(\n '--id', '-i',\n default=None,\n type=int,\n dest='id',\n help='ID of pull request to approve/+1'\n )\n parser.add_argument(\n '--plus-one-context', '-c',\n default='+1',\n dest='plus_one_context',\n help='Github context string to use as +1 tag'\n )\n parser.add_argument(\n '--repo', '-r',\n default=None,\n dest='repo',\n help='Github Repo name'\n )\n parser.add_argument(\n '--org', '-o',\n default=None,\n dest='org',\n help='Github Organisation name'\n )\n parser.add_argument(\n '--branch', '-b',\n 
default=None,\n dest='branch',\n help='Branch name if not using a PR ID'\n )\n parser.add_argument(\n '--repo-dir', '-d',\n default=None,\n dest='repo_dir',\n help='Local repo path if not using a PR ID'\n )\n\n opts = parser.parse_args()\n return opts\n\n\ndef main():\n \"\"\"\n _main_\n\n GitHub plusone tool using status and PR API\n\n \"\"\"\n opts = build_parser()\n gh = GitHubHelper()\n\n if not (opts.id or opts.branch):\n print(\"request id={} branch_name={}\".format(opts.id, opts.branch))\n msg = \"Must supply either pull request ID or branch name\"\n print(msg)\n sys.exit(1)\n\n if opts.branch is not None:\n gh.set_branch_state(\n opts.org,\n opts.repo,\n opts.plus_one_context,\n opts.repo_dir,\n branch=opts.branch\n )\n else:\n pr = gh.get_pr(opts.org, opts.repo, int(opts.id))\n sha = pr['head']['sha']\n issue_url = pr['issue_url']\n created_by = pr[\"user\"][\"login\"]\n if created_by == gh.user:\n msg = \"Reviewing your own Pull Requests is not allowed\"\n raise RuntimeError(msg)\n\n gh.plus_one(opts.org, opts.repo, sha, opts.plus_one_context, issue_url)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"evansde77/cirrus","sub_path":"src/cirrus/plusone.py","file_name":"plusone.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"}
+{"seq_id":"42229679439","text":"import re\nimport contractions\nfrom nltk.corpus import stopwords\nimport en_core_web_sm\n\ndef textprep(input_str):\n str1 = contractions.fix(input_str)\n str2 = re.sub('[^a-zA-z0-9\\s]','',str1)\n \n def lowerCase(input_str):\n input_str = input_str.lower()\n return input_str\n \n str3 = lowerCase(str2)\n \n sp = en_core_web_sm.load()\n stpw = set(stopwords.words('english'))\n \n def lemma(input_str):\n s = sp(input_str)\n \n input_list = []\n for word in s:\n w = word.lemma_\n input_list.append(w)\n \n output = ' '.join(word for word in input_list if word not in stpw)\n return output\n \n str4 = lemma(str3)\n return str4","repo_name":"xga0/textprep-package","sub_path":"textprep.py","file_name":"textprep.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71003471876","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n\tPrácticas de Python 2.7 y bases de datos biológicas\n\tPrograma pdbParse.py (parseado de ficheros PDB)\n\tMáster en Bioinformática y Biología Computacional, ENS-ISCIII\n\tCurso: 2015-2016\n\tAsignatura: Big Data Parsing and Processing\n\tAutor: José María Fernández\n'''\n\nfrom __future__ import print_function\nimport sys\nimport re\nimport subprocess\nimport time\n\n'''\nTabla de correspondencia entre los códigos IUPAC de aminoácidos\nformato 3 letras, a códigos de 1 letra. No incluye aminoácidos\nexóticos o infrecuentes como p.ej. selenocisteína o selenometionina\n'''\nthree2one = {\n 'ALA': 'A',\n 'ARG': 'R',\n 'ASN': 'N',\n 'ASP': 'D',\n 'ASX': 'B',\n 'CYS': 'C',\n 'GLU': 'E',\n 'GLN': 'Q',\n 'GLX': 'Z',\n 'GLY': 'G',\n 'HIS': 'H',\n 'ILE': 'I',\n 'LEU': 'L',\n 'LYS': 'K',\n 'MET': 'M',\n 'PHE': 'F',\n 'PRO': 'P',\n 'SER': 'S',\n 'THR': 'T',\n 'TRP': 'W',\n 'TYR': 'Y',\n 'VAL': 'V'\n}\n\nclass PDBParser(object):\n\tdef __init__(self,filenames):\n\t\tself.filenames = filenames\n\t\n\tdef __iter__(self):\n\t\t# Hacemos un iterador, para no perder la posición\n\t\tself.files = iter(self.filenames)\n\t\t\n\t\treturn self\n\t\n\t#def __next__(self):\n\tdef next(self):\n\t\tfor filename in self.files:\t\n\t\t\t# Si se pone el comando a ejecutar en una sola línea, sin array, hay que poner shell=True\n\t\t\tpdb = subprocess.Popen(['gunzip','-c',filename],stdout=subprocess.PIPE,universal_newlines=True)\n\t\t\tprint(\"Procesando fichero \",filename)\n\t\t\n\t\t\ttry:\n\t\t\t\t# Inicialización de variables\n\t\t\t\tpdbid = None\n\t\t\t\tpdbdate = None\n\t\t\t\ttitle = ''\n\t\t\t\tcompndBuff = ''\n\t\t\t\tisCompnd = False\n\t\t\t\t\n\t\t\t\t# Vamos a guardar las moléculas en un array\n\t\t\t\t# aunque se podrían guardar sin problemas en un hash\n\t\t\t\tpdbmolecule = []\n\t\t\t\t\n\t\t\t\t# Las cadenas las vamos a guardar en otro hash separado,\n\t\t\t\t# para poder almacenar fácilmente la secuencia y las referencias\n\t\t\t\t# de forma anidada\n\t\t\t\tpdbchain = {}\n\t\t\t\t\n\t\t\t\t# Estructura alternativa, sin anidamiento\n\t\t\t\t# pdbchainSeqs = {}\n\t\t\t\t# pdbchainAccs = {}\n\t\t\t\t\n\t\t\t\t# ¡A leer el fichero PDB!\n\t\t\t\tfor line in pdb.stdout:\n\t\t\t\t\tline = line.rstrip('\\n')\n\n\t\t\t\t\t# Detección usando expresión regular\n\t\t\t\t\tif re.search('^HEADER ',line) is not None:\n\t\t\t\t\t\t###########################################################\n\t\t\t\t\t\t# Estilo que seguiríamos si no tuviéramos manuales de PDB #\n\t\t\t\t\t\t###########################################################\n\t\t\t\t\t\tline = line.rstrip()\n\t\t\t\t\t\t\n\t\t\t\t\t\ttok = re.split(r\"[ \\t]+\",line)\n\t\t\t\t\t\t\n\t\t\t\t\t\tpdbid = tok[-1]\n\t\t\t\t\t\tpdbdate = tok[-2]\n\t\t\t\t\t\t\n\t\t\t\t\t\t########################################\n\t\t\t\t\t\t# Estilo siguiendo los manuales de PDB #\n\t\t\t\t\t\t########################################\n\t\t\t\t\t\t# pdbid = line[62:66]\n\t\t\t\t\t\t# pdbdate = line[50:59]\n\t\t\t\t\t\t\n\t\t\t\t\t# Detección usando index (recomendable para búsqueda de\n\t\t\t\t\t# patrones exactos en ficheros de varios GB)\n\t\t\t\t\telif line.startswith('TITLE '):\n\t\t\t\t\t\t#######################################\n\t\t\t\t\t\t# Estilo usando expresiones regulares #\n\t\t\t\t\t\t#######################################\n\t\t\t\t\t\t# titlematch = re.search(r\"^TITLE +[0-9]*( ?.*[^ ]) *$\",line)\n\t\t\t\t\t\t# title += titlematch.group(1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tline = 
line.rstrip()\n\t\t\t\t\t\ttitle += line[10:]\n\t\t\t\t\telif line.startswith('COMPND '):\n\t\t\t\t\t\t# Como COMPND tiene sub-registros e información\n\t\t\t\t\t\t# distribuídos en varias líneas, mejor ir guardando\n\t\t\t\t\t\t# todo en una variable para más tarde procesarlo\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Así se pasa de\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# COMPND MOL_ID: 1; \n\t\t\t\t\t\t# COMPND 2 MOLECULE: MOLYBDENUM COFACTOR BIOSYNTHESIS PROTEIN A; \n\t\t\t\t\t\t# COMPND 3 CHAIN: A, B; \n\t\t\t\t\t\t# COMPND 4 SYNONYM: MOAA; \n\t\t\t\t\t\t# COMPND 5 ENGINEERED: YES \n\t\t\t\t\t\t#\n\t\t\t\t\t\t# a\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# MOL_ID: 1; MOLECULE: MOLYBDENUM COFACTOR BIOSYNTHESIS PROTEIN A; CHAIN: A, B; SYNONYM: MOAA; ENGINEERED: YES\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\tisCompnd = True\n\t\t\t\t\t\tline = line.rstrip()\n\t\t\t\t\t\t\n\t\t\t\t\t\tcompndBuff += line[10:]\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Vamos a procesar lo que venía de COMPND\n\t\t\t\t\t\tif isCompnd:\n\t\t\t\t\t\t\t# Pero, ¡sólo una vez!\n\t\t\t\t\t\t\tisCompnd = False\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t# Cada sub-registro de molécula empieza con MOL_ID\n\t\t\t\t\t\t\tmol = compndBuff.split('MOL_ID: ')\n\t\t\t\t\t\t\tfor moltok in mol:\n\t\t\t\t\t\t\t\t# Saltamos posibles elementos vacíos\n\t\t\t\t\t\t\t\tif moltok == '':\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Cada línea tiene el MOL_ID, un MOLECULE y un CHAIN\n\t\t\t\t\t\t\t\tmolmatch = re.search(r\"^([0-9]+); MOLECULE: ([^;]+); CHAIN: ([^;]+)\",moltok)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Obtengamos los nombres de las cadenas, que\n\t\t\t\t\t\t\t\t# van separadas por comas\n\t\t\t\t\t\t\t\t# y tal vez algún espacio\n\t\t\t\t\t\t\t\tchains = re.split(r\"\\s*,\\s*\",molmatch.group(3))\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Con los nombres de cadena preparamos el terreno\n\t\t\t\t\t\t\t\t# para lo que leeremos más adelante, secuencia\n\t\t\t\t\t\t\t\t# y array de referencias a UniProt\n\t\t\t\t\t\t\t\tfor chain in chains:\n\t\t\t\t\t\t\t\t\tpdbchain[chain] = {\n\t\t\t\t\t\t\t\t\t\t'seq': '',\n\t\t\t\t\t\t\t\t\t\t'accs': []\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t# Aquí podríamos haber usado hashes separados, uno\n\t\t\t\t\t\t\t\t\t# para la secuencia y otro para los accessions\n\t\t\t\t\t\t\t\t\t# en lugar de hashes anidados\n\t\t\t\t\t\t\t\t\t# pdbchainSeqs[chain] = ''\n\t\t\t\t\t\t\t\t\t# pdbchainAccs[chain] = []\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Guardemos el moldid, su descripción, y las cadenas\n\t\t\t\t\t\t\t\t# en un hash\n\t\t\t\t\t\t\t\tpdbmolecule.append({'molid': molmatch.group(1),'moldesc': molmatch.group(2),'chains': chains})\n\t \n\t\t\t\t\t\t# Las líneas SEQRES, que tienen el nombre de cadena y \n\t\t\t\t\t\t# los resíduos a guardar\n\t\t\t\t\t\t# SEQRES 3 B 340 ARG CYS ASP TYR CYS MET PRO LYS GLU VAL PHE GLY ASP \n\t\t\t\t\t\tseqmatch = re.search(r\"^SEQRES +\\d+ *([^ ]) *\\d* ([^ ].*[^ ])\",line)\n\t\t\t\t\t\tmatched = seqmatch is not None\n\t\t\t\t\t\t\n\t\t\t\t\t\tdbmatch = None if matched else re.search(r\"^DBREF +[^ ]+ ([^ ]) +\\d+ +\\d+ +UNP +([^ ]+)\",line)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif seqmatch is not None:\n\t\t\t\t\t\t\tchain = seqmatch.group(1)\n\n\t\t\t\t\t\t\t# Almacenamos en un array los resíduos\n\t\t\t\t\t\t\tresidues = re.split(r\" +\",seqmatch.group(2))\n\n\t\t\t\t\t\t\t# Guardaremos aquí temporalmente la secuencia\n\t\t\t\t\t\t\tseqbuf = ''\n\t\t\t\t\t\t\tfor residue in residues:\n\t\t\t\t\t\t\t\t# Detectemos si tenemos correspondencia\n\t\t\t\t\t\t\t\t# Si no la hay, almacenemos una X\n\t\t\t\t\t\t\t\t# Esto pasa con 
aminoácidos exóticos, con\n\t\t\t\t\t\t\t\t# resíduos que no son aminoácidos (ADN, ARN)\n\t\t\t\t\t\t\t\t# y algunas moléculas\n\t\t\t\t\t\t\t\tseqbuf += three2one.get(residue,'X')\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# para ir acumulándola en su sitio\n\t\t\t\t\t\t\tpdbchain[chain]['seq'] += seqbuf\n\n\t\t\t\t\t\t\t# alternativa\n\t\t\t\t\t\t\t# pdbchainSeqs[chain] += seqbuf\n\t\t\t\t\t\t# Y ahora, nos quedaremos con la cadena y el accession\n\t\t\t\t\t\t# pero sólo con los de UniProt\n\t\t\t\t\t\t# DBREF 1TV8 B 1 340 UNP P65388 MOAA_STAAN 1 340\n\t\t\t\t\t\telif dbmatch is not None:\n\t\t\t\t\t\t\tchain = dbmatch.group(1)\n\t\t\t\t\t\t\tacc = dbmatch.group(2)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# Usamos un array porque podría haber más de un accession asociado\n\t\t\t\t\t\t\t# a una cadena (por ejemplo, en el caso de quimeras)\n\t\t\t\t\t\t\tpdbchain[chain]['accs'].append(acc)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# alternativa\n\t\t\t\t\t\t\t# pdbchainAccs[chain].append(acc)\n\t\t\t\t\n\t\t\t\t# Al final devolvemos todo lo recopilado\n\t\t\t\treturn pdbid,pdbdate,title,pdbmolecule,pdbchain\n\t\t\t\t\n\t\t\t\t# alternativa\n\t\t\t\t# return pdbid,pdbdate,title,pdbmolecule,pdbchainSeqs,pdbchainAccs\n\t\t\t\t\n\t\t\texcept IOError as e:\n\t\t\t\tprint(\"Error de lectura de fichero {0}: {1}\".format(e.errno, e.strerror),file=sys.stderr)\n\t\t\t\t#raise\n\t\t\tfinally:\n\t\t\t\tif pdb.returncode is None:\n\t\t\t\t\t# Primer intento de terminación, educado\n\t\t\t\t\tpdb.terminate()\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\tif pdb.returncode is None:\n\t\t\t\t\t\t# Segundo intento de terminación, forzado\n\t\t\t\t\t\tpdb.kill()\n\t\t\n\t\t# Se cierra la lista de ficheros procesado\n\t\tself.files = None\n\t\t# Y como hemos terminado, lo indicamos\n\t\traise StopIteration\n\t\t\n\t\t\n# Comprobación del número de parámetros de entrada\nif __name__ == '__main__':\n\tif len(sys.argv)>1:\n\t\t# Procesamiento de cada fichero\n\t\ttry:\n\t\t\t# alternativa\n\t\t\t# for pdbid,pdbdate,title,pdbmolecule,pdbchainSeqs,pdbchainAccs in PDBParser(sys.argv[1:]):\n\t\t\tfor pdbid,pdbdate,title,pdbmolecule,pdbchain in PDBParser(sys.argv[1:]):\n\t\t\t\t# Aquí es donde tendremos que procesar, filtrar, imprimir ...\n\t\t\t\tprint(\"PDB: {0} Date: {1} Title: {2}\".format(pdbid,pdbdate,title))\n\t\t\t\tprint(\"Mols ({0})\".format(len(pdbmolecule)))\n\t\t\t\t\n\t\t\t\t# Aquí pondríamos el resto de datos\n\t\t\t\tfor pdbmol in pdbmolecule:\n\t\t\t\t\tmolid = pdbmol['molid']\n\t\t\t\t\tprint(\"\\tMOL ID: {0}\".format(molid))\n\t\t\t\t\t# Ahora, ¡a por las cadenas!\n\t\t\t\t\tchains = pdbmol['chains']\n\t\t\t\t\tprint(\"\\tChains ({0})\".format(len(chains)))\n\t\t\t\t\t\n\t\t\t\t\tfor chain in chains:\n\t\t\t\t\t\t# Recuperamos el hash anidado\n\t\t\t\t\t\tchaindata = pdbchain[chain]\n\t\t\t\t\t\t\n\t\t\t\t\t\tseq = chaindata['seq']\n\t\t\t\t\t\taccs = chaindata['accs']\n\t\t\t\t\t\t\n\t\t\t\t\t\t# alternativa\n\t\t\t\t\t\t# seq = pdbchainSeqs[chain]\n\t\t\t\t\t\t# accs = pdbchainAccs[chain]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Aquí se procesaría cada dato\n\t\t\t\t\t\tprint(\"\\t\\tAccessions: {0}\".format(\", \".join(accs)))\n\t\t\t\t\t\tprint(\"\\t\\tSequence Length: {0}\".format(len(seq)))\n\t\texcept:\n\t\t\tprint(\"Error inesperado: \", sys.exc_info()[0],file=sys.stderr)\n\t\t\traise\n\t\t\n\telse:\n\t\traise AssertionError(\"Debes introducir al menos un fichero comprimido con formato 
PDB.\")\n","repo_name":"jmfernandez/bioparsing-master-isciii","sub_path":"pdbParse.py","file_name":"pdbParse.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74448766275","text":"while(True):\n ent = int(input())\n if(ent == 0):\n break\n l = input()\n pos = 0\n #0 N\n #1 L\n #2 S\n #3 O\n for i in range(ent):\n if(l[i]==\"D\"):\n pos+=1\n elif(l[i]==\"E\"):\n pos-=1\n if(pos<0):\n pos=3\n if(pos>3):\n pos=0\n if(pos == 0):\n print(\"N\")\n elif(pos == 1):\n print(\"L\")\n elif(pos==2):\n print(\"S\")\n else:\n print(\"O\")","repo_name":"franciscowllima/URI-Questoes","sub_path":"python/1437.py","file_name":"1437.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"39862202369","text":"\"\"\"Bus Routes.\n\nhttps://leetcode.com/contest/weekly-contest-79/problems/bus-routes/\n\n- Difficulty: Hard\n\nWe have a list of bus routes. Each routes[i] is a bus route that the i-th bus\nrepeats forever. For example if routes[0] = [1, 5, 7], this means that the\nfirst bus (0-th indexed) travels in the sequence 1->5->7->1->5->7->... forever.\n\nWe start at bus stop S (initially not on a bus), and we want to go to bus stop\nT. Travelling by buses only, what is the least number of buses we must take to\nreach our destination? Return -1 if it is not possible.\n\n\"\"\"\nimport sys\n\nfrom collections import defaultdict, deque\n\n\ndef buses_to_destination(routes, start, end):\n \"\"\"Find the least number of buses required to reach destination.\n\n Args:\n routes (List[List[int]]): list of bus routes.\n start (int): initial bus stop.\n end (int): final bus stop (destination).\n\n Returns:\n int: number of buses to destination.\n\n Example:\n >>> buses_to_destination([[1, 2], [2, 3, 4], [3, 5]], 1, 5)\n 3\n\n \"\"\"\n if start == end:\n return 0\n\n stop_to_bus = defaultdict(set) # mapping of connecting buses on each stop.\n for bus, stops in enumerate(routes):\n for stop in stops:\n stop_to_bus[stop].add(bus)\n\n # Breadth First Search\n queue = deque([(start, 0)]) # Initialized with a tuple of start and count.\n visited = {start}\n\n while queue:\n v, count = queue.pop()\n adj = {stop for bus in stop_to_bus[v] for stop in routes[bus]}\n for u in adj - visited:\n if u == end:\n return count + 1\n visited.add(u)\n queue.appendleft((u, count + 1))\n\n return -1\n\n\nif __name__ == '__main__':\n # Sample inputs: routes, start, end = [[1, 2, 7], [3, 6, 7]], 1, 6\n try:\n routes = eval(input('Bus routes (List of list of stops): ').strip())\n start = int(input('Initial bus stop: ').strip())\n end = int(input('Final bus stop: ').strip())\n except Exception as e:\n print('Error while reading inputs (Exception: {})'.format(e))\n sys.exit(1)\n\n if isinstance(routes, list) and all(isinstance(r, list) for r in routes):\n msg = 'Least number of buses to reach destination: {}'\n print(msg.format(buses_to_destination(routes, start, end)))\n else:\n raise TypeError('\"routes\" must be a list of lists.')\n","repo_name":"cabaleirog/coding-challenges","sub_path":"leet_code/bus_routes.py","file_name":"bus_routes.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23791682446","text":" #!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom units import UnitsLog\nfrom cnotes import Notes\n\ngetunits=UnitsLog()\nunits=getunits.unit_log()\ngetnotes=Notes()\nnote=getnotes.connote()\n\nclass TOPSOIL:\n def __init__(self,width,length,depth): \n self.width=float(width)\n self.length=float(length)\n self.depth=float(depth) \n self.excavation=self.depth*self.length*self.width\n self.area=self.length*self.width\n self.snote=(note['excavation']['exc']+note['excavation']['topsoil']+note['excavation']['dispose'])\n self.data=dict(\n note=self.snote,\n length=self.length,\n width=self.width,\n depth=self.depth,\n area=self.area,\n excavation={'cuft':self.excavation,\n \t\t 'cuyd':round(self.excavation/27,2)\n }\n )\n \n##-----------------------------------------------------------------------------------## \n\n\nclass STRIP:\n def __init__(self,width,depth,length,thickness):\n self.rebarLength=29.5\n self.bend=0.5\n self.cover=0.166\n self.width=width\n self.depth=depth\n self.length=length\n self.thickness=thickness\n \n self.excavation=round(self.width*self.depth*self.length,2)\n self.concrete=round(self.width*self.thickness*self.length,2)\n fww=self.width/3 #approximate width of footing wall\n fwh=self.depth-self.thickness #approximate height of footing wall \n self.backfill=round(self.excavation-((fww*fwh*self.length)+self.concrete),2)##approximate footing wall volumr + footing volume\n self.notexc=(note['excavation']['exc']+note['structural']['fdn']+note['excavation']['dispose'])\n self.notebackfill=(note['excavation']['backfill'])\n self.notecon=(note['concrete']['mix']+note['concrete']['r124']+note['concrete']['conc']+note['structural']['fdn'])\n \n\n self.data=dict(\n length=self.length,\n width=self.width,\n depth=self.depth,\n thickness=self.thickness, \n note= {'excavation':self.notexc,\n 'backfill':self.notebackfill,\n 'concrete':self.notecon\n }, \t\n \n excavation={'cuft':self.excavation,\n \t\t 'cuyd':round(self.excavation/27,2)\n },\n concrete= {'cuft':self.concrete,\n 'cuyd':round(self.concrete/27,2)\n },\n backfill= {'cuft':self.backfill,\n 'cuyd':round(self.backfill/27,2)\n }\n )\n \t\t\t\n \n \n def rebars(self,amt_longbars,link_type,link_spacing):\n long_bars=(amt_longbars*self.length)/29.5\n links=int(round(self.length/link_spacing))+2 # add 2 extra link\n link_length=(self.width-0.25)+0.5\n barnote={'mainbar':note['rebar']['cut']+note['rebar']['brs']+note['structural']['fdn'],\n 'links':note['rebar']['fab']+note['rebar']['lnk']\n }\n \n return dict(rebar={'mainBar':int(round(long_bars))+1,#add 1 extra bar\n \t\t 'links':links,\n \t\t 'linkLength':link_length,\n \t\t 'linkBars': int(round((link_length*links)/29.5)),\n 'note':barnote\n \t\t\t\t\t\n \t\t\t\t\t}\n \t\t\t\t\t\n )\n##-----------------------------------------------------------------------------------## \nclass PAD:\n def __init__(self,width,length,depth,thickness):\n self.rebarLength=29.5\n self.bend=0.5\n self.cover=0.166\n self.width=width\n self.depth=depth\n self.length=length\n self.thickness=thickness\n \n self.excavation=self.width*self.depth*self.length\n self.concrete=self.width*self.thickness*self.length\n \n\n self.data=dict(\n length=self.length,\n width=self.width,\n depth=self.depth,\n thickness=self.thickness, \n concrete= {'cuft':self.concrete,\n 'cuyd':self.concrete/27\n }, \t\n \n excavation={'cuft':self.excavation,\n \t\t 'cuyd':self.excavation/27\n }\n )\n \t\t\t\n \n \n def rebars(self,mainbar_spacing,distbar_spacing):\n if self.width>self.length:\n 
width=self.length\n length=self.width\n else:\n width=self.width\n length=self.length\n print(\"TEST ::::\",width,length)\n main_bars=(((width/mainbar_spacing)*length)+(self.bend*2))/self.rebarLength\n dist_bars=(((length/distbar_spacing)*width)+(self.bend*2))/self.rebarLength\n \n \n return dict(rebar={'mainBar':int(round(main_bars)),#add 1 extra bar\n 'distributionBars':int(round(dist_bars)),#add 1 extra bar,\n 'amt_mainBar':int(round(width/mainbar_spacing)),\n 'amt_distBar':int(round(length/distbar_spacing)),\n \t\t 'mainbarLength':(width+(self.bend*2)),\n \t\t 'distributionBarLength': int(round((length+(self.bend*2))))\n \t\t\t\t\t\n \t\t\t\t\t}\n \t\t\t\t\t\n )\n##-----------------------------------------------------------------------------------## \nclass PILE:\n def __init__(self,width,depth,length,thickness):\n self.rebarLength=29.5\n self.bend=0.5\n self.cover=0.166\n self.width=width\n self.depth=depth\n self.length=length\n self.thickness=thickness\n \n self.excavation=self.width*self.depth*self.length\n self.concrete=self.width*self.thickness*self.length\n \n\n self.data=dict(\n length=self.length,\n width=self.width,\n depth=self.depth,\n thickness=self.thickness, \n concrete= {'cuft':self.concrete,\n 'cuyd':self.concrete/27\n }, \t\n \n excavation={'cuft':self.excavation,\n \t\t 'cuyd':self.excavation/27\n }\n )\n \t\t\t\n \n \n def rebars(self,amt_longbars,link_type,link_spacing):\n long_bars=(amt_longbars*self.length)/29.5\n links=int(round(self.length/link_spacing))+2 # add 2 extra link\n link_length=(self.width-0.25)+0.5\n \n return dict(rebar={'mainBar':int(round(long_bars))+1,#add 1 extra bar\n \t\t\t\t\t'links':links,\n \t\t\t\t\t'linkLength':link_length,\n \t\t\t\t\t'linkBars': int(round((link_length*links)/29.5))\n \t\t\t\t\t\n \t\t\t\t\t}\n \t\t\t\t\t\n )\n\n##-----------------------------------------------------------------------------------## \nclass RAFT:\n def __init__(self,width,length,depth,thickness):\n self.rebarLength=29.5\n self.bend=0.5\n self.cover=0.166\n self.width=width\n self.depth=depth\n self.length=length\n self.thickness=thickness\n \n self.excavation=self.width*self.depth*self.length\n self.concrete=self.width*self.thickness*self.length\n \n\n self.data=dict(\n length=self.length,\n width=self.width,\n depth=self.depth,\n thickness=self.thickness, \n concrete= {'cuft':self.concrete,\n 'cuyd':self.concrete/27\n }, \t\n \n excavation={'cuft':self.excavation,\n \t\t 'cuyd':self.excavation/27\n }\n )\n \t\t\t\n \n \n def rebars(self,amt_longbars,link_type,link_spacing):\n long_bars=(amt_longbars*self.length)/29.5\n links=int(round(self.length/link_spacing))+2 # add 2 extra link\n link_length=(self.width-0.25)+0.5\n \n return dict(rebar={'mainBar':int(round(long_bars))+1,#add 1 extra bar\n \t\t\t\t\t'links':links,\n \t\t\t\t\t'linkLength':link_length,\n \t\t\t\t\t'linkBars': int(round((link_length*links)/29.5))\n \t\t\t\t\t\n \t\t\t\t\t}\n \t\t\t\t\t\n )\n\n##----------------------------------- END ------------------------------------------------## \ndef test():\n foot=STRIP(1.5,3,86.67,.75)\n data=foot.data\n\n pad=PAD(4,3,10,2)\n\n print(\"STRIP FOUNDATION:\\n\",(data),\"\\nrebars:\\n\",foot.rebars(3,'m10',.667))\n #\"PAD FOUNDATION:\\n\",pad.data,pad.rebars(0.5,0.667)\n\n\n \n","repo_name":"2pees/FastEstimateII","sub_path":"main/Bq/footing.py","file_name":"footing.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26025504114","text":"import pytest\nimport numpy as np\nimport quafing\nfrom quafing.multipdf.multipdf import create_mdpdf_collection\nfrom quafing.multipdf.multi_dimensional_pdf import MultiDimensionalPDF\n\npath='tests/test_data/test_data.xlsx'\nmetadata_true,data_true=quafing.load(path)\ndata = quafing.preprocessing.PreProcessor(data_true,metadata_true)\ndata.select_columns(select_all=True)\ndata.set_cont_disc([], by_type=True, complement=True)\ndata.set_density_method(method='Discrete1D',cols=['b','c'])\ndata.split_to_groups(0)\n\ndef test_duplicate_labels():\n with pytest.warns(UserWarning, match=r'Duplicate labels were passed '):\n grouplabels = data._grouplabels.copy()\n grouplabels[0] = 'b'\n create_mdpdf_collection('factorized',data._groups,grouplabels,data._groupcolmetadata)\n\ndef test_length_data_metadata():\n with pytest.raises(RuntimeError):\n grouplabels = data._grouplabels.copy()\n grouplabels = np.append(grouplabels, 'e')\n create_mdpdf_collection('factorized',data._groups,grouplabels,data._groupcolmetadata)\n\ndef test_get_distance_matrix():\n mdpdfcol = create_mdpdf_collection('factorized',data._groups,data._grouplabels,data._groupcolmetadata)\n with pytest.raises(ValueError):\n mdpdfcol.get_distance_matrix()\n\ndef test_get_dissimilarity_matrix():\n mdpdfcol = create_mdpdf_collection('factorized',data._groups,data._grouplabels,data._groupcolmetadata)\n with pytest.raises(ValueError):\n mdpdfcol.get_dissimilarity_matrix()\n\ndef test_get_shortest_path_matrix():\n mdpdfcol = create_mdpdf_collection('factorized',data._groups,data._grouplabels,data._groupcolmetadata)\n with pytest.raises(ValueError):\n mdpdfcol.get_shortest_path_matrix()\n\ndef test_calculate_distance_matrix():\n mdpdfcol = create_mdpdf_collection('factorized',data._groups,data._grouplabels,data._groupcolmetadata)\n mdpdfcol.calculate_distance_matrix(method='hellinger',pwdist='rms')\n assert mdpdfcol.distance_matrix is not None\n\ndef test_calculate_shortest_path_matrix():\n mdpdfcol = create_mdpdf_collection('factorized',data._groups,data._grouplabels,data._groupcolmetadata)\n mdpdfcol.calculate_distance_matrix(method='hellinger',pwdist='rms')\n mdpdfcol.calculate_shortest_path_matrix()\n assert mdpdfcol.shortest_path_matrix is not None\n","repo_name":"SDCCA/quafing","sub_path":"tests/test_multipdf_collection.py","file_name":"test_multipdf_collection.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23073347474","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\nfrom mpl_toolkits import mplot3d\nfrom numba import njit, prange\nfrom copy import deepcopy as cp\nfrom time import time\nfrom matplotlib.ticker import FuncFormatter\n \nfmt = lambda x, pos: '{:.2}'.format(x)\nname_b = np.array(['Ni', 'Mg', 'Bi', 'Cu', 'Pb', 'La', 'Au', 'Pd', 'Sc', 'Y'])\nhm_b = np.array([57720, 1607, 11976, 25696, 32469, -56831, -15599, -20000, -89913, -54585])\nhs_b = np.array([40000, 15192, 25074, 12170, 19622, 25961-86.84*1000, -7889, -16402, -6552-96.0029*1e3, 10456-111.54390727682068*1e3])\n\n \n'''\nuser \n'''\nXs = np.linspace(1, 50, num=100)/100\n\nname = 'Ni'\n\nT = 600 # Temperature\nt = 5e-10 #GB thickness (A -> m)\n\ndmax = 1000\n'''\ncode\n'''\nselect = (name_b==name)\nhm = hm_b[select][0] #Enthalpy of mixing (J/mol)\nhs = hs_b[select][0] #Enthalpy of segregation (J/mol) (in dilute limit)\n\nd = np.linspace(1, dmax, num=1000)*1e-9 # nm -> m\nXgb = np.linspace(0, 1, num=1000)\nMd, MXgb = np.meshgrid(d, Xgb, indexing='ij')\n\n\n@njit(parallel=True)\ndef argminG(hm, hs, X, Md, MXgb, T, t):\n z = 12\n nu = 0.5\n k = 1.380649e-23*6.242e18 #JK-1 -> eVK-1\n \n Oag = 0.0103*1e-26/6 #m3/kmol -> m3\n Gag = 0.75*6.242e18 #Jm-2 -> eVm-2\n Ga = Gag\n Oa = Oag\n Gb = Ga \n Ob = Oa\n \n Hm = hm*6.242e18/6.02e23 #J/mol -> eV\n Hs = hs*6.242e18/6.02e23#J/mol -> eV\n \n Mfgb = 1 - ((Md-t)/Md)**3\n MXc = (X-Mfgb*MXgb)/(1-Mfgb)\n\n vmin = 1e-5\n dG = np.zeros(MXc.shape)\n dGmax=0.1\n for i in prange(MXc.shape[0]):\n for j in prange(MXc.shape[1]):\n if MXc[i][j]1-vmin:\n MXc[i][j]=1-vmin\n MXgb[i][j]=(1-X)/(1-Mfgb[i][j])\n dG[i][j]=dGmax\n \n \n wc = Hm/z\n wgb = 2*(wc - Hs/z - (Ob*Gb - Oa*Ga)/(2*t*z))\n \n MGc = z*wc*MXc*(1-MXc) + k*T*(MXc*np.log(MXc) + (1-MXc)*np.log(1-MXc))\n MGgb = z*wgb*MXgb*(1-MXgb) + k*T*(MXgb*np.log(MXgb) + (1-MXgb)*np.log(1-MXgb))\n MGgb += Oa*Ga*(1-MXgb)/t + Ob*Gb*MXgb/t\n G = (1-Mfgb)*MGc + Mfgb*MGgb\n G += z*nu*Mfgb*(MXgb-MXc)*((2*MXgb-1)*wgb - (Ob*Gb-Oa*Ga)/(z*t))\n G += dG\n \n ind = np.argmin(G)\n I = ind//G.shape[1]\n J = ind%G.shape[1]\n \n x = Md[I][J]*1e9\n y = MXgb[I][J]*100\n \n Mg = Ga-t*(MXgb/Oa)*(Hs + k*T*np.log(MXc))\n #Mg = Ga-t*((MXgb-MXc)/((1-MXc)*Oa))*(Hs + k*T*np.log(MXc))\n g = Mg[I][J]\n return x, y, g, G, Mg, I, J\n\nt0=time()\n\nD0 = np.zeros((Xs.shape[0]))\nX0 = np.zeros((Xs.shape[0]))\ng = np.zeros((Xs.shape[0]))\nGnc = []\nGss = []\nMg = []\n\nfor i in range(Xs.shape[0]):\n ans = argminG(hm, hs, Xs[i], cp(Md), cp(MXgb), T, t)\n D0[i] += ans[0]\n X0[i] = ans[1]\n g[i] = ans[2]\n \n Mg.append(ans[4])\n x = ans[0]\n y = ans[1]\n I = ans[5]\n J = ans[6]\n Gnc.append((ans[3])[I][J])\n Gss.append((ans[3])[-1][J])\n ''' \n vmin = np.nanmin(G[i])\n vmax = vmin+1*(np.nanmax(G[i])-vmin)#vmin + 0.1*(G.max()-G.min())\n levels = 500\n level_boundaries = np.linspace(vmin, vmax, levels + 1)\n\n plt.contourf(d*1e9, Xgb*100, np.transpose(G[i]), level_boundaries, vmin=vmin, vmax=vmax,extend='max')\n plt.plot(x, y, 'o', color='red')\n plt.xlabel('Grain size, nm')\n plt.ylabel('Solute concentrations at GB, %')\n cbar = plt.colorbar(format=FuncFormatter(fmt))\n cbar.ax.set_ylabel('$\\Delta G_{mix}, eV$', rotation=90)\n plt.title(f'Global solute concentration {round(100*Xs[i], 2)}%, T = {T}К')\n plt.show()\n '''\n '''\n plt.plot(d*1e9, G[i][:, J])\n plt.plot(x, G[i][I, J], 'x')\n plt.ylim((G[i][I, J], G[i][I, J]+0.00001))\n plt.show()\n \n plt.plot(Xgb*1e2, G[i][I, :])\n plt.plot(y, G[i][I, J], 'x')\n plt.show()\n '''\n '''\n 
vmin = np.nanmin(Mg[i])\n vmax = vmin+1*(np.nanmax(Mg[i])-vmin)#vmin + 0.1*(Mg.max()-Mg.min())\n levels = 500\n level_boundaries = np.linspace(vmin, vmax, levels + 1)\n\n plt.contourf(d*1e9, Xgb*100, np.transpose(Mg[i]), level_boundaries, vmin=vmin, vmax=vmax,extend='max')\n plt.plot(x, y, 'o', color='red')\n plt.xlabel('Grain size, nm')\n plt.ylabel('Solute concentrations at GB, %')\n cbar = plt.colorbar(format=FuncFormatter(fmt))\n cbar.ax.set_ylabel('$\\gamma eV/m^2$', rotation=90)\n plt.title(f'Global solute concentration {round(100*Xs[i], 2)}%, T = {T}К')\n plt.show()\n '''\nprint(time()-t0)\n'''\nplt.plot(Xs*100, Gnc)\nplt.plot(Xs*100, Gss, '--')\n'''\npoints_x = [0, 33.3, 50]\npoints_y = [0, -0.299, -0.254]\n'''\npoints_x = [0, 20, 33.3, 50]\npoints_y = [0, -0.2, -0.302, -0.287]\n'''\n#plt.plot(points_x, points_y)\n#plt.ylim((0,1000))\n#plt.show()\nymin = min(np.min(Gnc), np.min(Gss), np.min(points_y))\n\nymax = max(np.max(Gnc), np.max(Gss), 0)\n#plt.plot(Xs*100, (D0-D0.min())+ymin)\n\n#plt.show()\nplt.plot(Xs*100, D0, 'x')\nplt.ylim((0, 10))\nplt.show()\n\n\n\n \n \n \n \n ","repo_name":"MarchiyGV/AnalyticalModelOfSegregation","sub_path":"thermal_stability_numba_element.py","file_name":"thermal_stability_numba_element.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4244183998","text":"\"\"\"\nThe :mod:`sklearn.pls` module implements Partial Least Squares (PLS).\n\"\"\"\nimport numpy as np\nfrom scipy.linalg import pinv, svd\nfrom scipy.sparse.linalg import svds\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.model_selection import GridSearchCV, KFold\n\nfrom warnings import simplefilter\nsimplefilter(action='ignore', category=FutureWarning)\n\ndef _nipals_twoblocks_inner_loop(X, Y, mode=\"A\", max_iter=500, tol=1e-06,\n norm_y_weights=False):\n \"\"\"\n 提供svd(X'Y)的替代方案; 返回X'Y的第一个左右奇异向量。有关参数的含义,\n 请参见PLS。它类似于确定X'Y的特征向量和特征值的幂方法。 \n \"\"\"\n for col in Y.T:\n if np.any(np.abs(col) > np.finfo(np.double).eps):\n y_score = col.reshape(len(col), 1)\n break\n\n x_weights_old = 0\n ite = 1\n X_pinv = Y_pinv = None\n eps = np.finfo(X.dtype).eps\n\n if mode == \"B\":\n X_t = X.dtype.char.lower()\n Y_t = Y.dtype.char.lower()\n factor = {'f': 1E3, 'd': 1E6}\n\n cond_X = factor[X_t] * eps\n cond_Y = factor[Y_t] * eps\n\n # Inner loop of the Wold algo.\n while True:\n # 1.1 Update u: the X weights\n if mode == \"B\":\n if X_pinv is None:\n # We use slower pinv (same as np.linalg.pinv) for stability\n # reasons\n X_pinv = pinv(X, check_finite=False, cond=cond_X)\n x_weights = np.dot(X_pinv, y_score)\n else: # mode A\n # Mode A regress each X column on y_score\n x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)\n # If y_score only has zeros x_weights will only have zeros. In\n # this case add an epsilon to converge to a more acceptable\n # solution\n if np.dot(x_weights.T, x_weights) < eps:\n x_weights += eps\n # 1.2 Normalize u\n x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps\n # 1.3 Update x_score: the X latent scores\n x_score = np.dot(X, x_weights)\n # 2.1 Update y_weights\n if mode == \"B\":\n if Y_pinv is None:\n # compute once pinv(Y)\n Y_pinv = pinv(Y, check_finite=False, cond=cond_Y)\n y_weights = np.dot(Y_pinv, x_score)\n else:\n # Mode A regress each Y column on x_score\n y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)\n # 2.2 Normalize y_weights\n if norm_y_weights:\n y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps\n # 2.3 Update y_score: the Y latent scores\n y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)\n # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG\n x_weights_diff = x_weights - x_weights_old\n if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:\n break\n if ite == max_iter:\n break\n x_weights_old = x_weights\n ite += 1\n return x_weights, y_weights, ite\n\ndef _svd_cross_product(X, Y):\n C = np.dot(X.T, Y)\n U, s, Vh = svd(C, full_matrices=False)\n u = U[:, [0]]\n v = Vh.T[:, [0]]\n return u, v\n\ndef _center_scale_xy(X, Y, scale=True):\n \"\"\" Center X, Y and scale if the scale parameter==True\n Returns\n -------\n X, Y, x_mean, y_mean, x_std, y_std\n \"\"\"\n # center\n x_mean = X.mean(axis=0)\n X -= x_mean\n y_mean = Y.mean(axis=0)\n Y = Y- y_mean\n # scale\n if scale:\n x_std = X.std(axis=0, ddof=1)\n x_std[x_std == 0.0] = 1.0\n X /= x_std\n y_std = Y.std(axis=0, ddof=1)\n y_std[y_std == 0.0] = 1.0\n Y /= y_std\n else:\n x_std = np.ones(X.shape[1])\n y_std = np.ones(Y.shape[1])\n return X, Y, x_mean, y_mean, x_std, y_std\n\ndef svd_flip(u, v, u_based_decision=True):\n \"\"\" 符号校正以确保SVD的确定输出。调整u的列和v的行, 以使u中绝对值最大的列中的载荷始终为正。\n Parameters\n ----------\n u : ndarray\n u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can 
compute `np.dot(u * s, v)`.\n v : ndarray\n u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can compute `np.dot(u * s, v)`.\n u_based_decision : boolean, (default=True)\n 如果为True, 则使用u列作为符号翻转的基础。否则, 使用v的行。决定基于哪个变量的选择通常取决于算法。 \n Returns\n -------\n u_adjusted, v_adjusted : arrays with the same dimensions as the input.\n \"\"\"\n if u_based_decision:\n # columns of u, rows of v\n max_abs_cols = np.argmax(np.abs(u), axis=0)\n signs = np.sign(u[max_abs_cols, range(u.shape[1])])\n u *= signs\n v *= signs[:, np.newaxis]\n else:\n # rows of v, columns of u\n max_abs_rows = np.argmax(np.abs(v), axis=1)\n signs = np.sign(v[range(v.shape[0]), max_abs_rows])\n u *= signs\n v *= signs[:, np.newaxis]\n return u, v\n\nclass _PLS():\n def __init__(self, n_components=2, *, scale=True,\n deflation_mode=\"regression\",\n mode=\"A\", algorithm=\"nipals\", norm_y_weights=False,\n max_iter=500, tol=1e-06, copy=True):\n self.n_components = n_components\n self.deflation_mode = deflation_mode\n self.mode = mode\n self.norm_y_weights = norm_y_weights\n self.scale = scale\n self.algorithm = algorithm\n self.max_iter = max_iter\n self.tol = tol\n self.copy = copy\n\n def fit(self, X, Y):\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.shape[0]\n p = X.shape[1]\n q = Y.shape[1]\n\n if self.n_components < 1 or self.n_components > p:\n raise ValueError('Invalid number of components: %d' %\n self.n_components)\n if self.algorithm not in (\"svd\", \"nipals\"):\n raise ValueError(\"Got algorithm %s when only 'svd' \"\n \"and 'nipals' are known\" % self.algorithm)\n if self.algorithm == \"svd\" and self.mode == \"B\":\n raise ValueError('Incompatible configuration: mode B is not '\n 'implemented with svd algorithm')\n if self.deflation_mode not in [\"canonical\", \"regression\"]:\n raise ValueError('The deflation mode is unknown')\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # Residuals (deflated) matrices\n Xk = X\n Yk = Y\n # Results matrices\n self.x_scores_ = np.zeros((n, self.n_components))\n self.y_scores_ = np.zeros((n, self.n_components))\n self.x_weights_ = np.zeros((p, self.n_components))\n self.y_weights_ = np.zeros((q, self.n_components))\n self.x_loadings_ = np.zeros((p, self.n_components))\n self.y_loadings_ = np.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n Y_eps = np.finfo(Yk.dtype).eps\n for k in range(self.n_components):\n if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):\n break\n # 1) weights estimation (inner loop)\n # -----------------------------------\n if self.algorithm == \"nipals\":\n # Replace columns that are all close to zero with zeros\n Yk_mask = np.all(np.abs(Yk) < 10 * Y_eps, axis=0)\n Yk[:, Yk_mask] = 0.0\n\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,\n tol=self.tol, norm_y_weights=self.norm_y_weights)\n self.n_iter_.append(n_iter_)\n elif self.algorithm == \"svd\":\n x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # compute scores\n x_scores = np.dot(Xk, x_weights)\n if self.norm_y_weights:\n y_ss = 1\n else:\n y_ss = 
np.dot(y_weights.T, y_weights)\n y_scores = np.dot(Yk, y_weights) / y_ss\n # test for null variance\n if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:\n break\n # 2) Deflation (in place)\n # ----------------------\n # 这里可以减少内存占用:为了避免为秩1近似矩阵分配数据块,然后将其减去Xk,我们建议执行逐列通缩。\n #\n # - regress Xk's on x_score\n x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)\n # - subtract rank-one approximations to obtain remainder matrix\n Xk -= np.dot(x_scores, x_loadings.T)\n if self.deflation_mode == \"canonical\":\n # - regress Yk's on y_score, then subtract rank-one approx.\n y_loadings = (np.dot(Yk.T, y_scores) / np.dot(y_scores.T, y_scores))\n Yk -= np.dot(y_scores, y_loadings.T)\n if self.deflation_mode == \"regression\":\n # - regress Yk's on x_score, then subtract rank-one approx.\n y_loadings = (np.dot(Yk.T, x_scores) / np.dot(x_scores.T, x_scores))\n Yk -= np.dot(x_scores, y_loadings.T)\n # 3) Store weights, scores and loadings # Notation:\n self.x_scores_[:, k] = x_scores.ravel() # T\n self.y_scores_[:, k] = y_scores.ravel() # U\n self.x_weights_[:, k] = x_weights.ravel() # W\n self.y_weights_[:, k] = y_weights.ravel() # C\n self.x_loadings_[:, k] = x_loadings.ravel() # P\n self.y_loadings_[:, k] = y_loadings.ravel() # Q\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)\n self.x_rotations_ = np.dot( self.x_weights_, \n pinv(np.dot(self.x_loadings_.T, self.x_weights_),\n check_finite=False))\n if Y.shape[1] > 1:\n self.y_rotations_ = np.dot( self.y_weights_,\n pinv(np.dot(self.y_loadings_.T, self.y_weights_),\n check_finite=False))\n else:\n self.y_rotations_ = np.ones(1)\n\n if True or self.deflation_mode == \"regression\":\n # FIXME what's with the if?\n # Estimate regression coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n self.coefOrigin_ = np.dot(self.x_rotations_, self.y_loadings_.T)\n self.coef_ = self.coefOrigin_ * self.y_std_\n return self\n\n def transform(self, X, Y=None, copy=True):\n \"\"\"Apply the dimension reduction learned on the train data.\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of predictors.\n Y : array-like of shape (n_samples, n_targets)\n Target vectors, where n_samples is the number of samples and\n n_targets is the number of response variables.\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place normalization.\n Returns\n -------\n x_scores if Y is not given, (x_scores, y_scores) otherwise.\n \"\"\"\n # Normalize\n X -= self.x_mean_\n X /= self.x_std_\n # Apply rotation\n x_scores = np.dot(X, self.x_rotations_)\n if Y is not None:\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n Y -= self.y_mean_\n Y /= self.y_std_\n y_scores = np.dot(Y, self.y_rotations_)\n return x_scores, y_scores\n\n return x_scores\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space.\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where n_samples is the number of samples\n and n_components is the number of pls components.\n Returns\n -------\n x_reconstructed : array-like of shape (n_samples, n_features)\n Notes\n -----\n This transformation will only be exact if n_components=n_features\n \"\"\"\n # From 
pls space to original space\n X_reconstructed = np.matmul(X, self.x_loadings_.T)\n\n # Denormalize\n X_reconstructed *= self.x_std_\n X_reconstructed += self.x_mean_\n return X_reconstructed\n\n def predict(self, X, copy=True):\n \"\"\"Apply the dimension reduction learned on the train data.\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of predictors.\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place normalization.\n Notes\n -----\n This call requires the estimation of a p x q matrix, which may\n be an issue in high dimensional space.\n \"\"\"\n # Normalize\n X -= self.x_mean_\n X /= self.x_std_\n Ypred = np.dot(X, self.coef_)\n return Ypred + self.y_mean_\n\n def fit_transform(self, X, y=None):\n \"\"\"Learn and apply the dimension reduction on the train data.\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples and\n n_features is the number of predictors.\n y : array-like of shape (n_samples, n_targets)\n Target vectors, where n_samples is the number of samples and\n n_targets is the number of response variables.\n Returns\n -------\n x_scores if Y is not given, (x_scores, y_scores) otherwise.\n \"\"\"\n return self.fit(X, y).transform(X, y)\n\n def _more_tags(self):\n return {'poor_score': True,\n 'requires_y': False}\n\nclass PLSRegressionWithoutLib(_PLS):\n def __init__(self, n_components=2, *, scale=True,\n max_iter=500, tol=1e-06, copy=True):\n super().__init__(\n n_components=n_components, scale=scale,\n deflation_mode=\"regression\", mode=\"A\",\n norm_y_weights=False, max_iter=max_iter, tol=tol,\n copy=copy)\n\nclass PLSCanonicalWithoutLib(_PLS):\n def __init__(self, n_components=2, *, scale=True, algorithm=\"nipals\",\n max_iter=500, tol=1e-06, copy=True):\n super().__init__(\n n_components=n_components, scale=scale,\n deflation_mode=\"canonical\", mode=\"A\",\n norm_y_weights=True, algorithm=algorithm,\n max_iter=max_iter, tol=tol, copy=copy)\n\n# 使用官方库,可以多线程计算\ndef PLS_Estim1(X, y, maxLatentVarNum, cv, mpFlag=False):\n '''\n x :光谱矩阵 nxm\n y :浓度阵 (化学值)\n maxLatentVarNum :最大潜变量数\n cv :交叉验证数量\n '''\n if mpFlag:\n n_jobs = -1\n else:\n n_jobs = None\n parameters = {'n_components':[i+1 for i in range(maxLatentVarNum)],}\n clf = PLSRegression()\n GS = GridSearchCV(clf, param_grid=parameters,\n scoring='neg_root_mean_squared_error', # 以最大负均方误差(最小均方误差)为优化目标\n cv=cv, n_jobs=n_jobs) # 运用所有线程计算\n GS = GS.fit(X, y)\n\n latentVarNum = GS.best_params_['n_components'] # 最优主成分\n RMSE = -GS.best_score_ # 最小均方误差\n coef = np.ravel(GS.best_estimator_.coef_) # 最佳回归系数\n\n result = {\n 'RMSE': RMSE,\n 'latentVarNum': latentVarNum,\n 'coef': coef,\n }\n return result\n\n# 使用自己的库,不用多线程计算\ndef PLS_Estim2(X, y, maxLatentVarNum, cv, mpFlag=False):\n '''\n x :光谱矩阵 nxm\n y :浓度阵 (化学值)\n maxLatentVarNum :最大潜变量数\n cv :交叉验证数量\n '''\n scores = []\n model = PLSRegressionWithoutLib\n # model = PLSRegression\n for pcn in range(1, maxLatentVarNum+1):\n clf = model(n_components=pcn)\n KF = KFold(n_splits=cv)\n SR = 0\n for train_index, test_index in KF.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n clf.fit(X_train, y_train)\n y_pre = clf.predict(X_test)\n # print(np.ravel(y_pre).shape, y_test.shape)\n SR = SR + np.sum((np.ravel(y_pre) - y_test)**2)\n # SR += mean_squared_error(y_pre, y_test)/cv\n # 
plt.plot((np.ravel(y_pre) - y_test)**2)\n # plt.show()\n scores.append((SR/len(y))**0.5)\n\n bestIndex = np.argmin(scores)\n latentVarNum = bestIndex + 1 # 最优主成分\n RMSE = scores[bestIndex] # 最小均方误差\n clf = model(n_components=latentVarNum)\n clf.fit(X, y)\n coef = np.ravel(clf.coef_) # 最佳回归系数\n\n result = {\n 'RMSE': RMSE,\n 'latentVarNum': latentVarNum,\n 'coef': coef,\n }\n return result\n\n\n","repo_name":"Chilon-Wan/CAPR","sub_path":"auxiliary_fun/PLS.py","file_name":"PLS.py","file_ext":"py","file_size_in_byte":18081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23589469761","text":"#!/usr/bin/env python3\n\nTEST = 'large'\nIN = 'A-{}.in'.format(TEST)\nOUT = 'A-{}.out'.format(TEST)\n\n\ndef run(p, g):\n rem = [0] * p\n for gi in g:\n rem[gi % p] += 1\n fresh = 0\n \n fresh += rem[0]\n rem[0] = 0\n\n if p % 2 == 0:\n q = rem[p // 2] // 2\n fresh += q\n rem[p // 2] -= 2 * q\n\n if p > 2:\n q = min(rem[1], rem[p - 1])\n fresh += q\n rem[1] -= q\n rem[p - 1] -= q\n\n if p % 2 == 0 and rem[p // 2] > 0 and max(rem[1], rem[p - 1]) >= p // 2:\n fresh += 1\n rem[p // 2] -= 1\n if rem[1] >= p // 2:\n rem[1] -= p // 2\n else:\n rem[p - 1] -= p // 2\n\n q = rem[1] // p\n fresh += q\n rem[1] -= p * q\n\n q = rem[p - 1] // p\n fresh += q\n rem[p - 1] -= p * q\n\n if any(rem):\n fresh += 1\n\n return fresh\n\n\ndef main():\n with open(IN) as fin, open(OUT, 'w') as fout:\n t = int(fin.readline().strip())\n for i in range(t):\n n, p = map(int, fin.readline().split())\n g = list(map(int, fin.readline().split()))\n res = run(p, g)\n print('Case #{}: {}'.format(i + 1, res), file=fout)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_212/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21092931812","text":"import sys\nimport re\n\nre_proto = re.compile( '^(?:GLAPI|extern) +([\\w \\*]+) (?:APIENTRY )?([\\w \\*]+) \\((.*)\\);' )\nre_arg = re.compile( ' \\**(\\w+)$' )\nre_header_if = re.compile( '#ifdef (_VL_H|_DM_BUFFER_H_)' )\nre_header_end = re.compile( '#endif (/\\* _VL_H \\*/|/\\* _DM_BUFFER_H_ \\*/)' )\n\ndef get_arguments(args) :\n\tm = re_arg.search( args )\n\tif m :\n\t\treturn m.group( 1 )\n\treturn ''\n\ndef return_statement(ret_type) :\n\tif ret_type == 'void' :\n\t\treturn ''\n\telse :\n\t\treturn 'return '\n\ndef parse_names(args) :\n\tnames = []\n\tfor i in args :\n\t\tn = get_arguments( i )\n\t\tif n != '' :\n\t\t\tnames.append( n )\n\treturn names\n\ndef write_text_file(src_name, dest_file) :\n\twith open( src_name, 'r' ) as src_file :\n\t\tfor i in src_file :\n\t\t\tdest_file.write( i )\n\t\tdest_file.write( '\\n' )\n\ndef write_function(func, dest_file) :\n\targs = ','.join( func[2] )\n\tnames = parse_names( func[2] )\n\tret = return_statement( func[0] )\n\tdest_file.write( \\\n\t\t\t'inline ' + func[0] + ' ' + func[1] + '(' + args + ')\\n{\\n' \\\n\t\t\t+ '\\ttypedef ' + func[0] + ' (*proc_type)(' + args + ');\\n' \\\n\t\t\t+ '\\tstatic proc_type func_ptr = reinterpret_cast( glewlle::get_proc_address( \"' + func[1] + '\" ) );\\n' \\\n\t\t\t+ '\\tif( !func_ptr ) { throw std::runtime_error( \"glewlle error : ' + func[1] + '\" ); }\\n' \\\n\t\t\t+ '\\t' + ret + '(*func_ptr)( ' + ', '.join( names ) + ' );\\n' \\\n\t\t\t+ '}\\n\\n' )\n\ndef parse_functions(src_file, dest_file) :\n\tflg = 0\n\tfor i in src_file :\n\t\tm = re_header_if.match( i )\n\t\tif m :\n\t\t\tdest_file.write( i )\n\t\t\tflg = 1\n\t\t\tcontinue\n\t\tm = re_header_end.match( i )\n\t\tif m :\n\t\t\tdest_file.write( i )\n\t\t\tflg = 0\n\t\t\tcontinue\n\t\tm = re_proto.match( i )\n\t\tif not m :\n\t\t\tcontinue\n\t\twrite_function( [m.group( 1 ), m.group( 2 ), m.group( 3 ).split( ',' )], dest_file );\n\ndef include_guard_str(name) :\n\ts = name.replace( '../', '' )\n\ts = s.upper()\n\ts = s.replace( '.', '_' )\n\treturn s + '_'\n\ndef make_file(name, src_file, file_list) :\n\tdest_file = open( name, 'w' )\n\tdest_file.write( '// ' + name.replace( '../', '' ).replace( '.hpp', '' ) + '\\n' )\n\twrite_text_file( 'copy.txt', dest_file )\n\tguard = include_guard_str( name )\n\n\tdest_file.write( \\\n\t\t\t'#ifndef ' + guard + '\\n' \\\n\t\t\t+ '#define ' + guard + '\\n\\n' );\n\tfor i in file_list :\n\t\twrite_text_file( i, dest_file )\n\tdest_file.write( '#include <' + src_file.replace( '../', '' ) + '>\\n\\n' );\n\n\twith open( src_file, 'r' ) as f :\n\t\tparse_functions( f, dest_file )\n\tdest_file.write( '\\n' )\n\tdest_file.write( '#endif // ' + guard + '\\n' )\n\ndef main() :\n\tmake_file( '../glewlle.hpp', '../glext.h', ['include.pp'] )\n\tmake_file( '../glewlle_wgl.hpp', '../wglext.h', ['glewlle_include.pp'] )\n\tmake_file( '../glewlle_glx.hpp', '../glxext.h', ['glewlle_include.pp'] )\n\nmain()\n","repo_name":"LNSEAB/glewlle","sub_path":"src/glewlle_make_header.py","file_name":"glewlle_make_header.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1500392069","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport serial\nimport serial.tools.list_ports\n\n\ndef hci_parse_group():\n i = 1\n i +=1\n print(i)\n # 串口检测\n\nclass serial_port(object):\n def __init__(self) -> None:\n super().__init__()\n self.ser = serial.Serial()\n self.port_check()\n self.open_port()\n self.close_port()\n\n def port_check(self):\n # 检测所有存在的串口,将信息存储在字典中\n Com_Dict = {}\n port_list = list(serial.tools.list_ports.comports())\n print('port_list:',port_list)\n for port in port_list:\n Com_Dict[\"%s\" % port[0]] = \"%s\" % port[1]\n print('port[0]:',port[0],'prot[1]:',port[1])\n print(port)\n print(Com_Dict)\n if len(Com_Dict) == 0:\n print('no serial')\n def open_port(self):\n self.ser.port = '/dev/ttyUSB0'\n self.ser.baudrate = 115200\n self.ser.bytesize = 8\n self.ser.stopbits = 1\n self.ser.parity = 'N'\n try:\n self.ser.open()\n except:\n print('open port error')\n return None\n print('open port suss')\n def close_port(self):\n try:\n self.ser.close()\n except:\n print('close port faild')\n print('close port suss')\n\nif __name__ == '__main__':\n hci_parse_group()\n test = serial_port()\n #test.open_port()","repo_name":"jjkkwym/learning_records","sub_path":"language/python/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1586999310","text":"import cv2\nimport numpy as np\nimport json\ndef test(arr=None,T=150):\n \n \"\"\"arr = [ [0,0,1,0,1],\n [0,1,0,0,1],\n [1,0,1,1,1],\n [0,0,0,0,0],\n [0,1,1,1,0],\n [1,0,0,1,0] ]\"\"\"\n\n #size = (6,5)\n size = (arr.shape[0],arr.shape[1])\n newArr = []\n for i in range(size[0]+2):\n newArr.append(list())\n for j in range(size[1]+2):\n if(i>0 and size[0]>=i and j>0 and size[1]>=j):\n newArr[i].append(arr[i-1][j-1])\n else:\n newArr[i].append(0)\n\n # first pass\n eqi_list = []\n label = 1\n for i in range(1,size[0]+2):\n for j in range(1,size[1]+2):\n if(newArr[i][j]!=0):\n # check for different nei value\n neighbors = [newArr[i][j-1],newArr[i-1][j-1],newArr[i-1][j],newArr[i-1][j+1]]\n non_zero_nei = -1\n # check for first non zero\n for k in range(len(neighbors)):\n if(neighbors[k]!=0):\n non_zero_nei = k\n break\n nei_value = neighbors[non_zero_nei]\n for n in neighbors[non_zero_nei+1:]:\n # check for different value\n if(n!=0 and n!=nei_value):\n big = max(n,nei_value)\n small = min(n,nei_value)\n if(small==0):\n continue\n if((big,small) not in eqi_list):\n eqi_list.append((big,small))\n \n # label assign\n min_value = 256\n if(newArr[i][j-1]!=0):\n min_value = min(min_value,newArr[i][j-1])\n if(newArr[i-1][j-1]!=0):\n min_value = min(min_value,newArr[i-1][j-1])\n if(newArr[i-1][j]!=0):\n min_value = min(min_value,newArr[i-1][j])\n if(newArr[i-1][j+1]!=0):\n min_value = min(min_value,newArr[i-1][j+1])\n \n if(min_value >= 256):\n newArr[i][j] = label\n label += 1\n else:\n newArr[i][j] = min_value\n #print(newArr[i])\n\n # remove padding\n arr = list()\n for i in range(size[0]):\n arr.append(list())\n for j in range(size[1]):\n arr[i].append(newArr[i+1][j+1])\n\n # label replacing\n for i in range(size[0]):\n for j in range(size[1]):\n if(arr[i][j]!=0):\n for k in eqi_list:\n if(arr[i][j]==k[0]):\n arr[i][j] = k[1]\n #print(arr[i])\n\n #print(eqi_list)\n \n size_list = list()\n # count for area\n for i in range(label):\n size_list.append(0)\n for i in range(size[0]):\n for j in range(size[1]):\n if(arr[i][j]!=0):\n size_list[arr[i][j]]+=1\n\n edge_dict = dict() # store edges\n # check for area\n for i in range(size[0]):\n for j in range(size[1]):\n if(size_list[arr[i][j]] > T):\n label = arr[i][j]\n if(label not in edge_dict):\n edge_dict[label] = [10000,10000, 0,0]\n (edge_dict[label][0]) = min(edge_dict[label][0],i)\n (edge_dict[label][1]) = min(edge_dict[label][1],j)\n (edge_dict[label][2]) = max(edge_dict[label][2],i)\n (edge_dict[label][3]) = max(edge_dict[label][3],j)\n \n return edge_dict\n\ndef preprocess(to_save=True):\n i = 0\n edge_dicts = list()\n capture = cv2.VideoCapture(\"car.mp4\")\n backSub = cv2.createBackgroundSubtractorMOG2()\n \n if(not capture.isOpened()):\n print(\"cannot open video\")\n return\n with open('cars.txt', 'w') as f:\n while True:\n i+=1\n ret, frame = capture.read()\n if frame is None:\n break\n # back subtract\n fgMask = backSub.apply(frame)\n\n # remove shadow\n shadowval = backSub.getShadowValue()\n ret, nmask = cv2.threshold(fgMask, shadowval, 255, cv2.THRESH_BINARY)\n d = test(nmask)\n if(i%10==0):\n print(i)\n edge_dicts.append(d)\n #print(edge_dicts)\n\n if(to_save):\n for j in range(len(edge_dicts)):\n f.write(\"\\n\")\n for d in edge_dicts[j]:\n edges = edge_dicts[j][d]\n for e in edges:\n f.write(str(e)+' ')\n\n return edge_dicts\n\ndef main(edge_dicts):\n #print(edge_dicts)\n capture = cv2.VideoCapture(\"car.mp4\")\n backSub = cv2.createBackgroundSubtractorMOG2()\n \n if(not capture.isOpened()):\n 
print(\"cannot open video\")\n return\n i = 0\n while True:\n ret, frame = capture.read()\n if frame is None:\n break\n # draw rec\n red_color = (0, 255, 0) # BGR\n\n #for edge_d in edge_dicts:\n if(i>=len(edge_dicts)):\n i = 0\n\n # [224, 130, 239, 166]\n for j in range(0,len(edge_dicts[i]),4):\n #edges = edge_dicts[i][d]\n #print((edges[1][0], edges[0][1]), (edges[1][0], edges[1][1]))\n cv2.rectangle(frame, (edge_dicts[i][j+1], edge_dicts[i][j+0]), (edge_dicts[i][j+3], edge_dicts[i][j+2]), red_color, 3, cv2.LINE_AA)\n cv2.imshow('Frame',frame) # shape (240, 320)\n \n if cv2.waitKey(30) & 0xFF == ord('q'):\n break\n i+=1\n capture.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n # d = preprocess()\n # about 740 frames\n d = list()\n # load file\n with open(\"cars.txt\",'r') as f:\n for line in f:\n line = line.split(' ')\n line = line[:-1]\n for i,l in enumerate(line):\n line[i] = int(l)\n d.append(line)\n \n #print(d)\n main(d)\n","repo_name":"ben900926/UVA_pilot","sub_path":"Lab03/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20928325267","text":"\nimport datetime\nimport matplotlib.pyplot as plt\n\nimport pickers_model.agent.agent_portrayal as ap\n\n#without the plot\ndef simulation_step( i, model ):\n \n model.step()\n\n#with the plot\ndef update_plot( i, model ): \n \n model.step()\n\n for picker in model.pickers:\n x,y = picker.pos\n picker.scatterplot.set_offsets( (x,y) )\n picker.scatterplot.set_color( ap.agent_portrayal_pyplot( picker )[\"Color\"] )\n\n for robot in model.robots:\n x,y = robot.pos\n robot.scatterplot.set_offsets( (x,y) )\n robot.scatterplot.set_color( \"cyan\" ) \n \n plt.xlabel( str( model.start_time_datetime + datetime.timedelta( 0, i*model.step_size) ) )\n\n","repo_name":"FrankIvankovic/TaskAssigner","sub_path":"update_plot.py","file_name":"update_plot.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30880262716","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n num = bin(n).replace(\"0b\",\"\")\r\n count = 0\r\n prev = num[0]\r\n cnt = []\r\n for i in range(1, len(num)):\r\n if num[i] == \"1\": \r\n if prev == num[i]:\r\n count += 1\r\n else:\r\n count = 0\r\n cnt.append(count+1)\r\n prev = num[i]\r\n print(max(cnt))\r\n ","repo_name":"hr-rithik/30daysofcode-HackerRank","sub_path":"Day_10.py","file_name":"Day_10.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23423133611","text":"t = int(input())\nfor i in range(1, t + 1):\n\tprint (\"Case #\", i, \": \", sep ='', end='')\n\tc, f, x = tuple(float(i) for i in input().split())\n\tcurrent = 2.0\n\tspent = 0.0\n\twhile True:\n\t\tneed = x / current\n\t\tifBuy = c / current + x / (current + f)\n\t\tif need > ifBuy:\n\t\t\tspent += c / current\n\t\t\tcurrent += f\n\t\telse:\n\t\t\tprint (spent + need)\n\t\t\tbreak\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1850.py","file_name":"1850.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14189313056","text":"def make_car(marca, modelo, **auto_details):\n\t\"\"\"\n\tCrea un auto a partir del nombre y modelo.\n\tToma otros detalles como parametros opcionales.\n\t\"\"\"\n\tauto_details['marca'] = marca\n\tauto_details['modelo'] = modelo\n\treturn auto_details\n\nauto = make_car('volkswagen','fox', levantavidrios=False, cierre_centralizado=False)\n\nprint(auto)","repo_name":"Joaquin-Urruti/Python-Crash-Course","sub_path":"Chapter 8/ch8_p174_814_coche.py","file_name":"ch8_p174_814_coche.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43387019663","text":"from django.urls import path \nfrom . import views\n\nurlpatterns = [\n path('',views.apiOverview, name=\"api-overview\"),\n path('all/',views.all, name=\"all-members\"),\n path('one//',views.one, name=\"detail-view\"),\n path('create/',views.create, name='create-member'),\n path('update//',views.update, name='update-member'),\n path('delete//',views.delete, name='delete-member'),\n]\n","repo_name":"gade-raghav/APIs","sub_path":"bdapi/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6601352988","text":"from operator import add, sub, mul, itruediv, neg\nfrom math import factorial\nfrom inspect import signature\n\n\ndef names(n):\n chars = \"xyz\"\n numc = len(chars)\n for i in range(n):\n idx = i % numc\n subscript = '' if i < numc else str(i // numc)\n yield chars[idx] + subscript\n\n\nclass Operator:\n def __init__(self, name, f, max_applications=None):\n self.name = name\n self.f = f\n self.__argcount = None\n self.max_applications = max_applications\n\n def __call__(self, *args, **kwargs):\n return self.f(*args, **kwargs)\n\n def __str__(self):\n return f'{self.name}({\", \".join(names(self.argcount))})'\n\n @property\n def argcount(self):\n if self.__argcount is None:\n self.__argcount = len(signature(self.f).parameters)\n\n return self.__argcount\n\n# unary\nneg = Operator('-', neg)\nfact = Operator('!', factorial, max_applications=1)\n\n# binary\nadd = Operator('+', add)\nsub = Operator('-', sub)\nmul = Operator('*', mul)\ndiv = Operator('/', itruediv)\npow = Operator('^', lambda a, b: float(a) ** b)\nroot = Operator('√', lambda n, r: n ** (1 / r))\n\n# the arithmetic operators\narith_ops = add, sub, mul, div, pow","repo_name":"thechosenreader/24solver","sub_path":"operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23604326631","text":"import sys\r\n\r\ninf = open(sys.argv[1], 'r')\r\noutf = open(sys.argv[1]+'.result', 'w')\r\n\r\ndef gcd(a,b):\r\n while b > 0: a,b = b, a%b\r\n return a\r\n\r\nnumCases = int(inf.readline())\r\ncnt = 0\r\nwhile cnt < numCases:\r\n\tnumbers = []\r\n\tmyarray = inf.readline().split(' ')\r\n\tnumNumbers = int(myarray[0])\r\n\ti = 0\r\n\twhile i < numNumbers:\r\n\t\tnumbers.append(int(myarray[i+1]))\r\n\t\ti = i + 1\r\n\tnumbers.sort()\r\n\tdiffs=[]\r\n\ti = 0\r\n\twhile i < len(numbers)-1:\r\n\t\tdiffs.append(numbers[i+1]-numbers[i])\r\n\t\ti = i + 1\r\n\tmingcd = 0\r\n\tif len(diffs) == 1:\r\n\t\tmingcd = diffs[0]\r\n\ti = 0\t\r\n\twhile i < len(diffs)-1:\r\n\t\tcurrgcd = gcd(diffs[i],diffs[i-1])\r\n\t\tif mingcd == 0:\r\n\t\t\tmingcd = currgcd\r\n\t\tif mingcd > currgcd and currgcd != 0:\r\n\t\t\tmingcd = currgcd\r\n\t\ti = i + 1\t\t\r\n\tleftBorder = (numbers[0]/mingcd)*mingcd\r\n\tif leftBorder == numbers[0]:\r\n\t\tresult = 0\r\n\telse:\t\r\n\t\trightBorder = (numbers[0]/mingcd+1)*mingcd\r\n\t\tresult = rightBorder - numbers[0]\r\n#\tprint result\t\r\n#\ti = 0\r\n#\tminDiff = 0\r\n#\twhile i < numNumbers-1:\r\n#\t\tcurrDiff = numbers[i+1]-numbers[i]\r\n#\t\tif minDiff == 0:\r\n#\t\t\tminDiff = currDiff\r\n#\t\tif currDiff != 0 and minDiff > currDiff:\r\n#\t\t\tminDiff = currDiff\r\n#\t\ti = i + 1\r\n#\tif minDiff != 0:\r\n#\t\tleftBorder = (numbers[0]/minDiff)*minDiff\r\n#\t\tif leftBorder == numbers[0]:\r\n#\t\t\tresult = 0\r\n#\t\telse:\t\r\n#\t\t\trightBorder = (numbers[0]/minDiff+1)*minDiff\r\n#\t\t\tresult = rightBorder - numbers[0]\r\n#\telse:\r\n#\t\tresult = 0\r\n#\t\tprint result\r\n\r\n\toutf.write('Case #' + str(cnt+1) + ': ' + str(result) + '\\n')\r\n\tcnt = cnt + 1\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_54/696.py","file_name":"696.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74123649475","text":"\"\"\"Calcipy-Invoke Defaults.\"\"\"\n\nimport json\nfrom contextlib import suppress\nfrom pathlib import Path\n\nfrom beartype import beartype\nfrom invoke.context import Context\n\nfrom ._invoke import Collection\n\nDEFAULTS = {\n 'tags': {\n 'filename': 'CODE_TAG_SUMMARY.md',\n 'ignore_patterns': '',\n },\n 'test': {\n 'min_cover': '0',\n 'out_dir': 'releases/tests',\n },\n 'type': {\n 'out_dir': 'releases/tests/mypy_html',\n },\n}\n\n\n@beartype\ndef from_ctx(ctx: Context, group: str, key: str) -> str:\n \"\"\"Safely extract the value from the context or the defaults.\n\n Instead of `ctx.tests.out_dir` use `from_ctx(ctx, 'test', 'out_dir')`\n\n \"\"\"\n with suppress(KeyError):\n return str(ctx.config[group][key])\n return str(DEFAULTS[group][key])\n\n\n@beartype\ndef new_collection() -> Collection:\n \"\"\"Initialize a collection with the combination of merged and project-specific defaults.\"\"\"\n ns = Collection('')\n\n # Merge default and user configuration\n ns.configure(DEFAULTS)\n config_path = Path('.calcipy.json')\n if config_path.is_file():\n ns.configure(json.loads(config_path.read_text(encoding='utf-8')))\n\n return ns\n","repo_name":"KyleKing/calcipy","sub_path":"calcipy/tasks/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"}
+{"seq_id":"24348759927","text":"import os\nimport sys\n\nfrom py_lib.lua_init import lua\nfrom py_lib.parse_init_luas import items, typemap, sizemap\nfrom py_lib.query_items import get_anim, get_new, has_key_new\nfrom py_lib.fix_pre import fix_pre_hotspots\nfrom py_lib.drawlist import set_static_drawitems, set_compare_drawitems, \\\n set_over_drawitems, set_labels, update_label_0, fetch_image\nfrom py_lib.select_item import select_new_item, select_anim\n\ncurrent_item = lua.table(name = \"\", anim = \"\", new = None, labels = [])\n\ndef format_new_label() :\n # TODO: show if can't save\n current_item.labels[0] = \"{:s}: {:s} {:d},{:d} {:s}\".format(\n current_item.name, current_item.anim,\n current_item.new.hot_x, current_item.new.hot_y,\n current_item.new.status)\n\ndef update_new_label() :\n format_new_label()\n update_label_0(current_item.labels[0])\n\ndef set_current(name, anim) :\n pre = get_anim(name, \"pre\", anim)\n if pre.hot_x == None or pre.hot_y == None :\n fix_pre_hotspots(name)\n fetch_image(pre)\n new = get_new(name, anim)\n old = None\n if \"old\" in items[name] :\n it_old = items[name].old\n if anim in it_old :\n old = it_old[anim]\n elif anim != \"idle\" and \"idle\" in it_old :\n old = it_old.idle\n else :\n old = it_old[list(it_old)[0]]\n # TODO: labels: old and pre hotspots, old anim name if different\n if old :\n set_static_drawitems(sizemap[name], [new, old, pre])\n labels = [\"new\", \"current\", \"preliminary\"]\n else :\n set_static_drawitems(sizemap[name], [new, pre])\n labels = [\"new\", \"preliminary\"]\n current_item.name = name\n current_item.anim = anim\n current_item.new = new\n current_item.labels = labels\n format_new_label()\n set_labels(current_item.labels)\n\n# let's select one before initialising main window\nbuilding_selected = False\nwhile not building_selected :\n (name, anim) = select_new_item()\n if not name :\n print(\"Selection cancelled, exiting.\", file = sys.stderr)\n sys.exit(0)\n if typemap[name] == \"static\" :\n building_selected = True\n else :\n warning(\"Only buildings are implemented.\")\nset_current(name, anim)\n\ndef switch_anim() :\n anim = select_anim(current_item.name, [\"pre\"])\n if anim and anim != current_item.anim :\n set_current(current_item.name, anim)\n\ndef change_hotspot(dir, val) :\n current_item.new.status = \"changed\"\n current_item.new[\"hot_\" + dir] += val\n update_new_label()\n\ndef reset_hotspot() :\n # 1. It may still be None\n # 2. 
current_item.new = get_new() would invalidate the drawlist entry\n n = get_new(current_item.name, current_item.anim)\n current_item.new.hot_x = n.hot_x\n current_item.new.hot_y = n.hot_y\n current_item.new.status = n.status\n update_new_label()\n\ndef pre_hotspot() :\n p = get_anim(current_item.name, \"pre\", current_item.anim)\n if current_item.new.hot_x != p.hot_x :\n current_item.new.status = \"changed\"\n current_item.new.hot_x = p.hot_x\n if current_item.new.hot_y != p.hot_y :\n current_item.new.status = \"changed\"\n current_item.new.hot_y = p.hot_y\n # doesn't hurt, even if nothing was actually done\n if current_item.new.status == \"changed\" :\n update_new_label()\n\ndef save_hotspot() :\n if current_item.new.status == \"stored\" :\n return\n item = items[current_item.name].new\n hssl = [\"\"]\n if not item.file_ok :\n warning(\"Hotspot can't be saved!\")\n return\n if os.access(item.hsfile, os.F_OK) :\n if os.access(item.hsfile, os.R_OK | os.W_OK) :\n hsf = open(item.hsfile)\n hssl = hsf.readlines()\n hsf.close()\n else :\n item.file_ok = False\n current_item.file_ok = False\n warning(\"Hotspot can't be saved!\")\n return\n elif not os.access(os.path.dirname(item.hsfile), os.W_OK | os.X_OK) :\n item.file_ok = False\n current_item.file_ok = False\n warning(\"Hotspot can't be saved!\")\n return\n hssl[0] = \"{:d},{:d}\\n\".format(current_item.new.hot_x, current_item.new.hot_y)\n hsf = open(item.hsfile, \"w\")\n for l in hssl :\n hsf.write(l)\n hsf.close()\n item.hot_x = current_item.new.hot_x\n item.hot_y = current_item.new.hot_y\n item.status = \"stored\"\n current_item.new.status = \"stored\"\n update_new_label()\n\n","repo_name":"tothxa/wl_graphics_re-export","sub_path":"scripts/py_lib/current_item.py","file_name":"current_item.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"31984860174","text":"import random\nfrom dataclasses import *\n\nplaceNames = [\n \"Ashbourne\",\n \"Bexhill\",\n \"Cheltenham\",\n \"Dorking\",\n \"Epsom\",\n \"Farnham\",\n \"Gillingham\",\n \"Harrogate\",\n \"Ilfracombe\",\n \"Jarrow\",\n \"Kendal\",\n \"Louth\",\n \"Matlock\",\n \"Newark\",\n \"Ormskirk\",\n \"Penzance\",\n \"Queenborough\",\n \"Rye\",\n \"Scarborough\",\n \"Tewkesbury\",\n]\n\n\n@dataclass\nclass PlaceDesc:\n name: str\n desc: str\n\n def __init__(self, type, name, desc=\"\"):\n self.name = f\"{type} of {name}\"\n self.desc = \"\"\n\n\n@dataclass\nclass Stats:\n pop: int\n dip: int\n mil: int\n prd: int\n\n def __init__(self, pop=0, dip=0, mil=0, prd=0):\n self.pop = pop\n self.dip = dip\n self.mil = mil\n self.prd = prd\n\n\ndef quickStats(self):\n pop = 0\n dip = 0\n mil = 0\n prd = 0\n\n for sp in self.subPlaces:\n pop += sp.stats.pop\n dip += sp.stats.dip\n mil += sp.stats.mil\n prd += sp.stats.prd\n\n self.stats = Stats(pop, dip, mil, prd)\n\n\n@dataclass\nclass Place:\n desc: PlaceDesc\n ruler: str\n stats: Stats\n subPlaces: list\n\n def __init__(self, desc=None, ruler=None, stats=None):\n self.desc = PlaceDesc(desc, random.choice(placeNames))\n self.ruler = ruler\n self.stats = stats\n self.subPlaces = []\n","repo_name":"youhengzhou/TheSimsPy","sub_path":"prototype/Recursion/lib/place/placelib/placelib.py","file_name":"placelib.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"9384327589","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom torch import nn\r\nfrom torch.nn import TransformerEncoderLayer\r\nimport logging\r\nimport os\r\nimport torch\r\nimport json\r\n\r\n\r\nclass TransformerUnit(nn.Module):\r\n \"\"\"Transformer customized\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n d_model: int,\r\n n_heads: int = 8,\r\n out_features: int = -1):\r\n super(TransformerUnit, self).__init__()\r\n\r\n self.config_keys = ['d_model', 'n_heads', 'out_features']\r\n self.d_model = d_model\r\n self.n_heads = d_model\r\n self.out_features = out_features\r\n # activation by default the GELU\r\n self.transformerlayer = TransformerEncoderLayer(\r\n d_model=d_model,\r\n nhead=n_heads,\r\n activation='gelu')\r\n self.linear = nn.Linear(\r\n d_model,\r\n out_features,\r\n bias=True)\r\n\r\n def forward(self, features):\r\n \"\"\"Returns embeddings\"\"\"\r\n '''\r\n '''\r\n uttrs = features[0]\r\n uttrs_tr = self.transformerlayer(uttrs)\r\n uttrs_ln = self.linear(uttrs_tr)\r\n # features[0] = uttrs_ln\r\n # return features\r\n return (uttrs_ln, features[1], features[2], features[3], features[4])\r\n\r\n def get_config_dict(self):\r\n # get current values of several keys you have mentioned\r\n return {key: self.__dict__[key] for key in self.config_keys}\r\n\r\n def save(self, output_path: str):\r\n # logging.info('The transformer accepts a save file: '\r\n # 'save to {}'.format(output_path))\r\n torch.save(self.state_dict(), os.path.join(\r\n output_path, 'pytorch_model.bin'))\r\n\r\n with open(os.path.join(\r\n output_path,\r\n 'transformerunit_config.json'), 'w')\\\r\n as fOut:\r\n json.dump(self.get_config_dict(), fOut, indent=2)\r\n\r\n # Static is not necessary, here, we use static since the load method\r\n # is called in the transformer __init__\r\n @staticmethod\r\n def load(input_path: str, device_load: str = 'cpu'):\r\n '''\r\n not necessarily load to GPU actually.\r\n\r\n You can always load the model to cpu and transfer it to\r\n cuda:0 in the sequential model. This is what bert is\r\n actually adopting. No need to directly load the model\r\n to GPU.\r\n '''\r\n with open(os.path.join(\r\n input_path, 'transformerunit_config.json')) as fIn:\r\n config = json.load(fIn)\r\n\r\n # Here, we don't adopt the \"load from the initialization\r\n # of the model\" scheme. We load the model from the\r\n # .bin, so we don't have to implement the model_name_or_path\r\n # in the __init__()\r\n\r\n # **dict\r\n # *list\r\n transformerunit = TransformerUnit(**config)\r\n device = torch.device(device_load)\r\n transformerunit.load_state_dict(torch.load(os.path.join(\r\n input_path, 'pytorch_model.bin'), map_location=device))\r\n return transformerunit\r\n","repo_name":"something678/TodKat","sub_path":"src/transformerunit.py","file_name":"transformerunit.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"}
+{"seq_id":"29966676907","text":"#!/usr/bin/env python3\n\nimport re\nfrom test_more import ok, eq\nfrom graph_tools import Graph\n\ng = Graph(directed=True)\nbuf = \"\"\"digraph sample {\n 1;\n 2;\n 1 -> 2;\n}\n\"\"\".splitlines()\ng.import_graph('dot', buf)\neq(g.nvertices(), 2)\neq(g.nedges(), 1)\nok(g.is_directed())\nok(g.has_edge(1, 2))\n\ng = Graph(directed=True)\nbuf = \"\"\"// comment here\ndigraph sample {\n 1;\n/* another comment\n here */\n 2;\n 4;\n 1 -> 2;\n 1 -> 4;\n}\n\"\"\".splitlines()\ng.import_graph('dot', buf)\neq(g.nvertices(), 3)\neq(g.nedges(), 2)\nok(g.is_directed())\nok(g.has_edge(1, 2))\nok(g.has_edge(1, 4))\n\ng = Graph(directed=True, multiedged=True)\nbuf = \"\"\"// comment here\ndigraph sample {\n 1 [color=yellow];\n 2;\n 1 -> 2 [bw=\"1.5Mb\",delay=\"10ms\",\n type=\"RED\"];\n 1 -> 2;\n}\n\"\"\".splitlines()\ng.import_graph('dot', buf)\neq(g.nvertices(), 2)\neq(g.nedges(), 2)\neq(g.is_directed(), 1)\nok(g.has_edge(1, 2))\neq(g.get_vertex_attribute(1, 'color'), 'yellow')\neq(g.get_edge_attribute_by_id(1, 2, 0, 'bw'), '1.5Mb')\neq(g.get_edge_attribute_by_id(1, 2, 0, 'delay'), '10ms')\neq(g.get_edge_attribute_by_id(1, 2, 0, 'type'), 'RED')\n","repo_name":"h-ohsaki/graph-tools","sub_path":"test/40_dot.py","file_name":"40_dot.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"}
+{"seq_id":"30552994623","text":"# Determine the number of times bit values appear in a binary number\ndef part1(values):\n # We iterate through the list of binary numbers and count the 1's in each position\n total_values = len(values)\n ones_counter = [0 for _ in range(len(values[0]))]\n for binary in values:\n for index, digit in enumerate(binary):\n if digit == '1':\n ones_counter[index] += 1\n zeros_counter = [total_values - x for x in ones_counter]\n print(f'Zeroes: {zeros_counter}')\n print(f'Ones: {ones_counter}')\n\n gamma = ''\n epsilon = ''\n for index in range(len(ones_counter)):\n if ones_counter[index] > zeros_counter[index]:\n gamma += '1'\n epsilon += '0'\n else:\n gamma += '0'\n epsilon += '1'\n print(f'Gamma: {gamma} ({int(gamma, 2)})')\n print(f'Epsilon: {epsilon} ({int(epsilon, 2)})')\n\n return int(gamma, 2) * int(epsilon, 2)\n\ndef part2(values):\n # We go through the process of iterating and paring down twice to determine the most/least common\n def process_list(remaining, index, most_common):\n if len(remaining) == 1:\n return remaining[0]\n zeros_list = []\n ones_list = []\n for binary in remaining:\n if binary[index] == '0':\n zeros_list.append(binary)\n else:\n ones_list.append(binary)\n if most_common:\n if len(ones_list) >= len(zeros_list):\n return process_list(ones_list, index + 1, most_common)\n else:\n return process_list(zeros_list, index + 1, most_common)\n else:\n if len(zeros_list) <= len(ones_list):\n return process_list(zeros_list, index + 1, most_common)\n else:\n return process_list(ones_list, index + 1, most_common)\n\n oxygen = process_list(values, 0, True)\n carbondioxide = process_list(values, 0, False)\n print(f'Oxygen: {oxygen} ({int(oxygen, 2)})')\n print(f'CO2: {carbondioxide} ({int(carbondioxide, 2)})')\n\n return int(oxygen, 2) * int(carbondioxide, 2)\n","repo_name":"theknoxinator/AoC","sub_path":"2021/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39391624530","text":"from django import forms\nfrom . models import Juego, Compañia, Producto\n\nclass JuegoForm(forms.ModelForm):\n nombre = forms.CharField(label='Nombre',max_length=60, widget=forms.TextInput(\n attrs={\n 'class':'form-control'\n }\n ))\n\n \n class Meta:\n model = Juego\n fields = ('nombre',)\n\nclass CompañiaForm(forms.ModelForm):\n nombre_compañia = forms.CharField(label='Nombre Compañia',max_length=60, widget=forms.TextInput(\n attrs={\n 'class':'form-control'\n }\n )) \n class Meta:\n model = Compañia\n fields = ('nombre_compañia',)\n\nclass ProductoForm(forms.ModelForm):\n\n nombre_producto = forms.CharField(label='Nombre Producto',max_length=60, widget=forms.TextInput(\n attrs={\n 'class':'form-control'\n }\n ))\n\n precio = forms.CharField(label='Precio',max_length=60, widget=forms.TextInput(\n attrs={\n 'class':'form-control'\n }\n ))\n \n marca = forms.CharField(label='Marca',max_length=60, widget=forms.TextInput(\n attrs={\n 'class':'form-control'\n }\n ))\n \n class Meta:\n model = Producto\n fields = ('nombre_producto', 'precio', 'marca',)","repo_name":"AndresPedraza09/Fase3GonzalezCheuquepilPedraza005","sub_path":"GoodPlay/login/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6658889400","text":"#!/usr/bin/python2\n\nimport jinja2\nimport sys\nimport os\nimport MySQLdb\n\nif len(sys.argv) > 1 :\n script, argpath = sys.argv\n os.chdir(argpath)\nelse:\n argpath = \".\"\noutfile = argpath + \"/shows.html\"\ndbhost = 'localhost'\ndbuser = 'root'\ndbpswd = '40ounce'\ndbname = 'showdb'\n\n# template directory and file for Jinja2 processing\ntemplateDir = \"/var/www/showlist/templates\"\nTEMPLATE_FILE=\"shows.html\"\n\n#\n# Connect to the database\n#\ndb = MySQLdb.connect(dbhost,dbuser,dbpswd,dbname)\n\n#\n# Create a cursor to hold the query results\n#\ncursor = db.cursor()\nquery = \"SELECT dy, show_flyer_pdf, show_flyer_jpg, show_dt, venue_name, venue_address, venue_city, venue_zip, venue_phone, venue_url, show_time, show_info1, show_info2, show_info3 FROM show_v WHERE show_date >= curdate() order by show_date asc\"\n\n#\n# Execute the query\n#\ncursor.execute(query)\n#\n# Create an empty array to hold the calendar entries\n#\nCalendar = []\n#\n# Parse the rows into a hash and load each hash into the calendar array\n#\nfor (dy, show_flyer_pdf, show_flyer_jpg, show_dt, venue_name, venue_address, venue_city, venue_zip, venue_phone, venue_url, show_time, show_info1, show_info2, show_info3 ) in cursor:\n\t#\n\t# If this is the first row, capture the next flyer\n\t#\n\ttry:\n\t\tflyer\n\texcept NameError:\n\t\tflyer = show_flyer_jpg\n\n\t#\n\t# Create a new hash for each calendar entry\n\t#\n\tshow = {}\n\tshow[\"SHOWDAY\"] = dy\n\tshow[\"SHOWPDF\"] = show_flyer_pdf\n\tshow[\"SHOWJPG\"] = show_flyer_jpg\n\tshow[\"SHOWDATE\"] = show_dt\n\tshow[\"VENUENAME\"] = venue_name\n\tshow[\"VENUEADDR\"] = venue_address\n\tshow[\"VENUECITY\"] = venue_city\n\tshow[\"VENUEZIP\"] = venue_zip\n\tshow[\"VENUEPHONE\"] = venue_phone\n\tshow[\"VENUEURL\"] = venue_url\n\tshow[\"STARTTIME\"] = show_time\n\tshow[\"SHOW_INFO1\"] = show_info1\n\tshow[\"SHOW_INFO2\"] = show_info3\n\tshow[\"SHOW_INFO3\"] = show_info2\n\n\t#\n\t# Add the hash to the array\n\t#\n\tCalendar.append(show)\n\n#\n# Process the Jinja2 template\ntemplateLoader = jinja2.FileSystemLoader(searchpath=templateDir)\ntemplateEnv = jinja2.Environment(loader=templateLoader)\ntemplate = templateEnv.get_template(TEMPLATE_FILE)\n\noutText = template.render(flyer=flyer,show=\"\",calendar=Calendar)\n\n#print(outText)\n#\n# Write the output\n#\nf = open(outfile,'w')\nf.write(outText)\nf.close()\n\n","repo_name":"dremkus/showlist","sub_path":"writeshowpage.py","file_name":"writeshowpage.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14978478481","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\ndef angle_of_skew(image):\r\n '''\r\n :param image: black and white image as an array\r\n :return: the angle by which the image should be rotated to correct skew\r\n '''\r\n # find the coordinates of interest (text), which is black so pixel value is 0\r\n coords_of_interest = np.column_stack(np.where(image == 0)) # change to > 0 if using inverted methodology\r\n\r\n # extract the angle, the last item in the list returned by minAreaRect\r\n angle = cv2.minAreaRect(coords_of_interest)[-1]\r\n\r\n # adjust the angle for proper rotation results, as minAreaRect only returns between 0 and -90\r\n if angle < -45:\r\n angle = -(90 + angle)\r\n else:\r\n angle = -(angle)\r\n\r\n return angle\r\n\r\n\r\ndef rotate_image(mat, angle):\r\n \"\"\"\r\n :param mat: the image to be rotated as an array\r\n :param angle: the angle by which the image needs to be rotated\r\n :return: the rotated image\r\n \"\"\"\r\n\r\n height, width = mat.shape[:2]\r\n image_center = (width/2, height/2)\r\n\r\n rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)\r\n\r\n # rotation calculates the cos and sin, taking absolutes of those.\r\n abs_cos = abs(rotation_mat[0, 0])\r\n abs_sin = abs(rotation_mat[0, 1])\r\n\r\n # find the new width and height bounds\r\n bound_w = int(height * abs_sin + width * abs_cos)\r\n bound_h = int(height * abs_cos + width * abs_sin)\r\n\r\n # subtract old image center (bringing image back to original) and adding the new image center coordinates\r\n rotation_mat[0, 2] += bound_w/2 - image_center[0]\r\n rotation_mat[1, 2] += bound_h/2 - image_center[1]\r\n\r\n # rotate image with the new bounds and translated rotation matrix\r\n rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))\r\n return rotated_mat\r\n","repo_name":"caneale320/misc_projects","sub_path":"OCR_Processing/image_rotation.py","file_name":"image_rotation.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11316344705","text":"import random\r\nimport time\r\n\r\ndef checkScore(score, guess, numList, setOnes, length):\r\n\r\n\tfor element in numList[:]:\r\n\t\tif length-calcOnes(guess^element) != int(score):\r\n\t\t\tnumList.remove(element)\r\n\t#return numList\r\n\r\ndef makeNums2(length = 4):\r\n\treturn [i for i in range(2**length)]\r\n\r\ndef calcOnes(setOnes, length, num):\r\n\ttotOnes = 0\r\n\twhile num > 0:\r\n\t\ttotOnes += setOnes[num & 255]\r\n\t\tnum >>= 8\r\n\treturn totOnes\r\n\r\ndef makeGuess(wrongs, length, setOnes):\r\n\tif len(wrongs) == 0:\r\n\t\treturn 0\r\n\tfor i in range(wrongs[-1][0], 2**length):\r\n\t\tfor j in range(len(wrongs)):\r\n\t\t\tif length-calcOnes(setOnes, length, i^wrongs[j][0]) == wrongs[j][1]:\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\t\t\tif j == len(wrongs)-1:\r\n\t\t\t\treturn i\r\n\treturn -1\r\n\r\ndef getScore(guess, secret, setOnes, length):\r\n\treturn length - calcOnes(setOnes, length, guess ^ secret)\r\n\r\ndef getBinary(num, length):\r\n\tbinStr = \"{0:b}\".format(num)\r\n\treturn (length-len(binStr))*'0' + binStr\r\n\r\ndef playGame(secret, length, setOnes):\r\n\tturns = 0\r\n\twrongs = []\r\n\tguess = 0\r\n\t#print()\r\n\twhile guess >= 0:\r\n\t\t\r\n\t\tturns += 1\r\n\t\tt0 = time.clock()\r\n\t\tguess = makeGuess(wrongs, length, setOnes)\r\n\t\tt1 = time.clock()\r\n\t\tscore = getScore(guess, secret, setOnes, length)\r\n\t\twrongs.append((guess, score))\r\n\t\t#print(getBinary(guess, length), getBinary(secret, length), score, len(wrongs), round(t1-t0, 3))\r\n\t\tif score == length:\r\n\t\t\treturn turns\r\n\t\t#checkScore(score, guess, numList, setOnes, length)\r\n\treturn turns\r\n\r\nn = 15\r\nt0 = time.clock()\r\nsetOnes = [bin(i).count('1') for i in range(256)]\r\n\r\n\r\nfor j in range(0, n):\r\n\tsecrets = [random.randint(0,2**j-1) for i in range(100)]\r\n\tprint('playing game', j+1)\r\n\tscores = [playGame(secret, j+1, setOnes) for secret in secrets]\r\n\taverage = sum(scores)/len(scores)\r\n\tprint(average)\r\n\r\nt1 = time.clock()\r\nprint(t1-t0)","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/bitguesser2.py","file_name":"bitguesser2.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18020125915","text":"import requests, smtplib\r\nfrom datetime import datetime\r\nfrom haversine import haversine, Unit\r\n\r\n# Longitude and latitude of the SF Exploratorium\r\nVIEWING_DISTANCE = 400\r\nMY_LAT = 37.795080\r\nMY_LONG = -122.396430\r\n\r\nlat = MY_LAT+5\r\nlong = MY_LONG+5\r\n\r\n# Retieve longitude and latitute data of ISS\r\nresponse = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\r\nresponse.raise_for_status()\r\niss_data = response.json()\r\n\r\niss_lat = float(iss_data[\"iss_position\"][\"latitude\"])\r\niss_long = float(iss_data[\"iss_position\"][\"longitude\"])\r\n\r\n# Using the haversine formula to calculate how far the ISS is away from you\r\nsf_exp = (MY_LAT, MY_LONG)\r\niss = (iss_lat, iss_long)\r\ntest = (lat, long)\r\ndistance_from = haversine(sf_exp, iss, unit=Unit.MILES)\r\n\r\nprint(f\"The Iternational Space Station is {round(distance_from, 2)} miles away from the San Francisco Exploratorium\")\r\n\r\nparameters = {\r\n \"latitude\": MY_LAT,\r\n \"longitude\": MY_LONG,\r\n \"formatted\": 0\r\n}\r\n\r\n# Retrieve sunrise and sunset data of SF\r\nresponse = requests.get(\"https://api.sunrise-sunset.org/json?\", params=parameters)\r\nresponse.raise_for_status()\r\nsunrise_sunset_data = response.json()\r\nsunrise = int(sunrise_sunset_data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0])\r\nsunset = int(sunrise_sunset_data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0])\r\n\r\ntime_now = datetime.now().hour\r\n\r\nmy_email = \"enter your email\"\r\nmy_password = \"enter your password\"\r\n\r\n\r\n\r\n# If the ISS is close to my current position\r\n# and it is currently dark\r\n# then send me an email to tell me to look up\r\nif int(distance_from) <= VIEWING_DISTANCE:\r\n print(\"The International Space Station is near your house\")\r\n if time_now > sunset or time_now < sunrise:\r\n with smtplib.SMTP(\"smtp.gmail.com\", 587, timeout=120) as connection:\r\n connection.starttls()\r\n connection.login(user=my_email, password=my_password)\r\n connection.sendmail(\r\n from_addr=my_email,\r\n to_addrs=my_email,\r\n msg=\"Suject:LOOK UP!!!\\n\\nLook Up!!! The International Space Station is near your house\"\r\n )\r\n","repo_name":"DonnovanJiles70122/International_Space_Station_Tracker","sub_path":"ISS-Tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23182655620","text":"from data_process import load_image\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n'''\n首先对原始数据进行加工,把二进制转化为矩阵形式\n接着调用sklearn里的各种分类器进行训练和测试\n特征选取\n1.图像像素\n对应数据集为:\ntrain_imgaes,train_labels,train_num 训练集\nval_imgaes,val_labels,val_num 验证集\ntest_imgaes,test_labels,test_num 测试集\n2.灰度直方图\n对应数据集为:\nb1,train_labels,train_num 训练集\nb2,val_labels,val_num 验证集\nb3,test_labels,test_num 测试集\n将对应数据集代入下方各分类器即可观察输出\n把对应位置的train改为b1即可,其他类似\n'''\n#导入训练集,并把它分为训练集与验证集\nimages = load_image(0)\nlabels = load_image(1).astype(np.int) #标签转化为整型\nimages_num = images.shape[0]\n#根据9比1的比例分为验证集和测试集\ntrain_num = int(images_num*0.9)\ntrain_images = images[:train_num]\ntrain_labels = labels[:train_num]\nprint(train_images.shape,train_labels.shape)\nval_num = int(images_num*0.1)\nval_images = images[train_num:]\nval_labels = labels[train_num:]\nprint(val_images.shape,val_labels.shape)\n# plt.imshow(train_images[5000])\n# plt.show()\n#导入测试集\ntest_images = load_image(2)\ntest_labels = load_image(3).astype(np.int)\ntest_num = test_images.shape[0]\nprint(test_images.shape,test_labels.shape)\n\n#数据集变形\ntrain_images = train_images.reshape(train_num,-1)\n# print(train_images.shape,'hahahahah')\n# print(train_labels.shape,'sdadasdasda')\nval_images = val_images.reshape(val_num,-1)\ntest_images = test_images.reshape(test_num,-1)\n\n#上面为对数据进行处理的部分\n\n\n\n#以下部分为使用灰度直方图作为特征进行学习========================\n\nb1 = np.zeros((train_num, 256)) #对图片进行处理,统计每个像素值的个数,以个数为特征,输入样本有256个特征\nfor i in range(train_num): #输入数据集为(N,256) b1 此为训练集\n a = np.bincount(train_images[i].astype(int), minlength=256).reshape(1,-1)\n b1[i] = a\nb2 = np.zeros((test_num, 256)) #b2测试集\nfor i in range(test_num):\n a = np.bincount(test_images[i].astype(int),minlength=256).reshape(1,-1)\n b2[i] = a\nb3 = np.zeros((val_num, 256)) #b3验证集\nfor i in range(val_num):\n a = np.bincount(test_images[i].astype(int),minlength=256).reshape(1, -1)\n b3[i] = a\n\n\n#决策树\ntree = DecisionTreeClassifier()\ntree.fit(b1,train_labels) #训练模型\nvalue = tree.score(b2,test_labels) #输出正确率\nprint(value)\n\n#支持向量机\nsvm = SVC(gamma='scale',decision_function_shape='ovo')\nsvm.fit(b1,train_labels)\nvalue = svm.score(b2,test_labels)\nprint(value)\n#逻辑回归\n\nlogis = LogisticRegression(solver='saga',multi_class='multinomial')\nlogis.fit(b1,train_labels)\nvalue = logis.score(b2,test_labels)\nprint(value)\n\n","repo_name":"hwZhang98/2019summer_homework","sub_path":"第一次作业/作业1/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2369076519","text":"\"\"\"Issue #49.\"\"\"\n# %% Imports\n# Third Party Imports\nfrom ray.rllib.examples.env.random_env import RandomEnv\n\n# Punch Clock Imports\nfrom punchclock.environment.obs_wrappers import CopyObsItem\nfrom punchclock.environment.reward_wrappers import (\n LogisticTransformReward,\n ZeroReward,\n)\nfrom punchclock.environment.wrapper_utils import getNumWrappers\n\n\n# %% Excerpt from SimRunner\n# Defined as standalone function in this script for convenience\ndef transformObs1ataTime(wrapped_env):\n num_env_wrappers = getNumWrappers(wrapped_env)\n\n obs, _ = wrapped_env.unwrapped.reset()\n strcmd = \"env.\"\n for i in range(num_env_wrappers):\n cmd = (num_env_wrappers - i - 1) * strcmd\n # cmd = (num_env_wrappers - i) * strcmd # as written in SimRunner\n cmd = \"wrapped_env.\" + cmd + \"observation(obs)\"\n obs = eval(\n cmd,\n {\n \"wrapped_env\": wrapped_env,\n \"obs\": obs,\n },\n )\n\n return obs\n\n\n# %% Build env\n\nenv = RandomEnv(\n {\n \"observation_space\": Dict({\"a\": Box(0, 1)}),\n }\n)\nwrapped_env = ZeroReward(CopyObsItem(env, \"a\", \"aa\"))\n\nwrapped_env.reset()\nobs, reward, _, _, _ = wrapped_env.step(wrapped_env.action_space.sample())\nprint(f\"obs = \\n{obs}\")\n\n# %% Tests\nprint(\"\\nTest single reward wrapper...\")\nobs = transformObs1ataTime(wrapped_env)\nprint(f\"obs = \\n{obs}\")\n\nprint(\"\\nTest multiple obs wrappers...\")\nwrapped_env = CopyObsItem(CopyObsItem(env, \"a\", \"aa\"), \"a\", \"aaa\")\nobs = transformObs1ataTime(wrapped_env)\nprint(f\"obs = \\n{obs}\")\n\n# Fails\nprint(\"\\nTest obs(reward(env))...\")\nwrapped_env = CopyObsItem(ZeroReward(env), \"a\", \"aaa\")\ntry:\n obs = transformObs1ataTime(wrapped_env)\n print(f\"obs = \\n{obs}\")\nexcept Exception as e:\n print(e)\n\nprint(\"\\nTest reward(obs(env))...\")\nwrapped_env = ZeroReward(CopyObsItem(env, \"a\", \"aa\"))\nobs = transformObs1ataTime(wrapped_env)\nprint(f\"obs = \\n{obs}\")\n\nprint(\"\\nTest reward(reward(obs(env)))...\")\nwrapped_env = LogisticTransformReward(ZeroReward(CopyObsItem(env, \"a\", \"aa\")))\nobs = transformObs1ataTime(wrapped_env)\nprint(f\"obs = \\n{obs}\")\n\n# %% Done\nprint(\"done\")\n","repo_name":"dylan906/clockpunch","sub_path":"issues/iss49/iss49.py","file_name":"iss49.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18977776807","text":"# main.py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport pandas as pd\nfrom k_means_proxy import K_Means_Proxy\n\nstyle.use('ggplot')\n\ndef main():\n df = pd.read_csv(\"data/abaloneconverted.csv\")\n df = df[['Length', 'Diameter', 'Height', 'Whole weight', 'Shucked weight', 'Viscera weight', 'Shell weight', 'Rings']]\n dataset = df.astype(float).values.tolist()\n\n X = df.values\n print(df)\n\n print(\"================ K-Means Clustering ================\")\n print(\" DESIGN PATTERN PROXY\")\n print(\"Data : Abalone\")\n print(\"\")\n print(\"Anggota Kelompok : \")\n print(\"-10119108 Prasetyo Hade MW\")\n print(\"-10119118 Rizky Septiana\")\n print(\"-10119123 Angga Cahya Abadi\")\n print(\"-10119124 Primarazaq Noorshalih Putra Hilmana\")\n print(\"----------------------------------------------------\")\n kluster = int(input(\"Masukkan Jumlah Kluster yang diinginkan : \"))\n\n km_proxy = K_Means_Proxy(kluster)\n km_proxy.fit(X)\n\n colors = 10 * [\"r\", \"c\", \"k\", \"g\", \"b\"]\n arr = []\n\n for centroid in km_proxy.k_means.centroids:\n plt.scatter(km_proxy.k_means.centroids[centroid][0], km_proxy.k_means.centroids[centroid][1], s=130, marker=\"x\")\n\n for classification in km_proxy.k_means.classes:\n color = colors[classification]\n for features in km_proxy.k_means.classes[classification]:\n plt.scatter(features[0], features[1], color=color, s=30)\n arr.append([classification])\n\n print(\"Total Data : \", len(arr), \"data\")\n for i in range(kluster):\n print(\"Kluster \", i, \": \", arr.count([i]), \"data\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"primarazaq/KMeans_Design_Pattern","sub_path":"Design_Pattern/Structural/Proxy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42599078378","text":"import json\nimport os.path\nfrom .projects import _ProjectHandler\n\nimport d2p.util\nimport d2p.wui.templating\n\nclass _LectureProjectHandler(_ProjectHandler):\n def init(self, projectId):\n super(_LectureProjectHandler, self).init(projectId)\n assert self.p.ptype == 'lecture'\n self.pdict['hideHeader'] = True\n self.pdict['showUI'] = True\n self.pdict.setdefault('scripts', []).extend([\n {'src': '/static/lecture/mustache.js'},\n {'src': '/static/lecture/osxh.js'},\n {'src': '/static/lecture/lecture.js'},\n {'src': '/static/lecture_admin.js'},\n ])\n self.pdict.setdefault('stylesheets', []).extend([\n {'src': '/static/lecture/lecture.css'},\n ])\n\n def genChapterUrl(self, chapterId):\n return self.pdict['project']['baseurl'] + 'chapter/' + chapterId + '/'\n\n def getChapters(self):\n return sorted(self.p.view_newest(lambda e: e['type'] == 'chapter'), key=lambda e: d2p.util.sortkey_natural(e['name']))\n\n def getLectureTemplates(self):\n templateDir = os.path.join(d2p.wui.templating.TEMPLATE_PATH, 'lecture')\n res = {}\n for fn in os.listdir(templateDir):\n if fn.startswith('_'):\n continue\n base,ext = os.path.splitext(fn)\n if ext != '.mustache':\n continue\n with open(os.path.join(templateDir, fn)) as f:\n res['lecture/' + base] = f.read()\n return res\n\nclass LectureProjectShowHandler(_LectureProjectHandler):\n def get(self, projectId):\n self.init(projectId)\n\n dct = self.pdict\n dct['template'] = 'lecture/overview'\n dct['title'] = self.p.name\n dct['chapters'] = self.getChapters()\n for c in dct['chapters']:\n c['_url'] = self.genChapterUrl(c['_id'])\n self.render(dct)\n\nclass ChapterHandler(_LectureProjectHandler):\n def post(self, projectId, eId=None): # None: new entry\n self.init(projectId)\n\n d = {\n 'type': 'chapter'\n }\n for k in ['name']:\n v = self.get_argument(k, None)\n assert v\n d[k] = v\n slidesJSON = self.get_argument('slidesJSON', None)\n if not slidesJSON:\n slidesJSON = '[]'\n d['slides'] = json.loads(slidesJSON)\n assert isinstance(d['slides'], list)\n\n if eId:\n d['_id'] = eId\n\n e = self.p.local_add(d)\n\n dct = self.pdict\n dct['url'] = self.genChapterUrl(e['_id'])\n self.write(dct)\n\n\n def get(self, projectId, eId, revId=None):\n self.init(projectId)\n\n dct = self.pdict\n e = self.p.getEntry(eId, revId)\n dct['template'] = 'lecture/chapter-javascript'\n dct['title'] = e['name'] + ' - ' + self.p.name\n e['_lecture'] = self.pdict['project']\n dct['chapter'] = e\n dct['chapterJSON'] = json.dumps(e, indent=4)\n dct['templatesJSON'] = json.dumps(self.getLectureTemplates())\n self.render(dct)\n\n\ndef routes(prefix):\n return [\n (prefix + r\"/\", LectureProjectShowHandler),\n (prefix + r\"/chapter/\", ChapterHandler),\n (prefix + r\"/chapter/([0-9a-f]+)/\", ChapterHandler),\n ]\n\n","repo_name":"phihag/d2p","sub_path":"wui/lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"70079130115","text":"import operator\nfrom types import LambdaType\nfrom NatasTF.utils import print_error_line\n\n# Guardamos o contexto das variáveis aqui\n# Como essa linguagem não suporta funções\n# tratamos elas como escopo global\nvariaveis = {\n\n}\n\n# Tabela para tradução dos tipos para os tipos da linguagem\n_tipos = {\n str: 'char',\n float: 'real',\n int: 'inteiro'\n}\n\n# função que obtem a posicao absoluta do lexem no código\nerr_info = lambda p, n: p.lexpos(n)\n\n\n# Classe base que usamos como \"interface\"\nclass Base:\n error_info = None\n\n def eval(self):\n try:\n return self._eval()\n except Exception as e:\n error_handler(str(e), self.error_info)\n\n def _eval(self):\n raise NotImplementedError()\n\n\nclass Identificador(Base):\n\n def __init__(self, p, name):\n self.error_info = err_info(p, name)\n self.name = p[name]\n\n\n def assign(self, val):\n\n if self.name not in variaveis:\n raise Exception(f'Erro semântico: Variável \\'{self.name}\\' referenciada mas não declarada.')\n \n if isinstance(val, str): # se forem tipos iguais\n if variaveis[self.name]['type'] == 'char':\n variaveis[self.name]['value'] = val\n return\n else:\n raise Exception(f'Erro semântico: Variável \\'{self.name}\\' do tipo \\'{variaveis[self.name][\"type\"]}\\' incompatível com tipo \\'{_tipos[type(val)]}\\'')\n \n if variaveis[self.name]['type'] == 'real':\n variaveis[self.name]['value'] = float(val) # atribui float na variavel real\n elif variaveis[self.name]['type'] == 'inteiro':\n variaveis[self.name]['value'] = int(val) # se não for real, é inteiro, então converte\n else:\n raise Exception(f'Erro semântico: Variável \\'{self.name}\\' do tipo \\'{variaveis[self.name][\"type\"]}\\' incompatível com tipo \\'{_tipos[type(val)]}\\'')\n\n\n def _eval(self):\n \n if self.name in variaveis:\n if 'value' not in variaveis[self.name]:\n raise Exception(f'Erro semântico: Variável \\'{self.name}\\' referenciada mas não inicializada.')\n return variaveis[self.name]['value']\n else:\n raise Exception(f'Erro semântico: Variável \\'{self.name}\\' não definida.')\n\n \nclass Atribuicao(Base):\n def __init__(self, p, ID, value):\n self.error_info = err_info(p, ID) # pegando o ID inves de value\n self.ID = p[ID]\n self.value = p[value]\n\n def _eval(self):\n self.ID.assign(self.value.eval())\n\n\nclass Var(Base):\n def __init__(self, p, value):\n self.error_info = err_info(p,value)\n self.value = p[value]\n \n def _eval(self): # limpar char\n return self.value.strip('\\'') if type(self.value) == str else self.value\n\n\nclass BoolExp(Base):\n ops = {\n '>' : operator.gt,\n '>=': operator.ge,\n '<' : operator.lt,\n '<=': operator.le,\n '==': operator.eq,\n '!=': operator.ne,\n\n '&&': lambda a, b: a.eval() and b.eval(),\n '||': lambda a, b: a.eval() or b.eval()\n }\n\n def __init__(self, p, left, op, right):\n self.error_info = err_info(p, op)\n self.left = p[left]\n self.op = p[op]\n self.right = p[right]\n\n def _eval(self):\n try:\n op = self.ops[self.op]\n\n if isinstance(op, LambdaType):\n return int(op(self.left, self.right))\n\n return int(op(self.left.eval(), self.right.eval()))\n except Exception as e:\n raise Exception(f'Erro semântico ao fazer operação binária: {e}. 
({self.left} {self.op} {self.right})')\n\n\n\nclass Comandos(Base):\n def __init__(self, list=None):\n if not list:\n list = []\n self.list = list\n\n def _eval(self):\n r = []\n for cmd in self.list:\n res = cmd.eval()\n\n if res: r.append(res)\n\n return r\n \n\nclass Se(Base):\n def __init__(self, p, exp, pv, pf=None):\n self.error_info = err_info(p, exp)\n self.exp = p[exp]\n self.pv = p[pv]\n self.pf = None if not pf else p[pf]\n\n def _eval(self):\n if self.exp.eval():\n return self.pv.eval()\n elif self.pf:\n return self.pf.eval()\n \n\nclass Print(Base):\n def __init__(self, p, arg: Base):\n self.error_info = err_info(p, arg)\n self.arg = p[arg]\n \n def _eval(self):\n s = self.arg.eval()\n print(s if type(s) != str else s.replace('\\\\n', '\\n').replace('\\\\t', '\\t'))\n\n\nclass Ler(Base):\n def __init__(self, p, arg: Base):\n self.error_info = err_info(p, arg)\n self.arg = p[arg]\n \n def _eval(self):\n val = input(f\"Variável '{self.arg.name}' recebe: \")\n\n try:\n val = float(val) # em ler(), tente converter '1' para real\n except:\n pass\n\n self.arg.assign(val)\n\n\nclass Enquanto(Base):\n def __init__(self, p, exp, comandos):\n self.error_info = err_info(p, exp)\n self.exp = p[exp]\n self.comandos =p[comandos]\n\n def _eval(self):\n while self.exp.eval():\n self.comandos.eval()\n\n\nclass OpBinaria(Base):\n def __init__(self, p, left, op, right):\n self.error_info = err_info(p, op)\n self.left = p[left]\n self.op = p[op]\n self.right = p[right]\n\n def _eval(self):\n left, right = self.left.eval(), self.right.eval()\n\n try:\n if self.op == '+':\n return left + right\n elif self.op == '-':\n return left - right\n elif self.op == '/':\n return left / right\n elif self.op == '*':\n return left * right\n\n except ZeroDivisionError:\n raise Exception(f\"Erro semântico: Divisão por zero.\")\n except:\n raise Exception(f\"Erro semântico: operação \\'{self.op}\\' com tipos ({_tipos[type(left)]}, {_tipos[type(right)]}) não suportado.\")\n\n\nclass OpUnaria(Base):\n _ops = {\n '!': lambda x: int(not x),\n '-': lambda x: -x,\n '+': lambda x: x\n }\n \n def __init__(self, p, arg, op):\n self.error_info = err_info(p, op)\n self.arg = p[arg]\n self.op = p[op]\n \n def _eval(self):\n val = self.arg.eval()\n try:\n return self._ops[self.op](val)\n except:\n raise Exception(f\"Erro semântico: operação \\'{self.op}\\' com tipo {_tipos[type(val)]} não suportado.\")\n\n\n\n# trata erro encontrado em alguma produção\ndef error_handler(error, lexpos):\n print('\\033[91mErro encontrado! Veja abaixo a linha e a mensagem de erro.\\n')\n \n if not lexpos:\n print('Linha desconhecida.')\n else:\n print_error_line(lexpos)\n\n print(error, '\\033[0m')\n exit(1)","repo_name":"NatasFX/ELC408-Compiladores-2023","sub_path":"NatasTF/acao_semantica.py","file_name":"acao_semantica.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"16030322219","text":"# Kata name: Persistent Bugger\n\n# Write a function, persistence, that takes in a positive parameter num and returns its multiplicative persistence,\n# which is the number of times you must multiply the digits in num until you reach a single digit.\n# 999 --> 4 (because 9*9*9 = 729, 7*2*9 = 126, 1*2*6 = 12, and finally 1*2 = 2)\ndef persistence(n):\n result = 0\n while len(str(n)) > 1:\n multiplication = 1\n for i in str(n):\n multiplication *= int(i)\n n = multiplication\n result += 1\n return result\n\n\nprint(persistence(39))\n","repo_name":"stepanskyvlad/Learning-Python","sub_path":"Tasks/Codewars/6-kyu/persistent_bugger.py","file_name":"persistent_bugger.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7911504035","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.pyplot import arrow\n\nnode_size = 1000\nfont_size = 18\nedge_width = 5\narrow_style = '-|>'\narrow_size=30\nnode_color = 'tab:blue'\n\n\nG = nx.DiGraph()\n\nG.add_edge('A','C')\nG.add_edge('B','D')\nG.add_edge('C','D')\nG.add_edge('C','B')\nG.add_edge('C','F')\nG.add_edge('C','E')\nG.add_edge('A','B')\n\npos = nx.circular_layout(G)\nnx.draw(G, pos=pos, with_labels=True, node_color=node_color, node_size=node_size, font_size=font_size, width=edge_width, arrowstyle=arrow_style, arrowsize=arrow_size)\nplt.axis('off')\n\nplt.savefig('unweighted.svg', format='svg')\nplt.clf()\n\n\n# plt.figure(figsize=(20,10))\n\nH = nx.Graph()\n\nH.add_edge('A','B',weight=0.6)\nH.add_edge('A','C',weight=0.2)\nH.add_edge('C','D',weight=0.1)\nH.add_edge('C','E',weight=0.7)\nH.add_edge('C','F',weight=0.9)\nH.add_edge('A','D',weight=0.3)\n\nelarge = [(u, v) for (u, v, d) in H.edges(data=True) if d['weight'] > 0]\nesmall = [(u, v) for (u, v, d) in H.edges(data=True) if d['weight'] == 0.0]\n\n# print(pos)\npos = nx.circular_layout(H)\n\nnx.draw(H, pos=pos, with_labels=True, node_color=node_color, node_size=node_size, font_size=font_size, width=edge_width)\n# Nodes\n# nx.draw_networkx_nodes(H, pos, node_size=node_size)\n\n# # Edges\n# nx.draw_networkx_edges(H, pos, edgelist=elarge,width=edge_width)\n# nx.draw_networkx_edges(H, pos, edgelist=esmall,width=edge_width)\n\n# Edge labels of non zero weights\nedge_labels = {}\nfor u,v in H.edges():\n edge_labels[(u,v)] = round(H[u][v]['weight'], 3)\nnx.draw_networkx_edge_labels(H, pos, label_pos = 0.65, edge_labels = edge_labels, font_size=font_size)\n\nplt.axis('off')\nplt.savefig('weighted.svg', format='svg')","repo_name":"lmfaber/master_thesis","sub_path":"scripts/graph/gr.py","file_name":"gr.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23616888931","text":"filename = input(\"Enter file name of test case: \")\r\nrfname = input(\"Enter result file name: \")\r\nfile = open(filename)\r\n\r\nipt = file.readlines()\r\nls = int(ipt[0])\r\n\r\nres = []\r\nfor n in range(1,ls+1):\r\n\tprint(n)\r\n\tcln = ipt[n]\r\n\tcln = cln.split(\" \")\r\n\t\r\n\tcmbn = 0\r\n\texcl = int(cln[cmbn]) + 1\r\n\tlvsr = int(cln[excl]) + excl + 1\r\n\t\r\n\tcarr = []\r\n\tearr = []\r\n\t\r\n\tif int(cln[cmbn]) != 0:\r\n\t\tfor n in range(cmbn+1,excl):\r\n\t\t\tcarr.append([cln[n][0:2],cln[n][0:2][::-1],cln[n][2]])\r\n\t\t\t\r\n\tif int(cln[excl]) != 0:\r\n\t\tfor n in range(excl+1,lvsr):\r\n\t\t\tearr.append([cln[n][0],cln[n][1]])\r\n\t\r\n\tstack=[]\r\n\tfor n in cln[lvsr+1]:\r\n\t\tstack.append(n)\r\n\t\tif len(stack)>1:\r\n\t\t\tfor i in carr:\r\n\t\t\t\tif str(stack[len(stack)-2]) + str(stack[len(stack)-1]) == i[0] or str(stack[len(stack)-2]) + str(stack[len(stack)-1]) == i[1]:\r\n\t\t\t\t\tdel stack[len(stack)-1]\r\n\t\t\t\t\tdel stack[len(stack)-1]\r\n\t\t\t\t\tstack.append(i[2])\r\n\t\t\tfor i in earr:\r\n\t\t\t\tif i[0] in stack and i[1] in stack:\r\n\t\t\t\t\tstack=[]\r\n\t\r\n\tres.append(stack)\r\n\r\nfor n in range(len(res)):\r\n\r\n\tif len(res[n])>0:\r\n\t\ttorts = \"[\"\r\n\t\tfor i in range(len(res[n])-1):\r\n\t\t\ttorts = torts + res[n][i] + \", \"\r\n\t\ttorts = torts[:-2] + \"]\"\r\n\telse:\r\n\t\ttorts = \"[]\"\r\n\tres[n] = \"Case #\" + str(n+1) + \": \" + torts + \"\\n\"\r\n\t\r\ntostr = \"\"\r\nfor n in res:\r\n\ttostr+=n\r\n\t\r\nrfile = open(rfname, 'w')\r\nrfile.write(tostr[:-1])","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/132.py","file_name":"132.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4489256452","text":"#hashlib\n#Python 的 hashlib 提供了常见的摘要算法,如 MD5,SHA1 等等。\n#什么是摘要算法呢?摘要算法又称哈希算法、散列算法。它通过一个函\n#数,把任意长度的数据转换为一个长度固定的数据串(通常用 16 进制的字符串表示)。\n\n\n#MD5 是最常见的摘要算法,速度很快,生成结果是固定的 128 bit 字节,通常用一个 32 位的 16 进制字符串表示。\nimport hashlib\nmd5 = hashlib.md5()\nmd5.update('how to use md5 in python hashlib?'.encode('utf-8'))\nprint(md5.hexdigest())\n\n\n#如果数据量很大,可以分块多次调用 update(),最后计算的结果是一样\nmd5 = hashlib.md5()\nmd5.update('how to use md5 in '.encode('utf-8'))\nmd5.update('python hashlib?'.encode('utf-8'))\nprint(md5.hexdigest())\n\n\n\n\n#另一种常见的摘要算法是 SHA1,调用 SHA1 和调用 MD5 完全类似:\nimport hashlib\nsha1 = hashlib.sha1()\nsha1.update('how to use sha1 in '.encode('utf-8'))\nsha1.update('python hashlib?'.encode('utf-8'))\nprint(sha1.hexdigest())\n#SHA1 的结果是 160 bit 字节,通常用一个 40 位的 16 进制字符串表示。\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nikelily/PyLearn-Codes","sub_path":"160918/hashlibdemo.py","file_name":"hashlibdemo.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19361480967","text":"#!/usr/local/bin/python3\n# reactions_to_kegg.py\n# Use PyFBA reaction IDs to obtain KEGG KO IDs and their associated pathways.\n#\n# Author: Daniel A Cuevas (dcuevas08.at.gmail.com)\n# Created on 19 Jul 2017\n# Updated on 25 Jul 2017\n\nfrom __future__ import print_function, absolute_import, division\nimport requests\nimport argparse\nimport PyFBA\nimport os\nimport sys\nimport re\n\n\n###############################################################################\n# FUNCTION DEFINITIONS\n###############################################################################\ndef parse_response(response, mseed, kegg):\n \"\"\"\n Parse the KEGG API response text. Text is all plain text without an\n easily digestible format. Feature headers are listed at the beginning of\n each line. For the GET reaction response, the headers are:\n 1) ENTRY\n 2) NAME\n 3) DEFINITION\n 4) EQUATION\n 5) COMMENT\n 6) RCLASS\n 7) ENZYME\n 8) PATHWAY\n 9) ORTHOLOGY\n\n :param response: API response text\n :type response: str\n :param mseed: Model SEED reaction ID\n :type mseed: str\n :param kegg: KEGG reaction ID\n :type kegg: str\n :return: None\n \"\"\"\n # Check if response contains all items\n if not re.search(r'NAME', response):\n print('No NAME info for', mseed, ':', kegg, file=sys.stderr)\n no_name = True\n else:\n no_name = False\n\n if not re.search(r'ENZYME', response):\n print('No ENZYME info for ', mseed, ':', kegg, file=sys.stderr)\n no_enz = True\n else:\n no_enz = False\n\n if not re.search(r'ORTHOLOGY', response):\n print('No ORTHOLOGY info for ', mseed, ':', kegg, file=sys.stderr)\n no_orth = True\n else:\n no_orth = False\n\n # Check if there is no information in the response\n if no_name and no_enz and no_orth:\n print('No name, enzyme, or orthology found', file=sys.stderr)\n print('\\t\\t\\t')\n return None\n\n # Response contains newlines\n response = response.rstrip().split('\\n')\n\n all_name = []\n all_enzyme = []\n all_ko = []\n curr_section = ''\n for line in response:\n kegg_section = line[:12].strip()\n kegg_data = line[12:].strip()\n if kegg_section != '':\n curr_section = kegg_section\n\n if curr_section == 'NAME':\n all_name.append(kegg_data.strip(';').strip())\n\n elif curr_section == 'ENZYME':\n all_enzyme.extend(kegg_data.split())\n\n elif curr_section == 'ORTHOLOGY':\n # Collect all items\n ko = re.match(r'K\\d+', kegg_data).group(0)\n all_ko.append(ko)\n\n # All data here\n if not no_name and not no_enz and not no_orth:\n print('\\t' + ';'.join(all_name) + '\\t', end='')\n print(';'.join(all_enzyme) + '\\t', end='')\n print(';'.join(all_ko))\n\n # Only NAME and ENZYME\n elif no_orth:\n print('\\t' + ';'.join(all_name) + '\\t' + ';'.join(all_enzyme) + '\\t')\n\n # Only NAME and ORTH\n elif no_enz:\n print('\\t' + ';'.join(all_name) + '\\t\\t' + ';'.join(all_ko))\n\n # Only ENZYME and ORTH\n elif no_name:\n print('\\t\\t' + ';'.join(all_enzyme) + '\\t' + ';'.join(all_ko))\n\n # Only NAME\n elif no_enz and no_orth:\n print('\\t' + ';'.join(all_name) + '\\t\\t')\n\n # Only ENZYME\n elif no_name and no_orth:\n print('\\t\\t' + ';'.join(all_enzyme) + '\\t')\n\n # Only ORTHOLOGY\n elif no_name and no_enz:\n # Print out ko data\n print('\\t\\t\\t' + ';'.join(all_ko))\n\n\n###############################################################################\n# ARGUMENT PARSING\n###############################################################################\nparser = argparse.ArgumentParser(description='Use PyFBA reaction IDs to '\n 'obtain KEGG KO IDs and their '\n 
'associated pathways')\nparser.add_argument('mseed_to_kegg', help='ModelSEED reaction mapper file')\nparser.add_argument('model_name', help='Model name')\nparser.add_argument('model_dir', help='Model directory')\nparser.add_argument('-v', '--verbose', action='store_true',\n help='Verbose output')\n\nargs = parser.parse_args()\n\n# Check that the mapper file exists\nif not os.path.isfile(args.mseed_to_kegg):\n sys.exit('ModelSEED to KEGG mapper file does not exist!')\n# Check that model directory exists\nif not os.path.isdir(args.model_dir):\n sys.exit('Model directory does not exist!')\n\n\n###############################################################################\n# PROCESS MODEL REACTIONS\n###############################################################################\n# Load ModelSEED reaction ID to KEGG reaction ID mapper\nmseed_to_kegg = {}\nwith open(args.mseed_to_kegg, 'r') as f:\n # Read header line\n header = f.readline()\n for l in f:\n l = l.rstrip('\\n')\n contents = l.split('\\t')\n mseed, kegg = contents[:2]\n\n # Check if reaction ID has already been encountered\n if mseed in mseed_to_kegg:\n print(mseed + ' already encountered in mapper file',\n file=sys.stderr)\n else:\n mseed_to_kegg[mseed] = kegg\n\nprint(str(len(mseed_to_kegg)), 'reaction IDs found in mapper file',\n file=sys.stderr)\n\n# Load model\nmodel = PyFBA.model.load_model(args.model_dir, args.model_name)\nprint('Model contains', str(model.number_of_reactions()), 'reactions',\n file=sys.stderr)\n\n# Set KEGG API URL\nAPI_BASE_URL = 'http://rest.kegg.jp/'\n\n# Output header\nprint('mseed_id', 'equation', 'kegg_id', 'name', 'ec', 'pathway', sep='\\t')\n\n# Iterate through reaction list from model\nfor i, mseed_rxn in enumerate(model.reactions, start=1):\n print('Processing reaction', str(i), 'of',\n str(model.number_of_reactions()),\n end='\\r', file=sys.stderr)\n if mseed_rxn in mseed_to_kegg:\n kegg_rxn = mseed_to_kegg[mseed_rxn]\n else:\n print(mseed_rxn, 'not found in mapper. Skipping.', file=sys.stderr)\n continue\n\n # Set resource path\n resource = 'get/' + kegg_rxn\n\n # Issue request (plain concatenation: os.path.join would use a backslash\n # separator on Windows, which breaks URLs)\n full_path = API_BASE_URL + resource\n response = requests.get(full_path)\n\n # Check that status code is 200 = good\n if response.status_code != 200:\n print('There was an error with the request: status code =',\n response.status_code,\n file=sys.stderr)\n print('ModelSEED id:', mseed_rxn, file=sys.stderr)\n print('KEGG id:', kegg_rxn, file=sys.stderr)\n print(mseed_rxn, model.reactions[mseed_rxn].equation,\n kegg_rxn, 'None', 'None', 'None', sep='\\t')\n continue\n\n if response.text.strip('\\n') == '':\n print('Response was empty for', kegg_rxn, file=sys.stderr)\n print(mseed_rxn, model.reactions[mseed_rxn].equation,\n kegg_rxn, 'None', 'None', 'None', sep='\\t')\n continue\n\n # Print reaction ID\n print(mseed_rxn, model.reactions[mseed_rxn].equation, kegg_rxn,\n end='', sep='\\t')\n parse_response(response.text, mseed_rxn, kegg_rxn)\n\nprint('\\nScript completed!', file=sys.stderr)\n\n","repo_name":"dacuevas/bioinformatics","sub_path":"kegg_api/reactions_to_kegg.py","file_name":"reactions_to_kegg.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
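The KEGG flat-file format parsed above is column oriented: a 12-character section header, with continuation lines carrying a blank prefix. The per-branch printing in the entry can be replaced by collecting sections into a dict first and formatting afterwards; an independent illustrative sketch:

```python
def kegg_flatfile_to_dict(text: str) -> dict:
    """Group a KEGG flat-file response into {SECTION: [data lines]}."""
    sections, current = {}, None
    for line in text.rstrip().split('\n'):
        header, data = line[:12].strip(), line[12:].strip()
        if header:                       # a named section starts here
            current = header
        if current and data:             # blank prefix = continuation line
            sections.setdefault(current, []).append(data)
    return sections

sample = ("ENTRY       R00001      Reaction\n"
          "NAME        polyphosphate polyphosphohydrolase\n"
          "ENZYME      3.6.1.10")
parsed = kegg_flatfile_to_dict(sample)
assert parsed['ENZYME'] == ['3.6.1.10']
```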
+{"seq_id":"15177196853","text":"from random import randint\nfrom math import ceil, pow\nimport pandas as pd\nfrom collections import namedtuple\nfrom activity_1.activity_1 import arithmetic_average\n\ndef get_shapiro_wilk_tables():\n file_coefficients = 'activity_3/coefficients.csv'\n file_critical_values = 'activity_3/critical_values.csv'\n \n pd_coefficients = pd.read_csv(file_coefficients)\n pd_critical_values = pd.read_csv(file_critical_values)\n\n pd_coefficients.set_index('i\\\\n', inplace=True)\n pd_critical_values.set_index('N', inplace=True)\n\n pd_coefficients = pd_coefficients.apply(lambda x: x.str.replace(',', '.').astype(float), axis=1)\n pd_critical_values = pd_critical_values.apply(lambda x: x.str.replace(',', '.').astype(float), axis=1)\n\n return (pd_coefficients, pd_critical_values)\n\n\ndef get_b_shapiro_wilk(values, pd_coefficients):\n result = 0\n len_sample = len(values)\n\n for i in range(ceil(len_sample / 2)):\n result += (values[len_sample-i-1] - values[i]) * float(pd_coefficients.loc[i+1, str(len_sample)])\n \n return result\n\ndef get_s_shapiro_wilk(values):\n mean = arithmetic_average(values)\n \n return sum([(value - mean) ** 2 for value in values])\n\ndef get_shapiro_result(pd_critical_values, W_calculated, confidence_level):\n significance_level = str(round(1-confidence_level,2))\n\n W = float(pd_critical_values.loc[len(sample_sorted), significance_level])\n\n return ('Rejected' if W_calculated > W else 'Acepted', significance_level)\n \n\nif __name__ == '__main__':\n sample = [1.90642, 2.22488, 2.10288, 1.69742, 1.52229, \n 3.15435, 2.61826, 1.98492, 1.42738, 1.99568]\n confidence_level = 0.95\n\n pd_coefficients, pd_critical_values = get_shapiro_wilk_tables()\n sample_sorted = sorted(sample)\n\n s = get_s_shapiro_wilk(sample_sorted)\n b = get_b_shapiro_wilk(sample_sorted, pd_coefficients)\n W_calculated = pow(b,2) / s\n\n hypothesis_null, significance_level = get_shapiro_result(pd_critical_values, W_calculated, confidence_level)\n\n ShapiroResult = namedtuple('ShapiroResult', ('hypothesis_null','significance_level'))\n\n print(ShapiroResult(hypothesis_null, significance_level))","repo_name":"fwase/avaliacao-desempenho","sub_path":"activity_3/activity_3.py","file_name":"activity_3.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1245509254","text":"import numpy as np\nimport tifffile as tiff\n\nmask = np.array([[0.5,1,0.5],[1,0,1],[0.5,1,0.5]])\niter=100\nthreshold=40\n\ndef label_prop(img,iter=10000,threshold=50):\n\n y=img\n w,h = y.shape\n y1 = np.arange(w*h,dtype=np.uint32).reshape([w,h])*y\n\n #标签传播\n y_iter = y1.astype(dtype=np.uint32)\n for i in range(iter):\n print(i)\n temp = np.zeros([w+2,h+2,5]) #4000*15106*5\n temp[1:w+1,1:h+1,0] = y_iter\n temp[0:w,1:h+1,1] = y_iter\n temp[2:w+2,1:h+1,2] = y_iter\n temp[1:w+1,2:h+2,3] = y_iter\n temp[1:w+1,0:h,4] = y_iter\n\n temp = np.max(temp,axis=2)\n temp_mask = temp[1:w+1,1:h+1]*y\n if (temp_mask==y_iter).all():\n break\n else:\n y_iter = temp_mask\n\n #删去连通数小于阈值的区域\n unique, counts = np.unique(y_iter,return_counts=True)\n delete_id = unique[counts<=threshold]\n for i in delete_id:\n y_iter[y_iter==i]=0\n y_iter[y_iter>0]=1\n return y_iter.astype(np.uint8)\n\n#1.计算邻居8个点\ndef cal_neighbor(i,j):\n # global a\n neighbor = a[i-1:i+2,j-1:j+2]\n if a[i,j]==1:\n if np.sum(neighbor*mask)<=2:\n return 0\n # else:\n # if np.sum(neighbor*mask)>=3.5:\n # return 1\n return a[i,j]\n\ndef label_pruning(img):\n n, m = img.shape\n for i in range(n):\n for j in range(m):\n if img[i, j] == 1:\n print(i, j)\n q = [(i, j)]\n p = 0\n f = True\n while p < len(q):\n x, y = q[p]\n p += 1\n if x + 1 < n and (x + 1, y) not in q and img[x + 1, y] == 1:\n q.append((x + 1, y))\n if x - 1 >= 0 and (x - 1, y) not in q and img[x - 1, y] == 1:\n q.append((x - 1, y))\n if y + 1 < m and (x, y + 1) not in q and img[x, y + 1] == 1:\n q.append((x, y + 1))\n if y - 1 >= 0 and (x, y - 1) not in q and img[x, y - 1] == 1:\n q.append((x, y - 1))\n # if x + 1 < n and y + 1 < m and (x + 1, y + 1) not in q and img[x + 1, y + 1] == 1:\n # q.append((x + 1, y + 1))\n # if x + 1 < n and y - 1 >= 0 and (x + 1, y - 1) not in q and img[x + 1, y - 1] == 1:\n # q.append((x + 1, y - 1))\n # if x - 1 >= 0 and y + 1 < m and (x - 1, y + 1) not in q and img[x - 1, y + 1] == 1:\n # q.append((x - 1, y + 1))\n # if x - 1 >= 0 and y - 1 >= 0 and (x - 1, y - 1) not in q and img[x - 1, y - 1] == 1:\n # q.append((x - 1, y - 1))\n if len(q) > 5:\n f = False\n break\n if f:\n for x, y in q:\n img[x, y] = 0\n return img\n\nif __name__ == '__main__':\n\n res = label_prop(tiff.imread(\"D:/数据/天池/tif/out_11_10_drop.tiff\"))\n tiff.imsave(\"D:/数据/天池/tif/out_11_11_LP_1.tiff\",res)\n\n y = res\n w = np.shape(y)[0]\n h = np.shape(y)[1]\n print([w+2,h+2])\n # global a\n a = np.zeros([w+2,h+2])\n b = np.zeros([w+2,h+2])\n\n a[1:w+1,1:h+1] = y\n for m in range(10):\n for i in range(1,w+1):\n print(i)\n for j in range(1,h+1):\n b[i,j] = cal_neighbor(i,j)\n\n out = b[1:w+1,1:h+1]\n print(out.shape)\n tiff.imsave(\"D:\\\\数据\\\\天池\\\\tif\\\\out_11_11_D_LP.tiff\",out.astype(np.uint8))","repo_name":"xvlvzhu/Satellite_Imagery_Change_Detection","sub_path":"util/denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"73872794113","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\nclass SignUpForm(UserCreationForm):\n\temail = forms.CharField(max_length=254, required=True, widget=forms.EmailInput())\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username', 'email', 'first_name', 'last_name', 'password1', 'password2')\n\n\tdef clean(self):\n\t\tcleaned_data = super(SignUpForm, self).clean()\n\t\tusername = cleaned_data.get('username')\n\t\tif username and User.objects.filter(username__iexact=username).exists():\n\t\t self.add_error('username', 'A user with that username already exists.')\n\t\treturn cleaned_data\n\n\nclass UserForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('email', 'first_name', 'last_name')\n\n\nclass ProfileForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\t\t\n\t\tfields = ('bio', 'website', 'country', 'date_of_birth', 'phone_number', 'user_type', 'pro_type')\n\t\twidgets = {\n\t\t\t'bio':forms.Textarea(attrs={'placeholder': 'Tell people about yourself', 'rows':20, 'cols':100}),\n\t\t\t'website':forms.TextInput(attrs={'placeholder': 'Your website address'}),\t\t\t\n\t\t\t'phone_number':forms.TextInput(attrs={'placeholder': 'Yourt telephone number'}),\n\t\t\t'alert_new_subscribe':forms.CheckboxInput(attrs={'name': 'alert_new_sub', 'hidden': 'hidden', 'id':'alert_new_sub'}), \n\t\t\t'alert_new_item':forms.CheckboxInput(attrs={'name': 'alert_new_item', 'hidden': 'hidden', 'id':'alert_new_item'}),\n\t\t\t'alert_suggested_boards':forms.CheckboxInput(attrs={'name': 'alert_sug_board', 'hidden': 'hidden', 'id':'alert_sug_board'}),\n\t\t\t'date_of_birth':forms.TextInput(attrs={'data-toggle':'datepicker'}),\n\t\t}\n\nclass ProfileImageForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\t\t\n\t\tfields = ('picture',)\n\n\nclass ChangeBackgroundForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\t\t\n\t\tfields = ['background',]\n\n\nclass PrivacyForm(forms.ModelForm):\t\n\tclass Meta:\n\t\tmodel = Profile\t\t\n\t\tfields = ('global_privacy', 'user')\n\t\twidgets = {'user':forms.HiddenInput()}\n\n\nclass SettingsForm(forms.ModelForm):\t\n\tclass Meta:\n\t\tmodel = Profile\t\t\n\t\tfields = ('alert_new_subscribe', 'alert_new_item', 'alert_suggested_boards', 'user')\n\t\twidgets = {\n\t\t\t'alert_new_subscribe':forms.CheckboxInput(attrs={'hidden': 'hidden', 'id':'alert_new_sub'}), \n\t\t\t'alert_new_item':forms.CheckboxInput(attrs={'hidden': 'hidden', 'id':'alert_new_item'}),\n\t\t\t'alert_suggested_boards':forms.CheckboxInput(attrs={'hidden': 'hidden', 'id':'alert_sug_board'}),\n\t\t\t'user':forms.HiddenInput(),\n\t\t}\n\n\nclass UpdateSocial(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ['social_instagram', 'social_facebook', 'social_twitter', 'social_youtube',\n\t\t'social_twitch', 'social_vimeo', 'social_pinterest', 'social_weibo', 'social_vk', 'user']\n\t\twidgets = {\n\t\t\t'user':forms.HiddenInput(),\n\t\t\t'social_instagram':forms.TextInput(attrs={'placeholder': 'Your Instagram username'}),\n\t\t\t'social_facebook':forms.TextInput(attrs={'placeholder': 'Your Facebook username'}),\n\t\t\t'social_twitter':forms.TextInput(attrs={'placeholder': 'Your Twitter username'}),\n\t\t\t'social_youtube':forms.TextInput(attrs={'placeholder': 'Your Youtube username'}),\n\t\t\t'social_twitch':forms.TextInput(attrs={'placeholder': 'Your Twitch 
username'}),\n\t\t\t'social_vimeo':forms.TextInput(attrs={'placeholder': 'Your Vimeo username'}),\n\t\t\t'social_pinterest':forms.TextInput(attrs={'placeholder': 'Your Pinterest username'}),\n\t\t\t'social_weibo':forms.TextInput(attrs={'placeholder': 'Your Weibo username'}),\n\t\t\t'social_vk':forms.TextInput(attrs={'placeholder': 'Your VK username'}),\n\t\t}","repo_name":"faierbol/wantbrddev","sub_path":"user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20571835706","text":"import numpy as np\n\nfrom PIL import Image\n\nimport math\n\nfrom tqdm import tqdm as ProgressBar\n\n# * Constant\n\nIMG_HEIGHT = 1080\nIMG_WIDTH = 1080\n\nS_POSITION_H = 540\nS_POSITION_W = 540\n\nWAVE_LENGTH = 10\nWAVE_SPEED = 3\nWAVE_AMPITUDE = 255\nSTARTING_PHASE = 0\n\nPI = math.pi\n\npicture = np.zeros((IMG_WIDTH, IMG_HEIGHT), np.uint8)\n\n\ndef Wave(d, t):\n k = 2 * PI / WAVE_LENGTH\n omega = 2 * PI * WAVE_SPEED / WAVE_LENGTH\n return WAVE_AMPITUDE * math.sin(k * d - omega * t + STARTING_PHASE)\n\n\ndef distance(x, y):\n return math.sqrt((x - S_POSITION_H) ** 2 + (y - S_POSITION_W) ** 2)\n\n\nfor x in ProgressBar(range(IMG_WIDTH)):\n for y in range(IMG_HEIGHT):\n picture[x][y] = Wave(distance(x, y), 0)\n\nPictureObj = Image.fromarray(picture)\n\nPictureObj.save(\"./generated/FirstWave.png\")\n","repo_name":"Leomotors/Wave-Interference-Simulation","sub_path":"test/Single_Wave.py","file_name":"Single_Wave.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"34686970611","text":"import cv2 as cv\n\n# read images\n# img = cv.imread('Resources/Photos/cat_large.jpg')\n# cv.imshow('cat', img)\n# cv.waitKey(0) # waits for a key press, 0 means indefinitely\n\n# read video\ncapture = cv.VideoCapture('Resources/Videos/dog.mp4')\n\nwhile True:\n isTrue, frame = capture.read()\n if not isTrue: break\n cv.imshow('Video', frame)\n\n if cv.waitKey(20) & 0xFF == ord('d'):\n break\n\ncapture.release()\ncv.destroyAllWindows()\n\n\n\n\n\n\n\n\n","repo_name":"BrianxYu/openCV","sub_path":"1.read.py","file_name":"1.read.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13753110858","text":"import gym\nfrom drl.environment.atari_wrappers import make_atari, wrap_deepmind, FrameStack\nimport numpy as np\nimport pygame\nfrom pygame import display, time, surfarray\n\ndef grayscale(img):\n return np.stack((img,) * 3, axis=-1)\n\npygame.init()\npygame.font.init()\nfont = pygame.font.Font(None, 36)\n\nclock = time.Clock()\nFPS = 60\nrunning = True\nframe = 84\nk = 4\nscale = 4\nw = frame * k * scale\nh = frame * scale\nscreen = display.set_mode((w, h))\n\nenv = make_atari('BreakoutNoFrameskip-v4')\nenv = wrap_deepmind(env,\n episode_life=True,\n clip_rewards=False,\n frame_stack=False,\n scale=False)\nobs_shape = env.observation_space.shape\nenv = FrameStack(env, 4)\n\nobs = env.reset()\nwhile running:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n display.set_caption(f'{clock.get_fps():.2f}')\n \n obs, reward, done, info = env.step(env.action_space.sample())\n if done:\n obs = env.reset()\n obs = np.asarray(obs)\n xoffset = 0\n for img in obs:\n img = surfarray.make_surface(grayscale(img))\n img = pygame.transform.scale(img, (frame * scale, frame * scale))\n screen.blit(img, (xoffset, 0))\n xoffset += frame * scale\n # surfarray.blit_array(screen, obs[-1])\n \n # screen.fill((255, 255, 255))\n # text = font.render(\"fps\", 1, (10, 10, 10))\n # textpos = text.get_rect(centerx=screen.get_width() / 2)\n # screen.blit(text, textpos)\n \n display.flip()\n # time.delay(100)\n","repo_name":"wumo/drl","sub_path":"drl/environment/test/atari_images_test.py","file_name":"atari_images_test.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11361173617","text":"import config\nimport sys\nimport wsgian\n\nurls = [\t\t\n\t{'pattern': '/', 'module': 'wificode'},\n\t{'pattern': '/pdf/{ssid}/{ssid_pw}', 'module': 'wificode', 'action': 'pdf'},\n]\n\ntry:\n\tconfig = config.config({})\nexcept ImportError:\n\tprint('config not found: run \"python config.py\" then edit site_config.py')\n\tsys.exit(1)\n\napp = wsgian.App(urls, config)\n\nif __name__ == '__main__':\n\twsgian.quickstart(app, config['HTTP_ADDRESS'], int(config['HTTP_PORT']))\n\n","repo_name":"gieffetag/wificode","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3107965783","text":"#\n\nfrom trainClass import trainClass\nfrom models import modelClass\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn import model_selection, preprocessing\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBClassifier\nimport lightgbm as lgb\n\nfrom time import time\n#import seaborn as #sns\n\nfrom sklearn import cross_validation, metrics\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.cluster import KMeans\n\nfrom statsmodels.graphics.gofplots import qqplot_2samples\nfrom sklearn.model_selection import KFold\n\nfrom scipy import stats\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import rcParams\nrcParams['figure.figsize'] = 12, 4\n\n#from catboost import CatBoostClassifier\n#from catboost import CatBoostRegressor\n\n\n#import seaborn as sns\nimport warnings\n\ndef fxn():\n warnings.warn(\"deprecated\", DeprecationWarning)\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fxn()\n#\n# liner regression model\n#\nfrom sklearn import linear_model\n\n#\n# Aug. 4th, LightGBM + 2 x XGBoost => 0.0645426\n# Stack RidgeModel alpha 0.1\n\n# Aug. 6th LightGBM + 2 x XGBoost + catbost => 0.065xxx\n# SGDRegressor(penalty=\"elasticnet\",loss=\"huber\")\n# result much worse ....\n\n\n\n# target columns name\ntarget = \"logerror\"\ngeoColumns = [\"latitude\",\"longitude\"]\n\nmodelCls = modelClass()\n\ndef showMAE(y_true,y_pred):\n print(\"y_predict (target) shape \", y_pred.shape)\n residual = y_true - y_pred\n\n #sns.distplot(residual)\n plt.show()\n\n print(\"Mean:%.6f Std:%.6f\" % ( np.mean(residual), np.std(residual)) )\n loss = metrics.mean_absolute_error(y_true,y_pred)\n print(\"MAE : %.7f\" % loss)\n\n\ndef showMSE(y_test,y_pred):\n print(\"y_predict (target) shape \", y_pred.shape)\n residual = y_test - y_pred\n #train_df[\"residual\"] = residual\n\n loss = metrics.mean_squared_error(y_test,y_pred)\n print(\"RMSE : %.7f\" % np.sqrt( loss ))\n print(\"MSE : %.7f\" % loss)\n\n\n\n\n\ndef get_features1(df):\n #df.loc[:,\"transactiondate\"] = pd.to_datetime(df[\"transactiondate\"])\n df = df.assign( transactiondate = lambda x: pd.to_datetime(x.transactiondate) )\n df = df.assign( transactiondate_year = lambda x: x.transactiondate.dt.year )\n df = df.assign( Month = lambda x: x.transactiondate.dt.month )\n df = df.assign( transactiondate = lambda x: x.transactiondate.dt.quarter )\n df = df.fillna(-1.0)\n return df\n\n\n\ndef MAE(y_true,y_pred):\n mse = metrics.mean_absolute_error(y_true, y_pred)\n #print 'MAE: %.8f' % mse\n return mse\n\ndef two_score(y_true,y_pred):\n score = MAE(y_true,y_pred) #set score here and not below if using MSE in GridCV\n return score\n\ndef two_scorer():\n return metrics.make_scorer(two_score, greater_is_better=False)\n # change for false if using MSE\n\ndef pahse1_gridSearch(x_train, y_train):\n\n estimator = lgb.LGBMRegressor(num_leaves=512)\n\n # run randomized search\n\n param_grid = {\n 'learning_rate': stats.uniform() ,\n 'num_leaves': [512],\n 'max_depth': stats.randint(low=3,high=10),\n 'max_bin' : stats.randint(low=10,high=70),\n 'boosting_type' : ['gbdt'],\n 'objective' : ['regression'],\n 'metric' : ['l1'],\n 'bagging_fraction' : stats.uniform(),\n 'bagging_freq' : stats.randint(low=10,high=100),\n 'sub_feature' : stats.uniform(),\n 'min_data' : stats.randint(low=100,high=1000),\n 'min_hessian' : stats.uniform(),\n 'feature_fraction_seed' : [2],\n 'bagging_seed' : [3]\n\n }\n n_iter_search = 20\n random_search = 
RandomizedSearchCV(estimator, param_distributions=param_grid,\n                                       n_iter=n_iter_search, scoring=two_scorer(),\n                                       error_score=0.01 )\n    start = time()\n    print(\"-\"*30)\n\n    random_search.fit(x_train, y_train)\n    print(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n          \" parameter settings.\" % ((time() - start), n_iter_search))\n    #report(random_search.cv_results_)\n\n    print(\"+\")\n    print(\" Best parameters set found on development set:\")\n    print(random_search.best_params_)\n\n    return random_search\n\ndef report(results, n_top=3):\n    for i in range(1, n_top + 1):\n        candidates = np.flatnonzero(results['rank_test_score'] == i)\n        for candidate in candidates:\n            print(\"Model with rank: {0}\".format(i))\n            print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n                  results['mean_test_score'][candidate],\n                  results['std_test_score'][candidate]))\n            print(\"Parameters: {0}\".format(results['params'][candidate]))\n            print(\"\")\n\n\n\ndef gridSearchCVL_LGB(GridSearchSW=False,add_data_sw=0):\n\n    trainCls = trainClass(test=True)\n\n    params_list = modelCls.getModelParams()\n    params_list = [ v for (k,v) in params_list.items() if \"lightgbm\" in k ]\n\n    kaggle_data = trainCls.kaggleSumbmitData()\n    x_train, y_train, x_test = trainCls.makeTrainDataForLightGBM(add_data_sw)\n\n    params_list = modelCls.getModelParams()\n    params_list = [ v for (k,v) in params_list.items() if \"lightgbm\" in k ]\n    row_num, feat_num = x_train.shape\n    _n_splits = 5\n    kf = KFold(n_splits=_n_splits, shuffle=True,random_state=42)\n    print(\" number of folds .... 5\")\n    print(\"Sample test shape\", x_test.shape)\n    base_models_length = len(params_list)\n\n    params = params_list[0]\n\n    test_dates = ['2016-10-01','2016-11-01','2016-12-01','2017-10-01','2017-11-01','2017-12-01']\n    test_columns = ['201610','201611','201612','201710','201711','201712']\n    S_test_i = np.zeros( (_n_splits, x_test.shape[0], len(test_columns)) )\n\n    for j, (train_idx, test_idx) in enumerate( kf.split(x_train) ):\n        X_train = x_train[train_idx]\n        y_true = y_train[train_idx]\n        X_holdout = x_train[test_idx]\n        y_holdout = y_train[test_idx]\n\n        print(\"-\"*30)\n        print(\" FOLD : %d \" % j)\n        print(\" train shape ......\")\n        print(X_train.shape,y_true.shape)\n\n        if not GridSearchSW:\n            d_train = lgb.Dataset(X_train, label=y_true)\n            print(\"\\nFitting LightGBM model ...\")\n            clf = lgb.train(params, d_train, 430)\n        else:\n            print(\" RandomizedSearchCV ...\")\n            clf = pahse1_gridSearch(X_train, y_true)\n\n        y_holdout_pred = clf.predict(X_holdout)\n        loss = metrics.mean_absolute_error(y_holdout,y_holdout_pred)\n        print(\"-\"*30)\n        print(\"\")\n        print(\" MAE on training Validation (X_holdout) .. %.7f\" % loss)\n\n        for i in range(len(test_dates)):\n            print(\"-\"*30)\n            print(\" set test dates .....\", test_columns[i])\n            YYYY = float(test_columns[i][:4])\n            MM = float(test_columns[i][-2:])\n            x_test[:,-2] = YYYY\n            x_test[:,-1] = MM\n            print(\"evaluation for x_test .... \")\n            y_hat = clf.predict(x_test)\n            S_test_i[j,:,i] = y_hat\n\n    cv_mean_result = S_test_i.mean(0)\n    print(\"** cv mean result .... **\")\n    print(cv_mean_result.shape)\n    y_hat_true = kaggle_data[\"201610\"].values\n    y_hat0 = cv_mean_result[:,0]  # CV-averaged predictions for the 201610 column\n    loss = metrics.mean_absolute_error(y_hat_true,y_hat0)  # score the CV mean, not the last fold's y_hat\n    print(\"-\"*30)\n    print(\"\")\n    print(\" MAE on Kaggle Submit data.. %.7f\" % loss)\n\n\ndef makeSubmit(y_hat,properties):\n\n    y_pred = []\n    print(\"-\"*30)\n    print(\"length of y_hat ...\", len(y_hat))\n\n    for i,predict in enumerate(y_hat):\n        y_pred.append(str(round(predict,4)))\n    y_pred=np.array(y_pred)\n\n\n    output = pd.DataFrame({'ParcelId': properties['parcelid'].astype(np.int32),\n            '201610': y_pred, '201611': y_pred, '201612': y_pred,\n            '201710': y_pred, '201711': y_pred, '201712': y_pred})\n    # set col 'ParcelId' to first col\n    cols = output.columns.tolist()\n    cols = cols[-1:] + cols[:-1]\n    output = output[cols]\n    from datetime import datetime\n\n    print(\" create file ....\")\n    output.to_csv('sub_stack{}.csv'.format(datetime.now().strftime('%Y%m%d_%H%M%S')), index=False)\n    print(\" done ....\")\n\n\n\ndef main():\n\n    gridSearchCVL_LGB(False,2) # additional features on Build Year\n    gridSearchCVL_LGB(False,1) # additional features\n    gridSearchCVL_LGB()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"scrambleegg7/zillow","sub_path":"lightGBM_GridSearch.py","file_name":"lightGBM_GridSearch.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6066134578","text":"import numpy as np\nimport fma.utils as utils\nimport pandas as pd\nimport algorithms.util.PrincipalComponentAnalysis as pca\n\n\nclass FmaDataLoader:\n\n SUBSETS = ['small', 'medium', 'large']\n\n FEATURES = ['chroma_stft', 'chroma_cqt', 'chroma_cens', 'tonnetz', 'mfcc', 'rmse', 'zcr', 'spectral_centroid',\n 'spectral_bandwidth', 'spectral_contrast', 'spectral_rolloff']\n\n ATTRIBUTES = [\"kurtosis\", \"mean\", \"min\", \"max\", \"skew\", \"median\", \"std\"]\n\n GENRES = ['Rock', 'Electronic', 'Experimental', 'Hip-Hop', \"Folk\", \"Instrumental\", \"Pop\", \"International\",\n \"Classical\", \"Jazz\", \"Country\", \"Soul-RnB\", \"Spoken\", \"Blues\", \"Easy Listening\", \"Old-Time / Historic\"]\n\n def __init__(self, path_prefix='.'):\n self.__load_data(path_prefix)\n\n def __load_data(self, path_prefix):\n self.tracks = utils.load(path_prefix + \"/tracks.csv\")\n self.genres = utils.load(path_prefix + \"/genres.csv\")\n self.features = utils.load(path_prefix + \"/features.csv\")\n\n def load_specific_data(self, f_size, f_feature='all', f_genre='all', f_attributes='all'):\n small = self.tracks['set', 'subset'] <= f_size\n\n if f_genre != 'all' and f_feature != 'all' and f_attributes != 'all':\n genre = self.tracks['track', 'genre_top'] == f_genre\n X = self.features.loc[small & genre, f_feature][f_attributes]\n y = self.tracks.loc[small & genre, ('track', 'genre_top')]\n\n elif f_feature != 'all' and f_attributes != 'all':\n X = self.features.loc[small, f_feature][f_attributes]\n y = self.tracks.loc[small, ('track', 'genre_top')]\n\n elif f_genre != 'all':\n genre = self.tracks['track', 'genre_top'] == f_genre\n X = self.features.loc[small & genre]\n y = self.tracks.loc[small & genre, ('track', 'genre_top')]\n\n else:\n X = self.features.loc[small]\n y = self.tracks.loc[small, ('track', 'genre_top')]\n\n return X, y.values\n\n def load_split_data(self):\n small = self.tracks['set', 'subset'] <= 'small'\n\n train = self.tracks['set', 'split'] == 'training'\n val = self.tracks['set', 'split'] == 'validation'\n test = self.tracks['set', 'split'] == 'test'\n\n y_train = self.tracks.loc[small & train, ('track', 'genre_top')]\n y_test = self.tracks.loc[small & test, ('track', 'genre_top')]\n X_train = self.features.loc[small & train, 'mfcc']\n X_test = self.features.loc[small & test, 'mfcc']\n\n return X_train, y_train, X_test, y_test\n\n\ndef main():\n fma = FmaDataLoader('./data')\n # X, y = fma.load_specific_data(f_size=fma.SUBSETS[0], f_feature=fma.FEATURES[0], f_attributes=fma.ATTRIBUTES[0])\n X_kurtosis, _ = fma.load_specific_data(f_size=fma.SUBSETS[0], f_feature='mfcc', f_attributes='kurtosis')\n X_mean, _ = fma.load_specific_data(f_size=fma.SUBSETS[0], f_feature='mfcc', f_attributes='mean')\n X_median, _ = fma.load_specific_data(f_size=fma.SUBSETS[0], f_feature='mfcc', f_attributes='median')\n X_std, y = fma.load_specific_data(f_size=fma.SUBSETS[0], f_feature='mfcc', f_attributes='std')\n\n X = np.column_stack((X_kurtosis, X_mean, X_median, X_std, y))\n print(X.shape, X[:, :-1].shape)\n\n pcaObj = pca.PrincipalComponentAnalysis()\n P, num_of_pcs = pcaObj.pca(np.array(X[:, :-1], dtype=float))\n\n df = pd.DataFrame(P)\n df.to_csv('./data/mfcc_specific_features_pca.csv')\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"kakella/machine-learning-ucsc-extension-team-project","sub_path":"fma/FmaDataLoader.py","file_name":"FmaDataLoader.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1541132249","text":"from rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import *\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication,SessionAuthentication\nfrom api.pay import PayClass\nfrom rest_framework import generics\nfrom .models import Wallet\nfrom .serializers import WalletSerializer,TransactionSerializer,WithdrawalSerializer,TransferSerializer\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nfrom account.models import UserProfile\nfrom django.utils import timezone\n\n#Basic functions\ndef is_user_online(user_id):\n user = User.objects.get(id=user_id)\n user_profile, created = UserProfile.objects.get_or_create(user=user)\n user_profile = UserProfile.objects.filter(user_id=user_id, is_online=True).first()\n if user_profile:\n return True\n else:\n return False\n \ndef send_offline_message(sender, recipient, message):\n offline_message = OfflineMessage.objects.create(\n sender=sender,\n recipient=recipient,\n message=message,\n timestamp=timezone.now()\n )\n\n#Wallet details and Top-up\nclass WalletDetailView(generics.RetrieveAPIView):\n serializer_class = WalletSerializer\n permission_classes = [IsAuthenticated]\n authentication_classes = [SessionAuthentication,TokenAuthentication]\n def get_object(self):\n user_wallet = Wallet.objects.get(user=self.request.user)\n return user_wallet \nclass TransactionListView(generics.ListAPIView):\n serializer_class = TransactionSerializer\n permission_classes = [IsAuthenticated]\n authentication_classes = [SessionAuthentication,TokenAuthentication]\n def get_queryset(self):\n user_transactions = Transaction.objects.filter(user=self.request.user)\n return user_transactions\nclass TopUpView(APIView):\n permission_classes = [IsAuthenticated]\n authentication_classes = [SessionAuthentication,TokenAuthentication]\n \n def post(self, request):\n try:\n wallet = Wallet.objects.get(user=request.user)\n response_data = self.momo_pay(request)\n reference = response_data['ref']\n print(reference)\n wallet.reference = reference\n wallet.save()\n except Wallet.DoesNotExist:\n response_data = self.momo_pay(request)\n reference = response_data.get('ref')\n wallet = Wallet.objects.create(user=request.user, reference=reference)\n\n # Verify Momo transaction\n wallet = Wallet.objects.get(user=request.user)\n txn = wallet.reference\n verification = self.verify_momo_transaction(txn)\n verified_amount = verification['amount']\n\n # Update wallet balance and save\n wallet.balance += int(verified_amount)\n wallet.save()\n\n # Create transaction record\n transaction = Transaction.objects.create(\n user=request.user,\n transaction_type='top_up',\n amount=verified_amount\n )\n transaction.save()\n\n return Response({'message': 'Wallet topped up successfully'})\n\n def verify_momo_transaction(self, txn):\n response_data = PayClass.verifymomo(txn)\n return response_data\n \n def momo_pay(self, request):\n amount = request.data.get(\"amount\")\n currency = request.data.get(\"currency\")\n txt_ref = request.data.get(\"txt_ref\")\n phone_number = request.data.get(\"phone_number\")\n payer_message = request.data.get(\"payer_message\")\n \n response_data = PayClass.momopay(amount, currency, txt_ref, phone_number, payer_message)\n\n return response_data\n\n\nclass WithdrawalView(generics.CreateAPIView):\n serializer_class = WithdrawalSerializer\n permission_classes = [IsAuthenticated]\n authentication_classes = 
[SessionAuthentication,TokenAuthentication]\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n user_wallet = Wallet.objects.get(user=request.user)\n\n # Extract data from serializer\n amount = serializer.validated_data['amount']\n currency = serializer.validated_data['currency']\n phone_number = serializer.validated_data['phone_number']\n txt_ref = serializer.validated_data['txt_ref']\n\n if user_wallet.balance >= int(amount):\n response_data = PayClass.request_to_transfer(amount, currency, txt_ref, phone_number)\n reference_id = response_data['ref']\n response_data = PayClass.check_disbursement_status(reference_id)\n\n withdrawal_amount = response_data['amount'] # Store the withdrawal amount for response\n\n user_wallet.balance -= int(withdrawal_amount)\n user_wallet.save()\n\n # Create a Transaction record\n transaction = Transaction.objects.create(\n user=request.user,\n transaction_type='withdrawal',\n amount=withdrawal_amount\n )\n transaction.save()\n\n return Response({\"message\": \"Withdrawal successful\"})\n else:\n return Response({\"message\": \"Insufficient balance\"},status=400)\n\n\n#Inter Wallet Transfer with real-time updates on both ends\nclass TransferFromWalletView(generics.CreateAPIView):\n permission_classes = [IsAuthenticated]\n authentication_classes = [SessionAuthentication,TokenAuthentication]\n serializer_class = TransferSerializer\n \n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n \n # Extract data from serializer\n amount = serializer.validated_data['amount']\n receiver_username = serializer.validated_data['reciever']\n \n #Get Sender's and receiver's wallet\n receiver = User.objects.get(username=receiver_username)\n sender_wallet = Wallet.objects.get(user=request.user)\n #Some logic\n if receiver.username == request.user.username:\n return Response({\"message\":\"sender and receiver cannot be equal\"}, status=400)\n else:\n pass\n #Take amount from sender's balance\n sender_wallet.balance -= int(amount)\n sender_wallet.save()\n # Create transaction record\n transaction = Transaction.objects.create(\n user=request.user,\n transaction_type=f'Transfer to {receiver_username}',\n amount=amount,\n receiver=receiver_username\n )\n transaction.save()\n # Notify sender and receiver about transaction initiation\n sender_channel_layer = get_channel_layer()\n receiver_channel_layer = get_channel_layer()\n\n async_to_sync(sender_channel_layer.group_send)(\n f\"user_{request.user.id}_group\",\n {\"type\": \"transaction.initiation\"}\n )\n async_to_sync(receiver_channel_layer.group_send)(\n f\"user_{receiver.id}_group\",\n {\"type\": \"transaction.initiation\"}\n )\n if is_user_online(user_id=receiver.id):\n pass\n else:\n send_offline_message(sender=request.user, recipient=receiver, message='Transaction initiated')\n\n return Response({\"message\": \"Transfer initiated\"})\n \nclass VerificationView(APIView):\n \n def get(self, request, *args, **kwargs):\n transaction = Transaction.objects.filter(user=request.user).last()\n receiver = User.objects.get(username=transaction.receiver)\n amount = transaction.amount\n self.send_verification_notification(request, receiver, amount)\n return Response({\"message\": \"Verification notification sent\"})\n \n def send_verification_notification(self, request, receiver, amount):\n \n #Get receiver's wallet\n receiver_wallet, created = 
Wallet.objects.get_or_create(user=receiver)\n \n #Add Money to receiver's balance\n receiver_wallet.balance += int(amount)\n receiver_wallet.save()\n \n #Create transaction for receiver\n transaction = Transaction.objects.create(\n user=receiver,\n transaction_type=f'Money_Received',\n amount=amount\n )\n transaction.save()\n # Notify sender and receiver about transaction verification\n sender_channel_layer = get_channel_layer()\n receiver_channel_layer = get_channel_layer()\n\n async_to_sync(sender_channel_layer.group_send)(\n f\"user_{request.user.id}_group\",\n {\"type\": \"transaction.verification\"}\n )\n async_to_sync(receiver_channel_layer.group_send)(\n f\"user_{receiver.id}_group\",\n {\"type\": \"transaction.verification\"}\n )\n #Save User's message if offline\n if is_user_online(user_id=receiver.id):\n pass\n else:\n send_offline_message(sender=request.user, recipient=receiver, message='Transaction Verified')\n \nclass ReversalView(APIView):\n \n def get(self, request, *args, **kwargs):\n transaction = Transaction.objects.filter(user=request.user).last()\n receiver = User.objects.get(username=transaction.receiver)\n amount = transaction.amount\n self.send_reversal_notification(request, receiver, amount)\n return Response({\"message\": \"Reversal notification sent\"})\n \n def send_reversal_notification(self, request, receiver, amount):\n \n #Get Sender's wallet\n sender_wallet = Wallet.objects.get(user=request.user)\n \n #Update Sender's wallet\n sender_wallet.balance += int(amount)\n sender_wallet.save()\n \n #Create record for sender\n transaction = Transaction.objects.create(\n user=request.user,\n transaction_type=f'Money_Reversed',\n amount=amount\n )\n transaction.save()\n # Notify sender and receiver about transaction reversal\n sender_channel_layer = get_channel_layer()\n receiver_channel_layer = get_channel_layer()\n\n async_to_sync(sender_channel_layer.group_send)(\n f\"user_{request.user.id}_group\",\n {\"type\": \"transaction.reversal\"}\n )\n async_to_sync(receiver_channel_layer.group_send)(\n f\"user_{receiver.id}_group\",\n {\"type\": \"transaction.reversal\"}\n )\n if is_user_online(user_id=receiver.id):\n pass\n else:\n send_offline_message(sender=request.user, recipient=receiver, message='Transaction Reversed')\n \n ","repo_name":"DeAgusco/MTN-hackathon-backend","sub_path":"backend/wallet_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
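The transfer view above debits the sender without checking the balance and without row locking, so concurrent requests can drive a wallet negative. A hardened sketch of just the debit step; the `Wallet` model is the one from the entry, while the function itself is hypothetical:

```python
from django.db import transaction as db_transaction
from rest_framework.response import Response

def debit_sender(request, amount):
    """Atomically debit the requesting user's wallet, refusing overdrafts."""
    with db_transaction.atomic():
        # row-level lock: two concurrent transfers cannot both pass the check
        wallet = Wallet.objects.select_for_update().get(user=request.user)
        if wallet.balance < int(amount):
            return Response({"message": "Insufficient balance"}, status=400)
        wallet.balance -= int(amount)
        wallet.save(update_fields=["balance"])
    return Response({"message": "Transfer initiated"})
```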
+{"seq_id":"13419120458","text":"import librosa\nimport scipy.signal as signal\nimport numpy as np\n\naudio_sample, sampling_rate = librosa.load(\"akmu.wav\", sr = None)\n\nS = np.abs(librosa.stft(audio_sample, n_fft=1024, hop_length=512, win_length = 1024, window=signal.hann))\npitches, magnitudes = librosa.piptrack(S=S, sr=sampling_rate)\n\nshape = np.shape(pitches)\nnb_samples = shape[0]\nnb_windows = shape[1]\n\n# FFT 결과를 plot\nimport matplotlib.pyplot as plt\nimport librosa.display\n\n#멜 스펙토그램\nD = librosa.feature.melspectrogram(S=S, sr=sampling_rate)\nD_dB = librosa.power_to_db(D, ref=np.max)\n\nplt.figure(figsize=(12, 4))\nlibrosa.display.specshow(D_dB)\nplt.ylabel('mel')\nplt.xlabel('time')\nplt.title('Mel-frequency spectrogram')\nplt.colorbar()\nplt.tight_layout()\nplt.show()","repo_name":"Damnun/analysis_Pitch_Accuracy_Librosa","sub_path":"python/mel.py","file_name":"mel.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"31335825041","text":"\"\"\"\nparty 3 - E3\ncreate a method to allow the user to play against alpha\n\"\"\"\nimport chess\nfrom alpha import gagnantAmiAlphaBeta_user\n\n\n'''\nrole : reads a move given by the user\ninput: b: the board\noutput : the move\n'''\ndef getMove(b):\n print(\"choose on of the legal moves\")\n print(\"legal moves\")\n print([m for m in b.generate_legal_moves()])\n move = input('Enter your move: ')\n move = chess.Move.from_uci(move)\n while not (b.is_legal(move)):\n move = input('Enter your move: ')\n move = chess.Move.from_uci(move)\n return move\n\n\n# ---------human vs Alpha------------------\n'''\nrole: plays the turn of the human\ninput: b: the chess board ; limit: the limit of the depth;\n AlphaLastMove: the move made by alpha in the turn before the last turn\n counterAlpha : a counter used to only memorise one move of alpha each two turns\noutput: void , calls matchHumanVsAlpha_alpha to give the turn to alpha\n'''\ndef matchHumanVsAlpha_human(b, limit, AlphaLastMove, counterAlpha=0):\n print(\"----------\")\n print(b)\n if b.is_game_over():\n print(\"Resultat : \", b.result())\n return\n move = getMove(b)\n b.push(move)\n matchHumanVsAlpha_alpha(b, limit, AlphaLastMove, counterAlpha)\n b.pop()\n\n\n'''\nrole: plays the turn of the AI using alpha\ninput: b: the chess board ; limit: the limit of the depth;\n AlphaLastMove: the move made by alpha in the turn before the last turn\n counterAlpha : a counter used to only memorise one move of alpha each two turns\noutput: void , calls matchHumanVsAlpha_human to give the turn to alpha\n'''\ndef matchHumanVsAlpha_alpha(b, limit, AlphaLastMove, counterAlpha=0):\n print(\"----------\")\n print(b)\n if b.is_game_over():\n print(\"Resultat : \", b.result())\n return\n move = gagnantAmiAlphaBeta_user(b, limit)\n b.push(move)\n if counterAlpha % 2 == 0:\n matchHumanVsAlpha_human(b, limit, move, counterAlpha + 1)\n else:\n matchHumanVsAlpha_human(b, limit, AlphaLastMove, counterAlpha + 1)\n b.pop()\n\n\n'''\nrole: starts the match by calling matchHumanVsAlpha_human\ninput: b: the chess board ; limit: the limit of the depth;\noutput: void , calls matchHumanVsAlpha_human to give the turn to alpha\n'''\ndef matchHumanVsAlpha(b, limit):\n matchHumanVsAlpha_human(b, limit, None)\n","repo_name":"Abrahim-BAMATRAF/AI_chess","sub_path":"humanVsAlpha.py","file_name":"humanVsAlpha.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6505419517","text":"# 230309 22866 탑 보기\n# idx <= 100,000에 있는 건물이 양쪽으로 보이는 다른 건물의 개수와 가장 가까운 건물 번호 출력\n\nimport sys\n\ninput = sys.stdin.readline\n\nn = int(input())\nbuildings = list(map(int,input().split()))\n\n# 왼쪽부터 stack으로 보이는 건물 찾기\n# 오른쪽부터 stack으로 보이는 건물 찾기\n# 각 idx에 도착하면 보이는 건물들 입력\n# n*2\nanswer = [[] for _ in range(n)]\nstack = []\nnearestBuilding = [sys.maxsize]*n\nbuildingCount = [0]*n\n\nfor idx in range(n):\n building = buildings[idx]\n while stack and stack[-1][1] <= building:\n candidate = stack.pop()\n\n if stack:\n buildingCount[idx] += len(stack)\n nearestBuilding[idx] = min(nearestBuilding[idx],stack[-1][0],key=lambda x:abs(x-idx))\n stack.append([idx,building])\n\nstack = []\n\nfor idx in range(n-1,-1,-1):\n building = buildings[idx]\n while stack and stack[-1][1] <= building:\n candidate = stack.pop()\n if stack:\n buildingCount[idx] += len(stack)\n nearestBuilding[idx] = min(nearestBuilding[idx], stack[-1][0], key=lambda x: abs(x - idx))\n stack.append([idx,building])\n\n# 여기서 오버헤드 발생\n# 굳이 다 계산해논거에 min안해도 됨\n# 계산하면서 건물개수, 가까운 건물 구할수 있네\n# 어차피 스택이면 마지막 요소가 가장 가까운거니까..\nfor idx in range(len(answer)):\n print(buildingCount[idx], end=\" \")\n if nearestBuilding[idx] < sys.maxsize:\n print(nearestBuilding[idx]+1)\n else:\n print()\n\n# 휴.. 이게 골드3 적당하지 했다가 된통 뚜드려맞았네","repo_name":"seho27060/TIL","sub_path":"Problem-Sovling/2023-03/230309_Algorithm/BOJ_22866_탑-보기.py","file_name":"BOJ_22866_탑-보기.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23459685331","text":"import sys\r\nf,out = open(\"input\", \"r\"),open(\"output\", \"w\")\r\nT = int(f.readline())\r\ndef div(x,d):\r\n\ty=x/d\r\n\tif y!=int(y):\r\n\t\treturn int(y)+1\r\n\treturn int(y)\r\nfor t in range(T):\r\n\tD = int(f.readline())\r\n\tP = [int(w) for w in f.readline().split()]\r\n\tx = max(P)\r\n\tfor i in range(1,max(P)):\r\n\t\ty = 0\r\n\t\tfor j in [k for k in P if k>i]:\r\n\t\t\ty += div(j,i) - 1\r\n\t\tif y+i end:\r\n\t\t\treturn None\r\n\r\n\t\tmid = int((start + end) / 2)\r\n\t\t# print start, mid, end\r\n\r\n\t\tif (tabOfElems[mid] < searchedValue):\r\n\t\t\tstart = mid + 1\r\n\t\telif (tabOfElems[mid] > searchedValue):\r\n\t\t\tend = mid - 1\r\n\t\telse:\r\n\t\t\treturn mid\r\n\r\ndef bsRec(tabOfElems, searchedValue):\r\n\treturn bsRecInner(tabOfElems, searchedValue, 0, len(tabOfElems))\r\n\r\ndef bsRecInner(tabOfElems, searchedValue, start, end):\r\n\tif start >= end:\r\n\t\treturn None\r\n\r\n\tmid = int((start + end) / 2)\r\n\r\n\tif (searchedValue > tabOfElems[mid]):\r\n\t\treturn bsRecInner(tabOfElems, searchedValue, mid + 1, end)\r\n\telif (searchedValue < tabOfElems[mid]):\r\n\t\treturn bsRecInner(tabOfElems, searchedValue, start, mid)\r\n\telse:\r\n\t\treturn mid\r\n\r\n\r\ndef binarySearchVariantFindingMinimum(tabOfElems, start, end):\r\n\r\n\tmid = start + (end - start) / 2\r\n\r\n\tif (start > end):\r\n\t\treturn NOT_FOUND\r\n\r\n\tif(start == end):\r\n\t\treturn mid\r\n\r\n\tif (mid > start and tabOfElems[mid] < tabOfElems[mid - 1]):\r\n\t\treturn mid\r\n\r\n\tif(mid < end and tabOfElems[mid] > tabOfElems[mid + 1]):\r\n\t\treturn mid + 1\r\n\r\n\telif(tabOfElems[mid] < tabOfElems[end]):\r\n\t\treturn binarySearchVariantFindingMinimum(tabOfElems, start, end)\r\n\telse:\r\n\t\treturn binarySearchVariantFindingMinimum(tabOfElems, mid + 1, end)\r\n\r\ndef interpolationSearch(tabOfElems, searchedValue):\r\n\r\n\tstart = 0\r\n\tend = len(tabOfElems)\r\n\tend = end - 1\r\n\r\n\r\n\twhile (start < end):\r\n\t\tmid = start + ((end - start) / (tabOfElems[end] - tabOfElems[start])) * (searchedValue - tabOfElems[start])\r\n\r\n\t\tif (tabOfElems[mid] < searchedValue):\r\n\t\t\tstart = mid + 1\r\n\t\telif (searchedValue < tabOfElems[mid]):\r\n\t\t\tend = mid - 1\r\n\t\telse:\r\n\t\t\treturn mid\r\n\r\n\treturn NOT_FOUND\r\n\r\ndef test(tab, fun):\r\n\tpass\r\n\r\ndef bsFun(tab, elem, startIndex):\r\n\tif not tab:\r\n\t\treturn None\r\n\r\n\tmid = int(len(tab) / 2)\r\n\t# print tab, mid\r\n\r\n\tif elem < tab[mid]:\r\n\t\treturn bsFun(tab[ : mid], elem, startIndex)\r\n\telif elem > tab[mid]:\r\n\t\treturn bsFun(tab[mid + 1 : ], elem, startIndex + mid + 1)\r\n\telse:\r\n\t\treturn startIndex + mid\r\n\r\ndef main():\r\n\ttab = [1, 9, 14, 66, 75, 77, 83, 97]\r\n\ttab.sort()\r\n\r\n\t# res = binarySearchIteratively(tab, 96)\r\n\tprint(bsFun(tab, 15, 0))\r\n\tprint(bsFun(tab, 1, 0))\r\n\tprint(bsFun(tab, 97, 0))\r\n\tprint(bsFun(tab, 77, 0))\r\n\r\n\t# print res\r\n\t# if res:\r\n\t# \tprint tab[res]\r\n\r\n\t# start = 0\r\n\t# end = len(tab)\r\n\t# print(binarySearchRecursive(tab, 97, start, end))\r\n\r\n\t# print(end)\r\n\t# tmpEnd = end - 1\r\n\t# print(tmpEnd)\r\n\t# #print(binarySearchVariantFindingMinimum(tab, start, tmpEnd))\r\n\r\n\t# print(interpolationSearch(tab, 14))\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","repo_name":"michal0janczyk/interview_preparation","sub_path":"Fundamentals/Algorithms/Birany search.py","file_name":"Birany 
search.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21687486368","text":"from layers import SoftmaxLayer\nfrom layers import RecurrentLayer\n\nfrom functions import NegativeLogLikelihood\nfrom functions import Softmax\nfrom functions import LeakyRELU\n\nfrom copy import deepcopy\nimport numpy as np\nfrom random import shuffle\n\nclass RecurrentNet:\n def __init__(self, num_inputs, layers=None, cost_func=NegativeLogLikelihood):\n self.num_inputs = num_inputs\n self.num_layers = 0\n self.layer_types = []\n\n if layers is not None:\n self.layers = layers\n self.num_layers = len(self.layers)\n\n for l in layers:\n if isinstance(l, SoftmaxLayer):\n self.layer_types.append(\"soft\")\n elif isinstance(l, RecurrentLayer):\n self.layer_types.append(\"recurr\")\n else:\n self.layers = []\n\n self.cost_func = cost_func\n\n def add(self, layer_type, output_size):\n op = self.num_inputs\n if len(self.layers) > 0:\n op = self.layers[-1].get_output_shape()\n\n layer_shape = (output_size, op)\n if layer_type is \"soft\":\n self.layers.append(SoftmaxLayer(layer_shape))\n elif layer_type is \"recurr\":\n self.layers.append(RecurrentLayer(layer_shape))\n\n self.layer_types.append(layer_type)\n\n self.num_layers+=1\n\n def forget_past(self):\n for l in self.layers:\n if isinstance(l, RecurrentLayer):\n l.forget_past()\n\n def feed_forward(self, network_input):\n for l in self.layers:\n network_input = l.feed_forward(network_input)\n return network_input\n\n def backprop(self, network_input, expected_output):\n curr_z = network_input\n z_activations = [network_input]\n p_z_activations = []\n\n for i, lt, lyr in zip(range(1, self.num_layers + 1), self.layer_types, self.layers):\n if lt == \"recurr\":\n prev_z, curr_z = lyr.get_activations(curr_z)\n z_activations.append(deepcopy(curr_z))\n p_z_activations.append(prev_z)\n elif lt == \"soft\":\n curr_z = lyr.get_activations(curr_z)\n z_activations.append(deepcopy(curr_z))\n\n if not i == self.num_layers:\n # Use softmax for SM layers, otherwise leaky relu\n if lt is \"soft\":\n curr_z = Softmax.func(curr_z)\n else:\n curr_z = LeakyRELU.func(curr_z)\n\n # Store derivatives and activation for output layer\n if self.layer_types[-1] is \"soft\":\n squashed_activations = Softmax.func(deepcopy(curr_z))\n squashed_activations_deriv = Softmax.func_deriv(deepcopy(curr_z))\n else:\n squashed_activations = LeakyRELU.func_deriv(deepcopy(curr_z))\n squashed_activations_deriv = LeakyRELU.func_deriv(deepcopy(curr_z))\n\n # Errors for the last layer\n delta = self.cost_func.delta(squashed_activations,\n squashed_activations_deriv,\n expected_output)\n\n is_conv = True\n if self.layer_types[self.num_layers - 1] is not \"conv\" \\\n and self.layer_types[self.num_layers - 1] is not \"deconv\":\n is_conv = False\n\n delta_w = []\n delta_pw = []\n delta_b = []\n\n cnt = -1\n # Append all the errors for each layer\n for i, lt, lyr, zprev in reversed(zip(range(self.num_layers), self.layer_types, self.layers, z_activations[:-1])):\n if lt is \"soft\":\n dw, db, dlt = lyr.backprop(zprev, delta)\n delta_w.insert(0, dw)\n delta_b.insert(0, db)\n\n delta = dlt\n elif lt is \"recurr\":\n dw, dpw, db, dlt = lyr.backprop(p_z_activations[cnt], zprev, delta)\n delta_w.insert(0, dw)\n delta_pw.insert(0, dpw)\n delta_b.insert(0, db)\n\n delta = dlt\n\n cnt-=1\n\n return np.array(delta_w), np.array(delta_pw), np.array(delta_b)\n\n # Updates the network given a specific minibatch (done by averaging gradients over the minibatch)\n # Args:\n # mini_batch - a list of tuples, (input, expected output)\n # step_size - the amount the network 
should change its parameters by relative to the gradients\n def update_network(self, mini_batch, step_size):\n recurrent_indicies = [False for i in range(self.num_layers)]\n for i, l in enumerate(self.layers):\n if isinstance(l, RecurrentLayer):\n recurrent_indicies[i] = True\n\n gradient_w, gradient_pw, gradient_b = self.backprop(mini_batch[0][0], mini_batch[0][1])\n\n for inp, outp in mini_batch[1:]:\n dgw, dgpw, dgb = self.backprop(inp, outp)\n gradient_w += dgw\n gradient_pw += dgpw\n gradient_b += dgb\n\n # Average the gradients\n gradient_w *= step_size / (len(mini_batch) + 0.00)\n gradient_pw *= step_size / (len(mini_batch) + 0.00)\n gradient_b *= step_size / (len(mini_batch) + 0.00)\n\n cnt = 0\n # Update weights and biases in opposite direction of gradients\n for i, gw, gb, lyr in zip(range(self.num_layers), gradient_w, gradient_b, self.layers):\n if recurrent_indicies[i]:\n lyr.update(-gw, -gradient_pw[cnt], -gb)\n cnt+=1\n else:\n lyr.update(-gw, -gb)\n\n # Evaluates the average cost across the training set\n def evaluate_cost(self, training_set):\n total = 0.0\n for inp, outp in training_set:\n net_outp = self.feed_forward(inp)\n total += self.cost_func.cost(net_outp, outp)\n return total / len(training_set)\n\n # Performs SGD on the network\n # Args:\n # epochs - (int), number of times to loop over the entire batch\n # step_size - (float), amount network should change its parameters per update\n # mini_batch_size - (int), number of training examples per mini batch\n # training_inputs - (list), the list of training inputs\n # expected_outputs - (list), the list of expected outputs for each input\n def stochastic_gradient_descent(self, epochs, step_size, mini_batch_size, training_set):\n # Train\n for ep in range(epochs):\n for x in range(0, len(training_set), mini_batch_size):\n self.update_network(training_set[x:x + mini_batch_size], step_size)\n # Update with progress\n print(\"Epoch: %d Average cost: %f\" % (ep + 1, self.evaluate_cost(training_set)))\n self.forget_past()\n","repo_name":"maxgao18/Neural-Nets","sub_path":"src/neuralnets/recurrent.py","file_name":"recurrent.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
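Two of the fixes applied in the record above are easy pitfalls in general, so here is a small self-contained sketch (not from that repository; the names are invented): on Python 3, reversed() rejects the lazy iterator that zip() returns, and "is" tests object identity rather than string contents.

layer_types = ["recurr", "recurr", "soft"]
layers = ["L0", "L1", "L2"]

# reversed(zip(...)) raises TypeError on Python 3; materialize the zip first.
for lt, lyr in reversed(list(zip(layer_types, layers))):
    print(lt, lyr)

literal = "soft"
built = "".join(["so", "ft"])   # equal contents, usually a distinct object
print(built == literal)         # True: equality compares contents
print(built is literal)         # False in CPython: identity depends on interning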
+{"seq_id":"6701663084","text":"from numpy import *\nfrom sys import argv\n\ndef minimiza_descida_maxima(f, grad, x_inicial, h, max_it, tol):\n x = x_inicial\n x_anterior = x\n for i in xrange(max_it):\n direcao_descida = -grad(x)\n alfa = h\n x = x + alfa * direcao_descida\n if linalg.norm(x - x_anterior) < tol:\n break\n x_anterior = x\n return x\n\ndef main(args):\n f = lambda x: eval(args[0])\n df = lambda x: eval(args[1])\n x_0 = array(eval(args[2]))\n h = float(eval(args[3]))\n tol = float(eval(args[4]))\n max_it = int(eval(args[5]))\n res = minimiza_descida_maxima(f,df,x_0,h,max_it,tol)\n print({'x':res,'f(x)':f(res)})\n \nif __name__ == \"__main__\":\n main(argv[1:])\n","repo_name":"LuisVCSilva/otimizacao_classica","sub_path":"Lista 2/teste/Demo1D/Metodos/minMaximaDescida.py","file_name":"minMaximaDescida.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8307138543","text":"# This will be the common prelude to all of our queries. \n\nimport sys # talk to the operating system \nimport os.path # manipulate paths to files, directories \nimport pickle # read/write pickled python dictionaries \nimport pprint # pretty print JSON\n\ndata_dir = sys.argv[1] # first command-line argument -- the directory of data \n\n# use os.path.join so that path works on both Windows and Unix \nmovies_path = os.path.join(data_dir, 'movies.pickled')\npeople_path = os.path.join(data_dir, 'people.pickled')\n\n# open data dictionary files and un-pickle them \nmoviesFile = open(movies_path, mode= \"rb\")\nmovies = pickle.load(moviesFile)\n\npeopleFile = open(people_path, mode= \"rb\")\npeople = pickle.load(peopleFile)\n\n#####################################\n# write your query code here ...\n\ndef get_person_by_name (str): \n # initialise output \n the_person = {} \n # iterate through all the keys of the people dictionary \n # looking for one with the right name \n for person_id in people.keys():\n if people[person_id]['name'] == str: \n the_person = people[person_id]\n return the_person \n\nname = sys.argv[2] \n\npprint.pprint (get_person_by_name(name))\n","repo_name":"Timothy-G-Griffin/build_databases.cst.cam.ac.uk","sub_path":"DOCtorWho/get_person_by_name.py","file_name":"get_person_by_name.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"71227665155","text":"from flask import Flask, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_admin import Admin\n#from flask_cors import CORS\n\nfrom flask_admin.base import MenuLink\n\nfrom .adminviews import AdminPanel, ProtectedModelView\n\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\nadmin = Admin()\n#cors = CORS(app)\n\ndef create_app():\n '''\n Creating the Flask app and setting its config\n '''\n\n #Creating the flask app\n app = Flask(__name__, instance_relative_config=False)\n\n #setting config variables from DevelopmentConfig class in config file\n app.config.from_object('config.DevelopmentConfig')\n\n #Initiaizaing Plugins\n db.__init__(app)\n login_manager.__init__(app)\n admin.init_app(app, index_view=AdminPanel(name= 'Admin Panel'))\n #cors.__init__(app)\n\n with app.app_context():\n\n from . import model\n\n # Creating tables in the database\n db.drop_all()\n db.create_all()\n\n # inserting dummy data\n sql = open('dump.sql').read()\n db.engine.execute(sql)\n\n db.session.commit()\n\n #Addning Databse Viewss to Admin Panel\n admin.add_view(ProtectedModelView(model.User, db.session, category='Databases', name=\"Users\" ))\n admin.add_view(ProtectedModelView(model.Buyer, db.session, category='Databases', name=\"Buyers\"))\n admin.add_view(ProtectedModelView(model.Deal, db.session, category='Databases', name=\"Deals\" ))\n admin.add_view(ProtectedModelView(model.Plot, db.session, category='Databases', name=\"Plots\" ))\n admin.add_view(ProtectedModelView(model.Notes, db.session, category='Databases', name=\"Notes\" ))\n\n admin.add_view(ProtectedModelView(model.Transaction, db.session, category='Databases', name=\"Transactions\"))\n \n admin.add_link(MenuLink(name='Back to Profile', url='/profile'))\n\n #Adding Routes\n from . import routes\n \n return app \n","repo_name":"Guyroscopic/Prototype_webapp","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23514362271","text":"import re\n\n\n# 1 = L\n# 0 = G\ndef frac(basis, basis3):\n k = len(basis3) * \"0\"\n basis2 = re.sub(\"0\", k, basis)\n basis = re.sub(\"1\", basis3, basis2)\n return basis\n\n\ndef basissen(lengte):\n ret = set()\n for i in range(2 ** lengte, 2 * 2 ** lengte):\n ret.add(str(bin(i)[3:]))\n print(str(bin(i)[3:]))\n return ret\n\n\ndef fractal(basis, aantal):\n basis2 = basis\n for _ in range(aantal - 1):\n basis2 = frac(basis2, basis)\n return basis2\n\n\ndef fracs(basisLengte, aantal):\n ret = set()\n for basis in basissen(basisLengte):\n f = fractal(basis, aantal)\n print(f)\n ret.add(f)\n\n return ret\n\n\ndef test(setje, combinaties):\n i = False\n for combi in combinaties:\n booltje = False\n for getal in setje:\n if combi[getal - 1] == \"0\":\n booltje = True\n if not booltje:\n if i:\n return False\n else:\n i = True\n return setje\n\n\ndef setmaker(aantal, lengte):\n ret = []\n ret3 = []\n for i in range(1, lengte + 1):\n a = []\n a.append(i)\n ret.append(a.copy())\n ret3.extend(ret)\n for _ in range(aantal - 1):\n ret2 = []\n for setje in ret:\n for i in range(setje[-1], lengte + 1):\n a = setje.copy()\n if i not in a:\n a.append(i)\n ret2.append(a.copy())\n ret3.extend(ret2)\n print(ret2)\n return ret2\n\n\ndef Case(a, b, c, caseNr):\n sets = setmaker(c, a ** b)\n combinaties = fracs(a, b)\n for setje in sets:\n if test(setje, combinaties):\n res = test(setje, combinaties)\n ret = \"\"\n for item in res:\n ret += \" \" + str(item)\n print(\"Case #{}: {}\".format(caseNr, ret[1:]))\n return\n print(\"Case #{}: {}\".format(caseNr , \"IMPOSSIBLE\"))\n return\n\n\ndef Case2(a,b,c,caseNr):\n combinaties = fracs(a, b)\n print(combinaties)\n\naantal = int(input())\nfor l in range(aantal):\n inputstr = str(input())\n spl = inputstr.split(\" \")\n ret = \"Case #{}: 1\".format(l + 1)\n for i in range(2,int(spl[0]) + 1):\n ret += \" \" + str(i)\n print(ret)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_180/1427.py","file_name":"1427.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31958444366","text":"import argparse\nimport torch\nimport numpy as np\nimport math\nfrom torch import Tensor, nn\nfrom HAPiCLR.methods.base import BaseMethod\nimport torch.nn.functional as F\nfrom typing import Any, Dict, List, Sequence, Union, Tuple\n\nfrom HAPiCLR.losses.nt_xent_loss import NTXentLoss\nfrom pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR\nfrom HAPiCLR.utils.lars import LARSWrapper\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR\n# from solo.utils.distributed_util import gather_from_all\n# from classy_vision.generic.distributed_util import get_cuda_device_index, get_rank\n# from classy_vision.losses import ClassyLoss, register_loss\n\n\n\nfrom pl_bolts.optimizers.lars import LARS\nfrom pl_bolts.optimizers.lr_scheduler import linear_warmup_decay\n#************************************************************\n# SyncFunction adding to gather all the batch tensors from others GPUs\n#************************************************************\n\n\nclass Downsample(nn.Module):\n def __init__(self):\n super(Downsample, self).__init__()\n self.GA = nn.AdaptiveAvgPool2d((1, 1))\n self.flatten = nn.Flatten(1)\n\n def forward(self, X):\n mask = None\n if type(X) == list:\n mask = X[1]\n X = X[0]\n\n if mask is None:\n X = self.GA(X)\n else:\n # print(X.shape)\n # print(mask.shape)\n X = X.view(X.shape[0], X.shape[1], -1)\n mask = mask.view(mask.shape[0], mask.shape[1], -1)\n nelements = mask.sum(dim=-1)+1\n X = X.sum(dim=-1) / nelements\n\n X = torch.flatten(X, 1)\n return X\n\n## Masking Steps Between Mask and Image\nclass Indexer(nn.Module):\n def __init__(self):\n super(Indexer, self).__init__()\n def forward(self, X, M_f, M_b):\n \"\"\"Indicating the foreground and background feature.\n Args:\n X (torch.Tensor): batch of images in tensor format.\n M_f (torch.Tensor) : batch of foreground mask\n M_b (torch.Tensor) : batch of background mask\n Returns:\n Dict[str, Any]: a dict containing the outputs of the parent and the projected features.\n \"\"\"\n # feature_f = torch.mul(X, M_f)\n # # out['foreground_feature'] = self.downsample_f(out['foreground_feature'])\n # feature_b = torch.mul(X, M_b)\n # # out['background_feature'] = self.downsample_b(out['background_feature'])\n\n feature_f = torch.mul(X , M_f)\n feature_b = torch.mul(X, M_b)\n\n return feature_f, feature_b\n### Two Sucessive Conv 1x1 Layers (reduce the dimension of the channels)\nclass ConvMLP(nn.Module):\n def __init__(self, chan=2048, chan_out = 256, inner_dim = 2048, scale_factor=None):\n super().__init__()\n if scale_factor != None:\n self.net = nn.Sequential(\n nn.Upsample(scale_factor=scale_factor, mode='bilinear'),\n nn.Conv2d(chan, inner_dim, 1),\n nn.BatchNorm2d(inner_dim),\n nn.ReLU(),\n nn.Conv2d(inner_dim, chan_out, 1)\n )\n else:\n self.net = nn.Sequential(\n nn.Conv2d(chan, inner_dim, 1),\n nn.BatchNorm2d(inner_dim),\n nn.ReLU(),\n nn.Conv2d(inner_dim, chan_out, 1)\n )\n\n def forward(self, x):\n x = self.net(x)\n #x = torch.flatten(x, 2)\n return x\n\nloss_types = ['V0','V1','V2','V3','V4','pixel_lavel_ontrastive']\n\nclass HAPiCLR(BaseMethod): \n def __init__(self, \n # optimizer: str,\n # lars: bool,\n # lr: float,\n # weight_decay: float,\n # classifier_lr: float,\n # exclude_bias_n_norm: bool,\n # accumulate_grad_batches: Union[int, None],\n # extra_optimizer_args: Dict,\n # scheduler: str,\n # min_lr: float,\n # warmup_start_lr: float,\n # warmup_epochs: float,\n proj_output_dim: int, proj_hidden_dim: int,\n pixel_hidden_dim: int, 
pixel_output_dim: int, temperature: float, \n loss_type: str, alpha: int = None, gather_distributed_gpus: bool= True, scale_factor=None, \n\n **kwargs):\n \"\"\"Implements MSCRL.\n Args:\n proj_output_dim (int): number of dimensions of the projected features.\n proj_hidden_dim (int): number of neurons in the hidden layers of the projector.\n temperature (float): temperature for the softmax in the contrastive loss.\n loss_type (str): which loss need to use]\n \"\"\"\n\n super().__init__(**kwargs)\n self.temperature = temperature\n self.alpha = alpha\n self.beta = 0.5\n #assert loss_type in loss_types, \"Loss type didn't included\"\n self.loss_type = loss_type\n self.criterion = NTXentLoss(gather_distributed=True, temperature=self.temperature)\n #***********************\n # MLP projector\n #**********************\n self.projector = nn.Sequential(\n #Downsample(),\n nn.Linear(self.features_dim, proj_hidden_dim),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n )\n\n self.downsample = nn.Sequential(\n Downsample()\n )\n #**********************\n # Conv 1x1 projector\n #**********************\n self.scale_factor = scale_factor\n self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear')\n self.indexer = Indexer()\n self.convMLP = ConvMLP(self.features_dim, pixel_hidden_dim, pixel_output_dim, None)\n\n\n # self.optimizer = optimizer\n # self.lars = lars\n # self.lr = lr\n # self.weight_decay = weight_decay\n # self.classifier_lr = classifier_lr\n # self.exclude_bias_n_norm = exclude_bias_n_norm\n # self.accumulate_grad_batches = accumulate_grad_batches\n # self.extra_optimizer_args = extra_optimizer_args\n # self.scheduler = scheduler\n # self.lr_decay_steps = lr_decay_steps\n # self.min_lr = min_lr\n # self.warmup_start_lr = warmup_start_lr\n # self.warmup_epochs = warmup_epochs\n\n\n #************************************\n # Adding default arugments for models\n #************************************\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parent_parser = super(HAPiCLR, HAPiCLR).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"mscrl\")\n\n # projector\n parser.add_argument(\"--proj_output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n parser.add_argument(\"--pixel_output_dim\", type=int, default=256)\n parser.add_argument(\"--pixel_hidden_dim\", type=int, default=2048)\n parser.add_argument(\"--loss_type\", type=str, default=\"byol+f_loss+b_loss\")\n\n # parameters\n parser.add_argument(\"--temperature\", type=float, default=0.5)\n parser.add_argument(\"--alpha\", type=float, default=None)\n parser.add_argument(\"--beta\", type=str, default=\"0.5\")\n parser.add_argument(\"--scale_factor\", type=int, default=None)\n\n # optimizer --> Inherence from base.py\n \n # scheduler --> Inherence from base.py\n\n # For gather embedding from other GPUs\n parser.add_argument(\"--gather_distributed_gpus\", type=bool, default=True, help=\"True If training with >2 else False\")\n\n return parent_parser\n \n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params = [{\"params\": self.projector.parameters()},\n {\"params\": self.convMLP.parameters()}\n ]\n return super().learnable_params + extra_learnable_params\n \n def 
exclude_from_wt_decay(self, named_params, weight_decay, skip_list=(\"bias\", \"bn\")):\n        params = []\n        excluded_params = []\n\n        for name, param in named_params:\n            if not param.requires_grad:\n                continue\n            elif any(layer_name in name for layer_name in skip_list):\n                excluded_params.append(param)\n            else:\n                params.append(param)\n\n        return [\n            {\"params\": params, \"weight_decay\": weight_decay},\n            {\n                \"params\": excluded_params,\n                \"weight_decay\": 0.0,\n            },\n        ]\n\n\n    def configure_optimizers(self) -> Tuple[List, List]:\n        \"\"\"Collects learnable parameters and configures the optimizer and learning rate scheduler.\n\n        Returns:\n            Tuple[List, List]: two lists containing the optimizer and the scheduler.\n        \"\"\"\n\n        # collect learnable parameters\n        idxs_no_scheduler = [\n            i for i, m in enumerate(self.learnable_params) if m.pop(\"static_lr\", False)\n        ]\n\n        # select optimizer\n        if self.optimizer == \"sgd\":\n            optimizer = torch.optim.SGD\n        elif self.optimizer == \"adam\":\n            optimizer = torch.optim.Adam\n        elif self.optimizer == \"adamw\":\n            optimizer = torch.optim.AdamW\n        else:\n            raise ValueError(f\"{self.optimizer} not in (sgd, adam, adamw)\")\n\n        # create optimizer\n        optimizer = optimizer(\n            self.learnable_params,\n            lr=self.lr,\n            weight_decay=self.weight_decay,\n            **self.extra_optimizer_args,\n        )\n        # optionally wrap with lars\n        if self.lars:\n            assert self.optimizer == \"sgd\", \"LARS is only compatible with SGD.\"\n            optimizer = LARSWrapper(\n                optimizer,\n                eta=self.eta_lars,\n                clip=self.grad_clip_lars,\n                exclude_bias_n_norm=self.exclude_bias_n_norm,\n            )\n\n        if self.scheduler == \"none\":\n            return optimizer\n\n        if self.scheduler == \"warmup_cosine\":\n            scheduler = LinearWarmupCosineAnnealingLR(\n                optimizer,\n                warmup_epochs=self.warmup_epochs,\n                max_epochs=self.max_epochs,\n                warmup_start_lr=self.warmup_start_lr,\n                eta_min=self.min_lr,\n            )\n        elif self.scheduler == \"cosine\":\n            scheduler = CosineAnnealingLR(optimizer, self.max_epochs, eta_min=self.min_lr)\n        elif self.scheduler == \"step\":\n            scheduler = MultiStepLR(optimizer, self.lr_decay_steps)\n        else:\n            raise ValueError(f\"{self.scheduler} not in (warmup_cosine, cosine, step)\")\n\n        if idxs_no_scheduler:\n            from functools import partial  # partial is not imported at module level\n            # static_lr is expected to be provided by the solo-learn style utils\n            # this file is adapted from; it is not defined in this module\n            partial_fn = partial(\n                static_lr,\n                get_lr=scheduler.get_lr,\n                param_group_indexes=idxs_no_scheduler,\n                lrs_to_replace=[self.lr] * len(idxs_no_scheduler),\n            )\n            scheduler.get_lr = partial_fn\n\n        return [optimizer], [scheduler]\n\n    \n    def forward(self, X: torch.tensor, *args, **kwargs) -> Dict[str, Any]:\n        \"\"\"Performs the forward pass of the backbone, the projector.\n\n        Args:\n            X (torch.Tensor): a batch of images in the tensor format.\n\n        Returns:\n            Dict[str, Any]:\n                a dict containing the outputs of the parent\n                and the projected features.\n        \"\"\"\n        _, X_, M_f, M_b = X\n\n        out = super().forward(X_, *args, **kwargs)\n        ## Output representation [batch,X1], [batch,X2]\n        z = self.projector(out[\"feats\"])\n        # Indexer needs both masks along with the feature map\n        z_f, z_b = self.indexer(out[\"feats\"], M_f, M_b)\n\n        z_f = self.projector(z_f)\n        z_b = self.projector(z_b)\n        \n        return {**out, \"z\": z, \"z_f\": z_f, \"z_b\": z_b}\n    \n    def shared_step(self, batch, batch_idx):\n        \n        \"\"\"Training step for SimCLR reusing BaseMethod training step.\n        Args:\n            batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n                [X] is a list of size num_crops containing batches of images.\n            batch_idx (int): index of the batch.\n        Returns:\n            torch.Tensor: total loss composed of SimCLR loss and classification loss.\n        \"\"\"\n        \n        indexes, X, M_f, M_b = batch\n        out = super().training_step(X, batch_idx)\n        class_loss = out[\"loss\"]\n\n        feats = out[\"feats\"]\n        #print(feats.shape)\n        \n        z = [self.projector(f) for f in feats]\n        #z[[b, x1], [b, x2]]\n        #print(z.shape)\n        # get projection representations\n        z1 = z[0]\n        #print(z1.shape)\n        z2 = z[1]\n        loss = self.criterion(z1, z2)\n        #loss = self.nt_xent_loss(z1, z2, self.temperature)\n        return loss, class_loss\n\n\n    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor: \n        loss, class_loss = self.shared_step(batch, batch_idx)    \n        metrics={\n            \"Contrastive_loss\": loss\n        }\n        self.log_dict(metrics, on_epoch=True, sync_dist=True)\n        \n        return loss + class_loss\n\n    \n","repo_name":"TranNhiem/HAPiCLR","sub_path":"HAPiCLR/methods/hapiclr.py","file_name":"hapiclr.py","file_ext":"py","file_size_in_byte":13214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
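The Downsample module in the record above implements masked global average pooling: given a foreground or background mask, it averages a [B, C, H, W] feature map over only the selected spatial positions. A standalone sketch of the idea (the function name is mine; the original guards empty masks by adding 1 to the pixel count rather than clamping):

import torch

def masked_global_avg_pool(feats, mask):
    # feats: [B, C, H, W]; mask: [B, 1, H, W] with values in {0, 1}
    feats = feats.flatten(2)                 # [B, C, H*W]
    mask = mask.flatten(2)                   # [B, 1, H*W]
    counts = mask.sum(dim=-1).clamp(min=1)   # avoid division by zero
    return (feats * mask).sum(dim=-1) / counts   # [B, C]

x = torch.randn(2, 8, 4, 4)
m = (torch.rand(2, 1, 4, 4) > 0.5).float()
print(masked_global_avg_pool(x, m).shape)    # torch.Size([2, 8])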
+{"seq_id":"73787078273","text":"from qick import *\nimport matplotlib.pyplot as plt\n\nfrom legacy.pulseConfig import set_pulse_registers_IQ, declareMuxedGenAndReadout\n\nclass AmplitudeRabiProgram(PAveragerProgram):\n def initialize(self):\n cfg = self.cfg\n\n self.declare_gen(ch=cfg[\"qubit_ch\"], nqz=cfg[\"qubit_nzq\"]) # qubit drive\n self.declare_gen(ch=cfg[\"res_ch_I\"], nqz=cfg[\"res_nzq_I\"]) # resonator drive I\n self.declare_gen(ch=cfg[\"res_ch_Q\"], nqz=cfg[\"res_nzq_Q\"]) # resonator drive Q\n\n self.declare_readout(ch=cfg[\"ro_ch\"], length=cfg[\"readout_length\"],freq=cfg[\"res_freq\"], gen_ch=cfg[\"res_ch_I\"])\n\n self.q_rp=self.ch_page(self.cfg[\"qubit_ch\"]) # get register page for qubit_ch\n self.r_gain=self.sreg(cfg[\"qubit_ch\"], \"gain\") # get gain register for qubit_ch\n self.r_gain_update = 1 # register for keeping the update value of gain\n self.safe_regwi(self.q_rp, self.r_gain_update, cfg[\"start\"])\n\n\n res_freq = self.freq2reg(cfg[\"res_freq\"], gen_ch=cfg[\"res_ch_I\"], ro_ch=cfg[\"ro_ch\"]) # convert frequency to dac frequency (ensuring it is an available adc frequency)\n qubit_freq = self.freq2reg(cfg[\"ge_freq\"])\n self.qubit_freq = qubit_freq\n\n # add qubit and readout pulses to respective channels\n n_sigma = cfg.get(\"n_sigma\", 4)\n self.add_gauss(ch=cfg[\"qubit_ch\"], name=\"qubit\", sigma=self.us2cycles(cfg[\"sigma\"]), length=self.us2cycles(cfg[\"sigma\"]*n_sigma))\n\n set_pulse_registers_IQ(self, cfg[\"res_ch_I\"], cfg[\"res_ch_Q\"], cfg[\"skewPhase\"], cfg[\"IQScale\"],\n style=\"const\", freq=res_freq, phase=cfg[\"res_phase\"], gain=cfg[\"res_gain\"],\n length=cfg[\"res_length\"])\n\n self.set_pulse_registers(ch=self.cfg[\"qubit_ch\"], style=\"arb\",waveform=\"qubit\",\n phase=self.deg2reg(90, gen_ch=cfg[\"qubit_ch\"]),\n freq=qubit_freq, gain=cfg[\"start\"])\n\n self.sync_all(self.us2cycles(1)) # give processor some time to configure pulses\n\n def body(self):\n cfg = self.cfg\n prepareWithMSMT = cfg.get(\"prepareWithMSMT\", False)\n #\n if prepareWithMSMT:\n self.set_pulse_registers(ch=self.cfg[\"qubit_ch\"], style=\"arb\", waveform=\"qubit\",\n phase=self.deg2reg(90, gen_ch=cfg[\"qubit_ch\"]),\n freq=self.qubit_freq, gain=cfg[\"pi2_gain\"])\n self.pulse(ch=self.cfg[\"qubit_ch\"]) # play gaussian pulse\n self.sync_all(self.us2cycles(0.05)) # align channels and wait 50ns\n self.measure(pulse_ch=[cfg[\"res_ch_I\"], cfg[\"res_ch_Q\"]],\n adcs=[self.cfg[\"ro_ch\"]],\n adc_trig_offset=self.cfg[\"adc_trig_offset\"],\n t=0,\n wait=True,\n syncdelay=self.us2cycles(0.5))\n #\n #\n # drive and measure\n self.mathi(self.q_rp, self.r_gain, self.r_gain_update, '+', 0) # set the updated gain value\n self.pulse(ch=self.cfg[\"qubit_ch\"]) # play gaussian pulse\n self.sync_all(self.us2cycles(0.05)) # align channels and wait 50ns\n self.measure(pulse_ch=[cfg[\"res_ch_I\"], cfg[\"res_ch_Q\"]],\n adcs=[self.cfg[\"ro_ch\"]],\n adc_trig_offset=self.cfg[\"adc_trig_offset\"],\n t=0,\n wait=True,\n syncdelay=self.us2cycles(self.cfg[\"relax_delay\"]))\n\n\n def update(self):\n self.mathi(self.q_rp, self.r_gain_update, self.r_gain_update, '+', self.cfg[\"step\"]) # update gain of the pulse\n # self.mathi(self.q_rp, self.r_gain, self.r_gain, '+', self.cfg[\"step\"]) # update gain of the pulse\n\n\n\nclass MuxedAmplitudeRabiProgram(PAveragerProgram):\n def initialize(self):\n cfg = self.cfg\n\n # declare muxed generator and readout channels\n declareMuxedGenAndReadout(self, cfg[\"res_ch\"], cfg[\"res_nqz\"], cfg[\"res_mixer_freq\"],\n cfg[\"res_freqs\"], 
cfg[\"res_gains\"], cfg[\"ro_chs\"], cfg[\"readout_length\"])\n\n # set readout pulse registers\n self.set_pulse_registers(ch=cfg[\"res_ch\"], style=\"const\", length=cfg[\"res_length\"], mask=[0, 1, 2, 3])\n\n # set / config qubit DAC channel\n qubit_mixer_freq = cfg.get(\"qubit_mixer_freq\", 0)\n self.declare_gen(ch=cfg[\"qubit_ch\"], mixer_freq=qubit_mixer_freq, nqz=cfg[\"qubit_nqz\"]) # qubit drive\n\n self.q_rp=self.ch_page(self.cfg[\"qubit_ch\"]) # get register page for qubit_ch\n self.r_gain=self.sreg(cfg[\"qubit_ch\"], \"gain\") # get gain register for qubit_ch\n self.r_gain_update = 1 # register for keeping the update value of gain\n self.safe_regwi(self.q_rp, self.r_gain_update, cfg[\"start\"])\n\n self.qubit_freq = self.freq2reg(cfg[\"ge_freq\"], gen_ch=cfg[\"qubit_ch\"])\n\n # add qubit pulses to respective channels\n n_sigma = cfg.get(\"n_sigma\", 4)\n self.add_gauss(ch=cfg[\"qubit_ch\"], name=\"qubit\", sigma=self.us2cycles(cfg[\"sigma\"]), length=self.us2cycles(cfg[\"sigma\"]*n_sigma))\n\n self.set_pulse_registers(ch=self.cfg[\"qubit_ch\"], style=\"arb\",waveform=\"qubit\",\n phase=self.deg2reg(90, gen_ch=cfg[\"qubit_ch\"]),\n freq=self.qubit_freq, gain=cfg[\"start\"])\n\n self.sync_all(self.us2cycles(1)) # give processor some time to configure pulses\n\n def body(self):\n cfg = self.cfg\n prepareWithMSMT = cfg.get(\"prepareWithMSMT\", False)\n #\n if prepareWithMSMT:\n self.set_pulse_registers(ch=self.cfg[\"qubit_ch\"], style=\"arb\", waveform=\"qubit\",\n phase=self.deg2reg(90, gen_ch=cfg[\"qubit_ch\"]),\n freq=self.qubit_freq, gain=cfg[\"pi2_gain\"])\n self.pulse(ch=self.cfg[\"qubit_ch\"]) # play gaussian pulse\n self.sync_all(self.us2cycles(0.05)) # align channels and wait 50ns\n self.measure(pulse_ch=self.cfg[\"res_ch\"],\n adcs=self.ro_chs,\n pins=[0],\n adc_trig_offset=self.cfg[\"adc_trig_offset\"],\n wait=True,\n syncdelay=self.us2cycles(self.cfg[\"relax_delay\"]))\n #\n #\n # drive and measure\n self.mathi(self.q_rp, self.r_gain, self.r_gain_update, '+', 0) # set the updated gain value\n self.pulse(ch=self.cfg[\"qubit_ch\"]) # play gaussian pulse\n self.sync_all(self.us2cycles(0.05)) # align channels and wait 50ns\n # --- msmt\n self.measure(pulse_ch=self.cfg[\"res_ch\"],\n adcs=self.ro_chs,\n pins=[0],\n adc_trig_offset=self.cfg[\"adc_trig_offset\"],\n wait=True,\n syncdelay=self.us2cycles(self.cfg[\"relax_delay\"]))\n\n\n def update(self):\n self.mathi(self.q_rp, self.r_gain_update, self.r_gain_update, '+', self.cfg[\"step\"]) # update gain of the pulse\n # self.mathi(self.q_rp, self.r_gain, self.r_gain, '+', self.cfg[\"step\"]) # update gain of the pulse\n\n\n\n\n\nif __name__ == \"__main__\":\n from Hatlab_DataProcessing.analyzer import qubit_functions_rot as qfr\n from legacy.qubitMSMT import config, PyroServer\n from Hatlab_RFSOC.proxy import getSocProxy\n\n soc, soccfg = getSocProxy(PyroServer)\n\n expt_cfg={\n \"start\":-30000,\n \"step\":200,\n \"expts\":300,\n \"reps\": 300,\n \"relax_delay\":500\n }\n config.update(expt_cfg) #combine configs\n\n print(\"running...\")\n\n rabi=AmplitudeRabiProgram(soccfg, config)\n\n if config.get(\"prepareWithMSMT\", False) :\n x_pts, avgi, avgq = rabi.acquire(soc, load_pulses=True, readouts_per_experiment=2, save_experiments=[0, 1],\n progress=True, debug=False)\n Idata = rabi.di_buf_p[0]\n Qdata = rabi.dq_buf_p[0]\n\n from Hatlab_DataProcessing.post_selection.postSelectionProcess import simpleSelection_1Qge\n g_pct, I_vld, Q_vld, selData = simpleSelection_1Qge(Idata, Qdata, plot=True, xData={\"amp\": x_pts},\n 
selCircleSize=1)\n\n        plt.figure(\"g_pct\")\n        plt.plot(x_pts, g_pct)\n\n        piPul = qfr.PiPulseTuneUp(x_pts, g_pct)\n        piResult = piPul.run()\n        piResult.plot()\n    else:\n        x_pts, avgi, avgq = rabi.acquire(soc,load_pulses=True,progress=True, debug=False)\n\n        #Plotting Results\n        plt.figure()\n        plt.subplot(111, title= f\"Amplitude Rabi, $\sigma={soc.cycles2us(config['sigma'])*1000}$ ns\", xlabel=\"Gain\", ylabel=\"Qubit Population\" )\n        plt.plot(x_pts,avgi[0][0],'o-', markersize = 1)\n        plt.plot(x_pts,avgq[0][0],'o-', markersize = 1)\n\n        piPul = qfr.PiPulseTuneUp(x_pts, avgi[0][0]+1j*avgq[0][0])\n        piResult = piPul.run()\n        piResult.plot()\n        piResult.print_ge_rotation()\n\n\n","repo_name":"PITT-HATLAB/Hatlab_RFSOC","sub_path":"Hatlab_RFSOC/legacy/qubitMSMT/M003_amplituderabi.py","file_name":"M003_amplituderabi.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"74293179074","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nimport mysql.connector\r\n\r\nclass CustomerHistory:\r\n def __init__(self,window):\r\n self.window=window\r\n self.window.title(\"CustomerHistory\")\r\n self.window.geometry(\"1280x725+0+0\")\r\n self.window.configure(bg=\"white\")\r\n\r\n #variable dec\r\n self.searchBy=StringVar()\r\n self.searchEntry=StringVar()\r\n\r\n Headinglabel=Label(self.window,text=\"CustomerHistory\",font=(\"times new roman\",19,\"bold\"),bg=\"black\",fg=\"gold\",bd=4,relief=RIDGE,width=85)\r\n Headinglabel.place(x=0,y=0)\r\n\r\n RefreshButton=Button(self.window,text=\"Refresh\",font=(\"times new roman\",16,\"bold\"),bg=\"black\",fg=\"gold\",bd=2,relief=RIDGE,width=16,command=self.CustomerHistory)\r\n RefreshButton.place(x=550,y=670)\r\n\r\n searchBy=Label(self.window,text=\"SearchBy:-\",font=(\"times new roman\",21,\"bold\"),bd=2,bg=\"red\",relief=RIDGE)\r\n searchBy.place(x=0,y=40)\r\n\r\n combo_search=ttk.Combobox(self.window,font=(\"times new roman\",15,\"bold\"),width=18,state=\"readonly\",textvariable=self.searchBy)\r\n combo_search[\"value\"]=(\"CustomerRef\",\"CustomerName\",\"CustomerMobile\",\"IdNumber\",\"InDateTime\",\"OutDateTime\")\r\n combo_search.current(0)\r\n combo_search.place(x=150,y=44)\r\n\r\n searchE=Entry(self.window,font=(\"times new roman\",15,\"bold\"),bg=\"#F7EFE9\",width=66,textvariable=self.searchEntry)\r\n searchE.place(x=365,y=45)\r\n\r\n Search=Button(self.window,text=\"Search\",font=(\"times new roman\",13,\"bold\"),bg=\"black\",fg=\"yellow\",width=20,relief=RIDGE,command=self.Search)\r\n Search.place(x=1050,y=42)\r\n\r\n\r\n #function calling\r\n self.CustomerHistory()\r\n\r\n def CustomerHistory(self,querys=\"select * from customerhistory \"):\r\n tree=ttk.Treeview(self.window)\r\n tree[\"columns\"]=(\"Cref\",\"CName\",\"CMob\",\"AddPin\",\"IdType\",\"IdNum\",\"RoomCod\",\"TimeIn\",\"TimeOut\")\r\n\r\n s=ttk.Style(self.window)\r\n s.theme_use(\"classic\")\r\n s.configure(\".\",font=(\"Helvetica\",11,\"bold\"))\r\n s.configure(\"Treeview.Heading\",foreground=\"black\",font=(\"Helvetica\",10,\"bold\"))\r\n\r\n tree['show']='headings'\r\n\r\n tree.column(\"Cref\",width=70,minwidth=70,anchor=tk.CENTER)\r\n tree.column(\"CName\",width=150,minwidth=150,anchor=tk.CENTER)\r\n tree.column(\"CMob\",width=115,minwidth=115,anchor=tk.CENTER)\r\n tree.column(\"AddPin\",width=130,minwidth=130,anchor=tk.CENTER)\r\n tree.column(\"IdType\",width=120,minwidth=120,anchor=tk.CENTER)\r\n tree.column(\"IdNum\",width=130,minwidth=130,anchor=tk.CENTER)\r\n tree.column(\"RoomCod\",width=70,minwidth=70,anchor=tk.CENTER)\r\n tree.column(\"TimeIn\",width=200,minwidth=200,anchor=tk.CENTER)\r\n tree.column(\"TimeOut\",width=200,minwidth=200,anchor=tk.CENTER)\r\n\r\n tree.heading(\"Cref\",text=\"Cust_Ref\",anchor=tk.CENTER)\r\n tree.heading(\"CName\",text=\"Name\",anchor=tk.CENTER)\r\n tree.heading(\"CMob\",text=\"Mobile\",anchor=tk.CENTER)\r\n tree.heading(\"AddPin\",text=\"Address\",anchor=tk.CENTER)\r\n tree.heading(\"IdType\",text=\"ID Type\",anchor=tk.CENTER)\r\n tree.heading(\"IdNum\",text=\"ID Number\",anchor=tk.CENTER)\r\n tree.heading(\"RoomCod\",text=\"RoomCode\",anchor=tk.CENTER)\r\n tree.heading(\"TimeIn\",text=\"In Time\",anchor=tk.CENTER)\r\n tree.heading(\"TimeOut\",text=\"Out Time\",anchor=tk.CENTER)\r\n \r\n \r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"vicky@123//\",database=\"hotelandrestoproject\")\r\n 
my_cursor=conn.cursor()\r\n        #query=\"select * from customerhistory \"\r\n        query=querys\r\n        my_cursor.execute(query)\r\n        \r\n        for i, ro in enumerate(my_cursor):\r\n            tree.insert('', i + 1, text=\"\", values=(ro[0],ro[1],ro[2],ro[3],ro[4],ro[5],ro[6],ro[7],ro[8]))\r\n\r\n        tree.place(x=0,y=78,width=1280,height=580)\r\n\r\n    def Search(self):\r\n        conn=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"vicky@123//\",database=\"hotelandrestoproject\")\r\n        my_cursor=conn.cursor()\r\n        mode=self.searchBy.get()\r\n        value=self.searchEntry.get()\r\n        #print(mode,value)\r\n        if mode==\"InDateTime\" or mode==\"OutDateTime\":\r\n            value=value+\"%\"\r\n            print(value)\r\n        # mode comes from a read-only combobox; double any quotes in the typed\r\n        # value as a minimal guard before it is interpolated into the SQL\r\n        value=value.replace(\"'\", \"''\")\r\n        querys=\"select * from customerhistory where {} LIKE '{}'\".format(mode,value)\r\n        my_cursor.execute(querys)\r\n        for i in my_cursor:\r\n            print(i)\r\n        self.CustomerHistory(querys)\r\n\r\n\r\nif __name__==\"__main__\":\r\n    window=Tk()\r\n    obj=CustomerHistory(window)\r\n    window.mainloop()\r\n","repo_name":"Techniqual-Tech/HotelandRestoManagementSystem","sub_path":"Hotel&RestorantManagementSystem/hotelCustomerHistory.py","file_name":"hotelCustomerHistory.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
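For reference, a hedged sketch of the same lookup using mysql.connector's %s placeholders, so the typed value never has to be spliced into the SQL text; column names cannot be placeholders, so the (combobox-restricted) column is checked against a whitelist. Connection details are left out:

import mysql.connector

ALLOWED_COLUMNS = {"CustomerRef", "CustomerName", "CustomerMobile",
                   "IdNumber", "InDateTime", "OutDateTime"}

def search_history(conn, column, value):
    if column not in ALLOWED_COLUMNS:
        raise ValueError("unexpected column: " + column)
    cur = conn.cursor()
    cur.execute("select * from customerhistory where {} LIKE %s".format(column),
                (value,))
    return cur.fetchall()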
+{"seq_id":"41120560099","text":"from PyQt6.QtWidgets import * # QMainWindow, QTextEdit, QLineEdit, QPushButton, QApplication\nimport sys\nfrom backend import Chatbot\n# from main import jarvis\nimport threading\n#import speech_recognition as sr\nimport pyttsx3\nimport datetime\nimport speech_recognition as sr\nimport wikipedia\nimport webbrowser\nimport os\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\nengine.setProperty('rate', 150)\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n \ndef wishMe():\n hour = int(datetime.datetime.now().hour)\n if hour >= 0 and hour < 12:\n speak(\"Good Morning!\")\n elif hour >= 12 and hour < 18:\n speak(\"Good Afternoon\")\n else:\n speak(\"Good Evening!\")\n speak(\"I am Jarvis, Sir. Please tell me how may I help you\")\n\ndef takecommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n # print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n try:\n # print(\"Recognizing...\")\n query = r.recognize_google(audio, language='en-in')\n # print(f\"User said: {query}\\n\")\n except Exception as e:\n # print(e)\n speak(\"Say that again please...\")\n return \"None\"\n return query\n \nclass ChatbotWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"AI Robo speaker\")\n self.chatbot=Chatbot()\n self.setMinimumSize(700, 500)\n self.chat_area=QTextEdit(self)\n self.chat_area.setGeometry(10, 10, 1500, 600)\n self.chat_area.append(\"Hi, this is your AI desktop assistant. Ask anything you want! \")\n self.chat_area.setReadOnly(True)\n # self.input_field=QLineEdit(self)\n # self.input_field.setGeometry(10, 720, 1000, 100)\n # self.input_field.returnPressed.connect(self.send_message)\n self.button=QPushButton(\"Ask Something\", self)\n self.button.setGeometry(675, 640, 150, 50)\n self.button.clicked.connect(self.send_message)\n self.show()\n \n def send_message(self):\n wishMe()\n query = takecommand().lower()\n self.chat_area.append(f\"Me: {query}
\")\n if 'wikipedia' in query:\n self.chat_area.append(\"Bot: Searching wikipedia...
\")\n speak('Searching wikipedia...')\n query = query.replace(\"wikipedia\", \"\")\n results = wikipedia.summary(query, sentences=2)\n speak(\"According to wikipedia\")\n # print(results)\n speak(results)\n elif 'open youtube' in query:\n self.chat_area.append(\"Bot: opening youtube...
\")\n webbrowser.open(\"youtube.com\")\n \n elif 'open google' in query:\n self.chat_area.append(\"Bot: opening google...
\")\n webbrowser.open(\"google.com\")\n elif 'data structure' in query or 'coding' in query:\n self.chat_area.append(\"Bot: opening leetcode...
\")\n webbrowser.open(\"leetcode.com\")\n elif 'google classroom' in query:\n self.chat_area.append(\"Bot: opening google classroom
\")\n webbrowser.open(\"classroom.google.com\")\n elif 'open stackoverflow' in query:\n self.chat_area.append(\"Bot: opening stackoverflow...
\")\n webbrowser.open(\"stackoverflow.com\")\n elif 'play music' in query:\n webbrowser.open(\"jiosaavn.com\")\n elif 'the time' in query:\n strtime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n self.chat_area.append(f\"Bot: Sir, the time is {strtime}
\")\n speak(f\"Sir, the time is {strtime}\")\n elif 'open code' in query:\n path = \"C:\\\\Users\\\\LENOVO\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\n os.startfile(path)\n elif 'search for me' in query:\n self.chat_area.append(\"Bot: Searching...
\")\n webbrowser.open(\"google.com/search?q=\"+query.replace(\"search for me \", ''))\n else:\n # user_input=self.input_field.text().strip()\n # print(user_input)\n \n # self.input_field.clear()\n \n thread=threading.Thread(target=self.get_bot_response, args=(query, ))\n thread.start()\n \n \n def get_bot_response(self, user_input):\n response=self.chatbot.get_response(user_input)\n self.chat_area.append(f\"Bot: {response}
\")\n speak(response)\n \n\napp=QApplication(sys.argv)\nmain_window=ChatbotWindow()\nsys.exit(app.exec())","repo_name":"AnshAg111/MyPracticum","sub_path":"main (2).py","file_name":"main (2).py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32785796971","text":"import requests\r\nimport bs4\r\n\r\njob_titles = []\r\ncompany_names =[]\r\ncompany_locationes =[]\r\ninformation =[]\r\nposted =[]\r\npage_num=0\r\nwhile True:\r\n # try:\r\n #requests to fetch the url\r\n res= requests.get(f\"https://wuzzuf.net/search/jobs/?a=hpb&q=web%20developers&start={page_num}\")\r\n #page content\r\n con=res.content\r\n #print(con)\r\n #create soup object to parse content\r\n soup= bs4.BeautifulSoup(con, \"lxml\")\r\n\r\n\r\n page_limit = int(soup.find(\"strong\").text)\r\n if(page_num>page_limit//15):\r\n print(\"pages ended\")\r\n break\r\n #find elements\r\n job_title = soup.find_all(\"h2\",{\"class\":\"css-m604qf\"})\r\n company_name= soup.find_all(\"a\",{\"class\":\"css-17s97q8\"})\r\n company_location= soup.find_all(\"span\",{\"class\":\"css-5wys0k\"})\r\n job_information = soup.find_all(\"div\", {\"class\":\"css-y4udm8\"})\r\n posted_new= soup.find_all(\"div\",{\"class\":\"css-4c4ojb\"})\r\n posted_old= soup.find_all(\"div\",{\"class\":\"css-do6t5g\"})\r\n posteed =[*posted_new , *posted_old]\r\n #step loop over returned lists\r\n for i in range(len(job_title)):\r\n job_titles.append(job_title[i].text)\r\n company_names.append(company_name[i].text)\r\n company_locationes.append(company_location[i].text)\r\n information.append(job_information[i].text)\r\n posted.append(posteed[i].text)\r\n\r\n page_num +=1\r\n\"\"\"\" print(\"page switched\")\r\nexcept:\r\n print(\"error\")\r\n break\r\n\"\"\"\r\nprint (job_titles)\r\n\r\n\r\nprint (company_names)\r\n\r\n\r\nprint(company_locationes)\r\n\r\n\r\nprint(information)\r\n\r\n\r\nprint(posted)","repo_name":"nouragaber/python-project-hend","sub_path":"python-project-Hend.py","file_name":"python-project-Hend.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43444066423","text":"import os\nfrom typing import Tuple\n\nimport dash_bootstrap_components as dbc\nimport polars as pl\nimport yaml\nfrom dash import dcc, html\n\nwith open(os.getcwd() + \"/webapp/styles.yaml\") as file:\n styles = yaml.safe_load(file)\n\n\nstyle_col = styles[\"style_col\"]\nstyle_dropdown = styles[\"style_dropdown\"]\nstyle_top = styles[\"style_top\"]\nstyle_bottom = styles[\"style_bottom\"]\n\n\ndef get_filter_rows(players: pl.DataFrame) -> Tuple[html.Div, html.Div, html.Div]:\n \"\"\"\n Generates filter card\n \"\"\"\n\n players_dropdown = dcc.Dropdown(\n id=\"player_name\",\n placeholder=\"Choose a player\",\n options=[\n {\"label\": str(n), \"value\": str(n)}\n for n in players.select(pl.col(\"player_name\").unique()).to_series().sort()\n ],\n value=\"Roger Federer\",\n clearable=False,\n style=style_dropdown,\n )\n\n surface_dropdown = dcc.Dropdown(\n id=\"surface\", placeholder=\"Select surfaces\", multi=True, style=style_dropdown\n )\n\n tourney_lvl_dropdown = dcc.Dropdown(\n id=\"tourney_level\",\n placeholder=\"Select tournament types\",\n multi=True,\n style=style_dropdown,\n )\n tourney_dropdown = dcc.Dropdown(\n id=\"tournament\",\n placeholder=\"Select tournaments\",\n searchable=True,\n multi=True,\n style=style_dropdown,\n )\n\n opponent_dropdown = dcc.Dropdown(\n id=\"opponent\", placeholder=\"Select opponents\", multi=True, style=style_dropdown\n )\n\n opponent_rank_dropdown = dcc.Dropdown(\n id=\"opponent_rank\",\n placeholder=\"Select max opponent rank\",\n options=[\n {\"label\": \"Top 5\", \"value\": 5},\n {\"label\": \"Top 10\", \"value\": 10},\n {\"label\": \"Top 20\", \"value\": 20},\n {\"label\": \"Top 50\", \"value\": 50},\n {\"label\": \"Top 100\", \"value\": 100},\n ],\n style=style_dropdown,\n )\n\n round_dropdown = dcc.Dropdown(\n id=\"round\", placeholder=\"Select rounds\", multi=True, style=style_dropdown\n )\n\n top_row = dbc.Row(\n id=\"top_row\",\n style=style_top,\n children=[\n # Select Player\n dbc.Col(\n id=\"select_player\",\n children=[\n # html.H3(\"Player\", style=style_h3),\n players_dropdown\n ],\n width=3,\n style=style_col,\n ),\n # Select Surface\n dbc.Col(\n id=\"select_surface\",\n children=[\n # html.H3(\"Surface Type\", style=style_h3),\n surface_dropdown\n ],\n width=3,\n style=style_col,\n ),\n # Select Tournament Level\n dbc.Col(\n id=\"select_tourney_lvl\",\n children=[\n # html.H3(\"Tournament Levels\", style=style_h3),\n tourney_lvl_dropdown,\n ],\n width=3,\n style=style_col,\n ),\n # Select Tournaments\n dbc.Col(\n id=\"select_tourney\",\n children=[\n # html.H3(\"Select Tournaments\", style=style_h3),\n tourney_dropdown,\n ],\n width=3,\n style=style_col,\n ),\n ],\n )\n\n bottom_row = dbc.Row(\n id=\"bottom_row\",\n style=style_bottom,\n children=[\n # Select Opponents\n dbc.Col(\n id=\"select_opponent\",\n children=[\n # html.H3(\"Select Opponents\", style=style_h3),\n opponent_dropdown\n ],\n width=3,\n style=style_col,\n ),\n # Select Top Rank\n dbc.Col(\n id=\"select_opponent_rank\",\n children=[\n # html.H3(\"Opponents Rank\", style=style_h3),\n opponent_rank_dropdown,\n ],\n width=3,\n style=style_col,\n ),\n # Select Rounds\n dbc.Col(\n id=\"select_round\",\n children=[\n # html.H3(\"Rounds\", style=style_h3),\n round_dropdown\n ],\n width=3,\n style=style_col,\n ),\n dbc.Col(\n id=\"select_period\",\n children=[\n # html.H3(\"Time Period\", style=style_h3),\n html.H3(\"\", style={\"margin-top\": \"15px\"}),\n dcc.RangeSlider(id=\"time_period\", step=2),\n ],\n width=3,\n style=style_col,\n 
),\n ],\n )\n\n filters = dbc.Card(\n id=\"filters\",\n children=[\n dbc.CardHeader(\"Filters\"),\n dbc.CardBody(\n [\n top_row,\n bottom_row,\n # row3\n ]\n ),\n ],\n style={\"margin-left\": \"1%\", \"margin-right\": \"1%\"},\n )\n return filters\n","repo_name":"FBruzzesi/atp_stats_webapp","sub_path":"webapp/layout/filter_rows.py","file_name":"filter_rows.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
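Every dropdown in the record above follows the same pattern: distinct values from a polars column become label/value pairs. A self-contained toy version of that pattern (sample data invented):

import polars as pl
from dash import dcc

players = pl.DataFrame({"player_name": ["Nadal", "Federer", "Nadal", "Djokovic"]})

names = players.select(pl.col("player_name").unique()).to_series().sort()
dropdown = dcc.Dropdown(
    id="player_name",
    options=[{"label": str(n), "value": str(n)} for n in names],
    value="Federer",
    clearable=False,
)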
+{"seq_id":"34876588586","text":"import unittest\nimport sys\nimport os\nfrom config.properties import Configuration\n\n# Adding app folder to Python path:\nappFolder = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(appFolder+'/../')\n\nfrom app import app\n\n\nclass TestWebApplication(unittest.TestCase):\n\n # Create a tester instance of the application:\n tester = app.test_client()\n\n # Frequently used variable is the most recent schema version:\n mostRecentSchemaVersion = Configuration.schemaVersion\n\n # Generally supported submission type:\n submissionType = 'METADATA'\n\n def test_endpoint_template_schema(self):\n\n # Get response from endpoint:\n response = self.tester.get('/v1/template-schema', content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n # Test if the available schemas contain the current version:\n supportedSchemas = response.get_json()['schema_versions']\n self.assertIn(self.mostRecentSchemaVersion,supportedSchemas)\n\n def test_endpoint_template_schema_supported_version(self):\n\n # Submit quiery:\n response = self.tester.get('/v1/template-schema/{}'.format(self.mostRecentSchemaVersion), content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n schema = response.get_json()\n\n # Do we find the version:\n self.assertEqual(schema['schema_version'], self.mostRecentSchemaVersion)\n\n # Do we have a list of submission types?\n self.assertIn('submission_types', schema)\n\n # Is this a dictionary with submission types:\n self.assertIsInstance(schema['submission_types'], dict)\n\n # At least metadata must be there:\n self.assertIn('METADATA', schema['submission_types'])\n\n def test_endpoint_template_schema_submission_type(self):\n\n # Submit query:\n response = self.tester.get('/v1/template-schema/{}/{}'.format(self.mostRecentSchemaVersion, self.submissionType),\n content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n # Extract response content:\n responseSchema = response.get_json()\n\n # Must not contain errror:\n self.assertNotIn('error', responseSchema)\n\n # Must contain version:\n self.assertIn('version', responseSchema)\n\n # Version must be the requested version:\n self.assertEqual(self.mostRecentSchemaVersion, responseSchema['version'])\n\n # Must be man keys:\n self.assertGreater(len(responseSchema), 3)\n\n def test_endpoint_template_schema_wrong_submission_type(self):\n wrongSubmissionType = 'cicaful'\n\n # Submit query:\n response = self.tester.get('/v1/template-schema/{}/{}'.format(self.mostRecentSchemaVersion, wrongSubmissionType),\n content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n # Error must be in the key:\n self.assertIn('error', response.get_json())\n\n def test_endpoint_template_schema_wrong_version(self):\n # Get a wrong schema version:\n wrongSchema = 'cicaful'\n\n # Submit quiery:\n response = self.tester.get('/v1/template-schema/{}'.format(wrongSchema), content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n schema = response.get_json()\n\n # Do we have errors:\n self.assertIn('error', schema)\n\n # Is the error message as expected:\n self.assertEqual('Unknown schema versions', schema['error'])\n\n # Do we see the supported schema:\n self.assertIn(self.mostRecentSchemaVersion, schema['supported_versions'])\n\n def test_templates(self):\n # Submit quiery:\n response = self.tester.post('/v1/templates', content_type='html/json')\n self.assertEqual(response.status_code, 200)\n\n # Test if the returned value is a blob:\n\n\nif __name__ 
== '__main__':\n unittest.main()\n","repo_name":"EBISPOT/gwas-template-services","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"42627309794","text":"import re\nfrom bs4 import BeautifulSoup\n\n\n\ndef color_map(values):\n\n states = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n }\n\n states = states.keys() # because laziness\n\n #SVG map\n svg = open('USA.svg', 'r').read()\n soup = BeautifulSoup(svg)#, selfClosingTags=['defs', 'sodipodi:namedview'])\n\n #counties\n paths = soup.find_all('path')\n\n colors = [\"#FFFFCC\", \"#FFEDA0\", \"#FED976\", \"#FEB24C\", \"#FD8D3C\", \"#FC4E2A\", \"#E31A1C\", \"#B10026\"]\n\n\n # Change colors, replacing whole style instead of parsing for fill\n path_style = 'font-size:12px;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel;fill:'\n\n\n\n for p in paths:\n\n if p['id'] not in [\"State_Lines\", \"separator\"]:\n\n try:\n rate = values[p['inkscape:label'][-2:]]\n except:\n continue\n\n if rate > 7:\n color_class = 7\n elif rate > 6:\n color_class = 6\n elif rate > 5:\n color_class = 5\n elif rate > 4:\n color_class = 4\n elif rate > 3:\n color_class = 3\n elif rate > 2:\n color_class = 2\n elif rate > 1:\n color_class = 1\n else:\n color_class = 0\n\n\n color = colors[color_class]\n p['style'] = path_style + color\n\n return soup.prettify()\n","repo_name":"IHautaI/consumer-complaints","sub_path":"color_map.py","file_name":"color_map.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"21969118675","text":"d={\n \"P\":\"Pediatric\",\n \"O\":\"Orthopedics\",\n \"E\":\"ENT\"\n}\n\nl=input().split()\nd2={}\nfor i in range(1,len(l),2):\n if(l[i] not in d2.keys()):\n d2[l[i]]=1\n else:\n d2[l[i]]+=1\nprint(d2)\nfor i in d2.keys():\n if(d2[i]==max(d2.values())):\n print(d[i])","repo_name":"subhasish7077/Super_Coder_GIETU","sub_path":"Week 1(Python_and_Oops)/Practice Questions/Q19_highest_patient.py","file_name":"Q19_highest_patient.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34878339776","text":"#!/usr/bin/env python3\n\n# This file is part of MGnify genome analysis pipeline.\n#\n# MGnify genome analysis pipeline is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# MGnify genome analysis pipeline is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with MGnify genome analysis pipeline. If not, see .\n\n\nimport os\nimport sys\nimport argparse\nfrom shutil import copy\n\n\ndef get_scores(sdb):\n scores = {}\n with open(sdb, \"r\") as file_in:\n next(file_in)\n for line in file_in:\n values = line.strip().split(\",\")\n scores.setdefault(values[0], values[1])\n return scores\n\n\ndef getClusters(clst_file):\n clusters = {}\n with open(clst_file, \"r\") as f:\n next(f)\n for line in f:\n args = line.rstrip().split(\",\")\n clusters.setdefault(args[1], []).append(args[0])\n return clusters\n\n\ndef parse_mashfile(mash_dist, outname, genlist):\n header = \"genome1,genome2,dist,similarity\"\n with open(mash_dist, \"r\") as f, open(outname, \"w\") as fout:\n fout.write(\"%s\\n\" % (header))\n next(f)\n for line in f:\n line = line.rstrip()\n cols = line.split(\",\")\n if cols[0] in genlist and cols[1] in genlist:\n fout.write(\"%s\\n\" % (line))\n\n\ndef splitMash(mash_dist, genlist, outdir, cluster_name):\n outname = \"%s/%s/%s_mash.tsv\" % (outdir, cluster_name, cluster_name)\n print(outname)\n if not os.path.isdir(os.path.join(outdir, cluster_name)):\n os.makedirs(os.path.join(outdir, cluster_name))\n parse_mashfile(mash_dist, outname, genlist)\n\n\ndef generate_mash_folder(mash_dist, out_mash_folder, cluster_name, genlist):\n outname = os.path.join(out_mash_folder, cluster_name + \"_mash.tsv\")\n parse_mashfile(mash_dist, outname, genlist)\n\n\ndef create_cluster_folders(out_folder, cluster, genomes, fasta_folder):\n cluster_output = \"%s/%s/%s\" % (out_folder, \"clusters\", cluster)\n if not os.path.isdir(cluster_output):\n os.makedirs(cluster_output)\n for genome in genomes:\n src = \"%s/%s\" % (os.path.abspath(fasta_folder), genome)\n dst = \"%s/%s\" % (cluster_output, genome)\n copy(src, dst)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Split dRep results by species cluster\"\n )\n parser.add_argument(\"-f\", dest=\"fasta_folder\", help=\"FASTA folder\", required=False)\n parser.add_argument(\n \"-o\", dest=\"output_folder\", help=\"Output folder [REQUIRED]\", required=True\n )\n parser.add_argument(\n \"--cdb\",\n dest=\"cdb\",\n help=\"dRep output folder/data_tables/Cdb.csv\",\n required=True,\n )\n parser.add_argument(\n \"--mdb\",\n dest=\"mdb\",\n help=\"dRep output folder/data_tables/Mdb.csv\",\n required=False,\n )\n parser.add_argument(\"--sdb\", dest=\"sdb\", help=\"dRep Sdb.csv\", required=True)\n parser.add_argument(\n \"--create-clusters\",\n action=\"store_true\",\n help=(\n \"Set this flag to generate folders with genomes and mash-files inside for\"\n \" each cluster\"\n ),\n )\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n else:\n args = parser.parse_args()\n\n # get scores for genomes\n scores = get_scores(sdb=args.sdb)\n\n names = {True: 
\"one_genome\", False: \"many_genomes\"}\n if args.create_clusters and not args.fasta_folder:\n print(\"--create-clusters option requires -f argument presented\")\n exit(1)\n clusters = getClusters(clst_file=args.cdb)\n\n if not os.path.isdir(args.output_folder):\n os.makedirs(args.output_folder)\n\n with open(\n os.path.join(args.output_folder, \"clusters_split.txt\"), \"w\"\n ) as split_file:\n if args.create_clusters and args.fasta_folder:\n for c in clusters:\n genomes = clusters[c]\n create_cluster_folders(\n out_folder=args.output_folder,\n cluster=c,\n genomes=genomes,\n fasta_folder=args.fasta_folder,\n )\n\n genome_scores = [float(scores[genome]) for genome in genomes]\n sorted_genomes = [\n x\n for _, x in sorted(\n zip(genome_scores, genomes),\n reverse=True,\n key=lambda pair: pair[0],\n )\n ]\n split_file.write(\n names[len(genomes) == 1]\n + \":\"\n + c\n + \":\"\n + \",\".join(sorted_genomes)\n + \"\\n\"\n )\n main_rep_name = sorted_genomes[0].split(\".\")[0]\n if args.mdb:\n splitMash(\n mash_dist=args.mdb,\n genlist=genomes,\n outdir=args.output_folder,\n cluster_name=main_rep_name,\n )\n else:\n if args.mdb:\n out_mash_folder = os.path.join(args.output_folder, \"mash_folder\")\n if not os.path.exists(out_mash_folder):\n os.makedirs(out_mash_folder)\n for c in clusters:\n genomes = clusters[c]\n genome_scores = [float(scores[genome]) for genome in genomes]\n sorted_genomes = [\n x\n for _, x in sorted(\n zip(genome_scores, genomes),\n reverse=True,\n key=lambda pair: pair[0],\n )\n ]\n split_file.write(\n names[len(genomes) == 1]\n + \":\"\n + c\n + \":\"\n + \",\".join(sorted_genomes)\n + \"\\n\"\n )\n if args.mdb:\n main_rep_name = sorted_genomes[0].split(\".\")[0]\n if len(genomes) > 1:\n generate_mash_folder(\n mash_dist=args.mdb,\n out_mash_folder=out_mash_folder,\n cluster_name=main_rep_name,\n genlist=genomes,\n )\n","repo_name":"EBI-Metagenomics/genomes-pipeline","sub_path":"bin/split_drep.py","file_name":"split_drep.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"61"}
+{"seq_id":"34066645857","text":"\"\"\"\nName: Wei Siyuan\nDate: 2017-01-29\nBrief Project Description: a reading list to help users to keep tracking of reading history\nGitHub URL: https://github.com/ParisWei/a2-starter-master.git\n\"\"\"\nimport kivy\nkivy.require('1.8.0')\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.button import Button\nfrom kivy.properties import StringProperty\nfrom booklist import BookList\n\n# Create your main program in this file, using the ReadingListApp class\n\n\nclass ReadingListApp(App):\n \"\"\"\n Main app to show readling list\n \"\"\"\n LONG_BOOK_COLOR = [0.2,0.7,0.9,1]\n SHORT_BOOK_COLOR = [0.9,0.7,0.2,1]\n def __init__(self, **kwargs):\n \"\"\"\n Construct main app\n \"\"\"\n super().__init__(**kwargs)\n self.list = BookList()\n self.list.loadBook()\n\n\n def build(self):\n \"\"\"\n Build the Kivy GUI\n :return: reference to the root Kivy widget\n \"\"\"\n self.title = \"Reading List 2.0\"\n self.root = Builder.load_file('app.kv')\n self.create_book_buttons('r')\n return self.root\n\n def create_book_buttons(self, s):\n \"\"\"\n Refresh the list of book on right side\n :param s: the book status, can be 'r' or 'c'\n :return: NA\n \"\"\"\n self.root.ids.listview.clear_widgets()\n total = 0\n for book in self.list.getBooks():\n if book.getStatus() == s:\n total += int(book.getPage())\n if s == 'r' and book.isLong():\n temp_button = Button(text=book.getTitle(), background_color=self.LONG_BOOK_COLOR)\n elif s == 'r':\n temp_button = Button(text=book.getTitle(), background_color=self.SHORT_BOOK_COLOR)\n else:\n temp_button = Button(text=book.getTitle())\n temp_button.bind(on_release=self.press_entry)\n self.root.ids.listview.add_widget(temp_button)\n header = \"Total pages \"\n if (s == 'c'):\n header += 'completed'\n self.root.ids.footer_status.text = \"Click one book to see details\"\n else:\n header += 'to read'\n self.root.ids.footer_status.text = \"Click books to mark them as completed\"\n header += ': ' + str(total)\n self.root.ids.header_status.text = header\n\n\n def press_entry(self, instance):\n \"\"\"\n react to click on one of book in list, either complete the book or show detail information\n :param instance: the button being pressed\n :return: NA\n \"\"\"\n name = instance.text\n book = self.list.getBookByTitle(name)\n if(book.getStatus() == 'r'):\n book.markComplete()\n self.press_clear()\n self.create_book_buttons('r')\n else:\n self.root.ids.footer_status.text = book.__str__()\n instance.state = 'down'\n\n def press_clear(self):\n \"\"\"\n Clear three input fields on left side\n :return: NA\n \"\"\"\n self.root.ids.title.text = ''\n self.root.ids.author.text = ''\n self.root.ids.page.text = ''\n\n\n\n def press_add(self):\n \"\"\"\n Add a required book based on the information filled\n :return: NA\n \"\"\"\n if (self.getInputString(self.root.ids.title.text)\n and self.getInputString(self.root.ids.author.text)\n and self.getInputInt(self.root.ids.page.text)):\n self.list.addBook(self.root.ids.title.text, self.root.ids.author.text, self.root.ids.page.text)\n self.press_clear()\n self.create_book_buttons('r')\n\n def on_stop(self):\n \"\"\"\n save the book list to csv on exit\n :return: NA\n \"\"\"\n self.list.saveBook()\n\n # validate string input\n def getInputString(self, inputStr):\n \"\"\"\n valid whether a string input is empty\n :param inputStr: the string being examined\n :return:\n \"\"\"\n if (len(inputStr) == 0):\n self.root.ids.footer_status.text = (\"All fields must be completed\")\n return False\n else:\n 
return True\n\n # validate integer input\n def getInputInt(self, intInput):\n \"\"\"\n check whether input is a valid integer\n :param intInput: the string being examined\n :return:\n \"\"\"\n emptyCheck = self.getInputString(intInput)\n if emptyCheck == False:\n return False\n try:\n num = int(intInput)\n except ValueError:\n self.root.ids.footer_status.text = (\"Please enter a valid number\")\n return False\n else:\n if num >= 0:\n return True\n else:\n self.root.ids.footer_status.text = (\"Number must be >= 0\")\n return False\n\nReadingListApp().run()\n\n\n\n","repo_name":"weisiyuan/a2-starter-master","sub_path":"a2-starter-master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23462345741","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"See http://martin-thoma.com for more mini code samples.\"\"\"\n\n\ndef quadopt(a, b):\n if a == '1':\n return b\n elif b == '1':\n return a\n\n if a == 'i':\n if b == 'i':\n return '-1'\n elif b == 'j':\n return 'k'\n elif b == 'k':\n return '-j'\n elif a == 'j':\n if b == 'i':\n return '-k'\n elif b == 'j':\n return '-1'\n elif b == 'k':\n return 'i'\n elif a == 'k':\n if b == 'i':\n return 'j'\n elif b == 'j':\n return '-i'\n elif b == 'k':\n return '-1'\n\n\ndef solve(chars, X):\n curr_char = '1'\n is_neg = False\n got_i = False\n got_j = False\n got_k = False\n for char in chars*X:\n next_char = quadopt(curr_char, char)\n if next_char.startswith('-'):\n is_neg = not is_neg\n next_char = next_char[1:]\n if not is_neg and next_char == 'i' and not got_i:\n got_i = True\n curr_char = '1'\n elif not is_neg and next_char == 'j' and got_i and not got_j:\n got_j = True\n curr_char = '1'\n elif not is_neg and next_char == 'k' and got_i and got_j and not got_k:\n got_k = True\n curr_char = '1'\n else:\n curr_char = next_char\n #print([got_i, got_j, got_k, is_neg, curr_char])\n if got_i and got_j and got_k and curr_char == '1' and (not is_neg):\n return \"YES\"\n else:\n return \"NO\"\n\n\nif __name__ == \"__main__\":\n testcases = int(raw_input())\n\n for caseNr in range(1, testcases+1):\n L, X = [int(el) for el in raw_input().split(\" \")]\n chars = raw_input()\n print(\"Case #%i: %s\" % (caseNr, solve(chars, X)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_157/608.py","file_name":"608.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23398261871","text":"import os\nimport time\nfrom functools import reduce\n\nt1 = time.time()\n\ndef isPalin(n):\n return str(n) == str(n)[::-1]\n\ndef updateInserts():\n global inserts\n global inserts_last\n global lenIns\n \n temp = inserts\n if lenIns == 1:\n inserts = ['00', '11', '22']\n else:\n inserts = []\n for i in inserts_last:\n inserts.append('0' + i + '0')\n inserts.append('1' + i + '1')\n if lenIns == 2:\n inserts_last = ['00','11']\n else:\n inserts_last = temp\n lenIns = len(inserts[0])\n\ninHandle = open('input.txt','r')\noutHandle = open('output.txt','w')\n\ninserts_last = ['0','1','2']\ninserts = ['0','1','2']\nlenIns = 1\ntenH = 10 ** 100\n\nsolutions = [1,4,9,121,484]\n\nnCases = int(inHandle.readline().replace('\\n',''))\ngo = True\nwhile(go):\n print(lenIns)\n base = '11'\n for i in inserts:\n if(i.find('111111111') == 0):\n continue\n n = '1' + i + '1'\n square = int(n)**2\n if(square <= tenH and isPalin(square)):\n solutions.append(square)\n elif(square > tenH):\n go = False\n break\n twos = []\n if(lenIns == 1):\n twos=['202','212']\n elif(lenIns % 2 == 0):\n twos.append('2' + reduce(lambda x,y:x+y, list('0' * lenIns)) + '2')\n else:\n half = int(lenIns/2)\n twos.append('2' + reduce(lambda x,y:x+y, list('0' * lenIns)) + '2')\n twos.append('2' + reduce(lambda x,y:x+y, list('0' * half)) + '1' + reduce(lambda x,y:x+y, list('0' * half)) + '2')\n for tt in twos:\n square = int(tt)**2\n if(square <= tenH and isPalin(square)):\n solutions.append(square)\n elif(square > tenH):\n go = False\n break\n updateInserts()\n\nsorted_solutions = sorted(set(solutions))\n\nfor case in range(nCases):\n givenRange = inHandle.readline().split(' ')\n low = int(givenRange[0])\n high = int(givenRange[1])\n answer = 0\n for i in sorted_solutions:\n if i < low:\n continue\n if i > high:\n break\n else:\n answer = answer + 1\n\n outHandle.write('Case #' + str(case+1) + ': ' + str(answer) + '\\n')\n\ninHandle.close()\noutHandle.close()\n\nprint(\"finished in \", time.time() - t1, \" seconds\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/858.py","file_name":"858.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31765762301","text":"import turtle\r\n\r\ndef draw_square(t, sz):\r\n for i in range (4):\r\n t.forward(sz)\r\n t.left(90)\r\n\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"lightgreen\")\r\nwn.title(\"Q1\")\r\nturtle = turtle.Turtle()\r\nturtle.color(\"magenta\")\r\nturtle.pensize(3)\r\nfor i in range(5):\r\n draw_square(turtle, 20*(i+1))\r\n turtle.penup()\r\n turtle.setpos(-10*(i+1),-10*(i+1))\r\n turtle.pendown()\r\nwn.mainloop()","repo_name":"FreitasPH/Lista1Python","sub_path":"Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33787283974","text":"from importlib import import_module\nimport sys\n\n__version__ = '0.1'\n\nclass Subcommand:\n def __init__(self, name, entry_point, description=None):\n self.name = name\n self.entry_point = entry_point\n self.description = description\n\nclass Commander:\n def __init__(self, description=None, subcmds=None, package=None):\n assert subcmds is not None\n #self.prog = prog\n self.description = description\n self.subcmds = subcmds\n self.package = package\n\n def __call__(self, argv=None):\n if argv is None:\n argv = sys.argv\n\n if len(argv) < 2:\n print('No subcommand specified')\n print('Available subcommands:', *(s.name for s in self.subcmds))\n return 2\n\n subcmd = argv[1]\n\n if subcmd in {'--help', '-h'}:\n print('Batis - install and distribute desktop applications')\n print('Subcommands:')\n for sc in self.subcmds:\n print(' {:<12} - {}'.format(sc.name, sc.description))\n return 0\n\n for sc in self.subcmds:\n if subcmd == sc.name:\n sub_main = self._load(sc.entry_point)\n return sub_main(argv[2:])\n\n print('Unknown subcommand: {!r}'.format(subcmd))\n print('Available subcommands:', *(s.name for s in self.subcmds))\n return 2\n\n def _load(self, entry_point):\n modname, func = entry_point.split(':')\n mod = import_module(modname, package=self.package)\n return getattr(mod, func)\n","repo_name":"takluyver/vclurk","sub_path":"vclurk/subcmd.py","file_name":"subcmd.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23640733091","text":"import time\r\n\r\n\r\ndef process(filein):\r\n\r\n f_in=open(filein+\".txt\").read().split('\\n')\r\n f_out=open(filein+\"_out.txt\",'w')\r\n\r\n n=int(f_in[0])\r\n\r\n for i in range(n):\r\n t=f_in[1+i]\r\n\r\n\r\n parts=map(int,t.split(' '))\r\n ans=solve3(parts[0],parts[1],parts[2],parts[3:])\r\n\r\n f_out.write(\"Case #%d: %s\\n\" % (1+i,ans))\r\n f_out.close()\r\n\r\n\r\ndef solve3(N,S,p,t):\r\n count={(True,True):0,\r\n (False,True):0,\r\n (True,False):0,\r\n (False,False):0}\r\n for i in range(N):\r\n u=unsurprising(t[i])\r\n s=surprising(t[i])\r\n nu=(u!=[] and max(max(u))>=p)\r\n ns=(s!=[] and max(max(s))>=p)\r\n count[nu,ns]+=1\r\n \r\n score=count[(True,True)]+min(S,count[(False,True)])+count[(True,False)]\r\n return score\r\n\r\n \r\ndef allways(score):\r\n\r\n def ways(score,n):\r\n if n==1:\r\n if 0<=score<=10:\r\n return [[score]]\r\n else:\r\n return []\r\n else:\r\n return [[i]+w for i in range(0,11)\r\n for w in ways(score-i,n-1)\r\n if i<=w[0]]\r\n\r\n return ways(score,3)\r\n\r\ndef unsurprising(score):\r\n return [(a,b,c) for (a,b,c) in allways(score)\r\n if c-a<=1]\r\n\r\ndef surprising(score):\r\n return [(a,b,c) for (a,b,c) in allways(score)\r\n if c-a==2]\r\n\r\nprocess(\"4large\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/501.py","file_name":"501.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12515495657","text":"def solve():\n from collections import namedtuple\n Pair = namedtuple('Pair', ['first', 'second'])\n\n T = int(input())\n for t in range(1, T + 1):\n N = int(input())\n ticket_list = [Pair(input(), input()) for _ in range(N)]\n for ticket in ticket_list:\n print(ticket.first, ticket.second)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"satojkovic/algorithms","sub_path":"kickstart/2014/RoundD/test_namedtuple.py","file_name":"test_namedtuple.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10179518355","text":"import logging\n\nfrom django import forms\nfrom django.contrib.auth.models import User\n\nfrom shop.models import Order, ProductOrder, Product\nfrom common.models import Customer, DeliveryAddress\nfrom common.utils import create_temporary_user\nfrom common.forms import PhoneValidator\nfrom shop.utils import convert_price, get_delivery_price\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateOrderForm(forms.ModelForm, PhoneValidator):\n address = forms.CharField(required=True)\n house = forms.CharField(required=True)\n appartaments = forms.CharField(required=True)\n phone = forms.CharField(required=True)\n email = forms.EmailField(required=True)\n name = forms.CharField(required=True, max_length=150)\n\n def clean(self):\n self.errors.pop('customer', None)\n self.errors.pop('total_price', None)\n self.errors.pop('customer_data', None)\n self.errors.pop('products_data', None)\n self.errors.pop('delivery_data', None)\n self.errors.pop('products_price', None)\n self.errors.pop('delivery_address', None)\n self.errors.pop('currency', None)\n self.clean_customer()\n self.clean_products()\n self.clean_delivery_address()\n # customer\n # products\n return self.cleaned_data\n\n def clean_customer(self):\n customer = None\n user = self.data.get('user')\n email = self.cleaned_data.get('email', '')\n name = self.cleaned_data.get('name')\n currency = self.cleaned_data.get('currency')\n phone = self.cleaned_data.get('phone')\n if not currency:\n return\n if not user.is_authenticated:\n if email:\n user = User.objects.filter(email=email).first()\n if not user:\n user = create_temporary_user(email)\n if user.last_name is not name:\n user.last_name = name\n user.save()\n customer, cr = Customer.objects.get_or_create(user=user)\n ch = False\n if customer.currency_id is not currency.id:\n ch = True\n customer.currency = currency\n if phone and not customer.phone == phone:\n ch = True\n customer.phone = phone\n if ch:\n customer.save()\n self.data['user_created'] = cr\n self.data['user'] = user\n self.cleaned_data['customer'] = customer\n self.cleaned_data['customer_data'] = {\n 'email': email,\n 'name': name,\n 'phone': phone\n }\n return customer\n\n def clean_email(self):\n user = self.data.get('user')\n email = self.cleaned_data.get('email', '')\n if user and user.is_authenticated and not user.email == email:\n email_user = User.objects.filter(email=email).first()\n if email_user:\n raise forms.ValidationError('Email already in use.')\n else:\n user.email = email\n user.save()\n return email\n\n def clean_products(self):\n self.errors.pop('products', None)\n products = self.data.get('products')\n currency = self.cleaned_data.get('currency')\n orders = []\n products_price = 0\n products_data = []\n if currency:\n for product_id, quantity in products:\n product = Product.objects.get(id=product_id)\n products_price += convert_price(product.price, currency, product.currency) * int(quantity)\n order = ProductOrder.objects.create(product=product, quantity=quantity)\n orders.append(order)\n products_data.append(order.serialized)\n self.cleaned_data['products_data'] = products_data\n self.cleaned_data['products_price'] = products_price\n self.cleaned_data['total_price'] = products_price + get_delivery_price(products_price, currency)\n self.cleaned_data['products'] = orders\n return products\n\n def clean_delivery_address(self):\n customer = self.cleaned_data.get('customer')\n if not customer:\n return\n address = {\n 'address': self.cleaned_data['address'],\n 'house': 
self.cleaned_data['house'],\n 'appartaments': self.cleaned_data['appartaments']\n }\n if not hasattr(customer, 'delivery_address'):\n DeliveryAddress.objects.create(customer=customer, **address)\n else:\n DeliveryAddress.objects.filter(customer_id=customer.id).update(**address)\n self.cleaned_data['delivery_data'] = address\n self.cleaned_data['delivery_address'] = customer.delivery_address\n return customer.delivery_address\n\n def save(self):\n res = super().save()\n products = self.cleaned_data.get('products')\n for product in products:\n product.order = res\n product.save()\n return res\n\n class Meta:\n model = Order\n fields = (\n 'customer', 'customer_data', 'products_data', 'details', 'currency', 'products_price',\n 'total_price', 'delivery_address', 'delivery_data', 'payment_method'\n )\n","repo_name":"milashensky/pizzer","sub_path":"pizzer/shop/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35623485977","text":"import sys\nfrom PySide6.QtWidgets import QApplication, QMainWindow\nfrom PySide6.QtWidgets import QMainWindow, QApplication, QPushButton, QFileDialog, QInputDialog, QColorDialog\n\nfrom PySide6.QtCore import QObject, Qt\n\nfrom PySide6 import QtGui\nfrom PySide6.QtGui import QPixmap\n\nfrom DemoUI import Ui_MainWindow as UIWindow\n\nimport requests\n\nimport os, cv2, Camera\n\nimport quickocr as OCR\n\napp = QApplication(sys.argv)\n\nwindow = QMainWindow()\n\nui = UIWindow()\nui.setupUi(window)\n\nwindow.show()\n\nsrcIndex = 0\ncustom_oem_psm_config = r'--oem %d --psm %d'\nprev_file = \"\"\ncam = None\n\ncolor = (255, 0, 0)\n\nocr_dict = None\n\ndef cameraCallback(frame):\n is_success, im_buf_arr = cv2.imencode(\".jpg\", frame)\n RGBarray = im_buf_arr.tobytes()\n\n pixmap = QPixmap()\n\n pixmap.loadFromData(RGBarray)\n\n ui.imgFrame.setPixmap(pixmap)\n\ndef on_combobox_changed(value):\n global srcIndex, cam\n\n srcIndex = value\n \n print(\"combobox changed\", value)\n\n try:\n cam.release()\n cam = None\n except:\n ui.btnReset.setEnabled(False)\n\n if value == 0:\n ui.btnAction.setText(\"Upload\")\n \n if value == 1:\n ui.btnAction.setText(\"Get text\")\n \n if value == 2:\n ui.btnAction.setText(\"Capture\")\n cam = Camera.VideoCapture(0, cameraCallback)\n ui.btnReset.setEnabled(True)\n cam.start()\n\ndef computeFromFile(fname=None):\n global prev_file, ocr_dict\n \n if fname is None:\n fname = prev_file\n\n if len(fname) <= 3:\n return\n\n prev_file = fname\n\n config_ = custom_oem_psm_config % (ui.spnOem.value(), ui.spnPsm.value())\n \n RGBarray, width, height, boxes = OCR.recognize(fname, config_, color)\n\n ocr_dict = boxes\n\n image = QtGui.QImage(RGBarray, width, height, 3*width, QtGui.QImage.Format_RGB888)\n pixmap = QPixmap()\n\n pixmap.loadFromData(RGBarray)\n\n ui.imgFrame.setPixmap(pixmap)\n\ndef open_dialog():\n fname = QFileDialog.getOpenFileName(\n window,\n \"Open File\",\n \"${HOME}\",\n )\n\n computeFromFile(fname[0])\n\ndef performAction():\n if srcIndex == 0:\n open_dialog()\n \n if srcIndex == 1:\n text, ok = QInputDialog.getText(window, 'Image from URL', 'URL:')\n\n if ok and len(text) > 5:\n loc = \"captured.\" + text.split(\".\")[-1]\n \n r = requests.get(text)\n with open(loc, 'wb') as outfile:\n outfile.write(r.content)\n\n computeFromFile(loc)\n \n if srcIndex == 2:\n frame = cam.read()\n cv2.imwrite(\"captured.png\", frame)\n computeFromFile(\"captured.png\")\n\n cam.pause()\n\ndef performReset():\n if cam is not None:\n cam.start()\n\ndef changeConfig():\n computeFromFile()\n\ndef changeFontColor():\n global color\n color = QColorDialog.getColor().getRgb()[0:3]\n print(color)\n\n color = (color[2], color[1], color[0])\n \n computeFromFile()\n\ndef showText(mx, my):\n global ocr_dict\n d = ocr_dict\n \n lines = {}\n\n n_boxes = len(d['level'])\n for i in range(n_boxes):\n text = d['text'][i]\n if len(text) > 0:\n\n (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])\n\n if (x < mx) and (mx < x + w):\n if (y < my) and (my < y + h):\n print(text)\n break\n\nui.btnAction.clicked.connect(performAction)\nui.btnReset.clicked.connect(performReset)\n\nui.imgFrame.clicked.connect(showText)\n\nui.spnOem.valueChanged.connect(changeConfig)\nui.spnPsm.valueChanged.connect(changeConfig)\n\nui.cmboxSrc.currentIndexChanged.connect(on_combobox_changed)\n\nui.btnColor.clicked.connect(changeFontColor)\n\n# Start the event 
loop.\napp.exec_()\n","repo_name":"KodinGuy08/Tesseract-Demo","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26168735785","text":"\"\"\"\nAPI for a classification task, written using FastAPI.\n\"\"\"\nimport numpy as np\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom joblib import load\nfrom pydantic import BaseModel # pylint: disable=no-name-in-module\n\nSCALER_FILE = './models/scaler.pkl'\nMODEL_FILE = './models/model.pkl'\n\n# load the scaler\nwith open(SCALER_FILE, 'rb') as scaler_file:\n standard_scaler = load(scaler_file)\n\n# load the model\nwith open(MODEL_FILE, 'rb') as classifier_file:\n decision_tree_classifier = load(classifier_file)\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates\")\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def home(request: Request):\n \"\"\"\n GET function on the root folder.\n \"\"\"\n data = {\n \"page\": \"Home Page\"\n }\n return templates.TemplateResponse(\"page.html\", {\"request\": request, \"data\": data})\n\nclass Point(BaseModel): # pylint: disable=too-few-public-methods\n \"\"\"\n The input to the model, it is a point with x and y coordinates.\n \"\"\"\n x : float\n y : float\n\n\n@app.post(\"/predict\")\nasync def predict(point: Point):\n \"\"\"\n Predict function that defines the end point for the forecastor.\n Input: X and Y.\n Output: the class as a ressult of the classification model.\n \"\"\"\n input_array = np.array([[point.x], [point.y]]).reshape(1,2)\n scaled_input = standard_scaler.transform(input_array)\n model_output = decision_tree_classifier.predict(scaled_input.reshape(1,2))\n output_ = model_output[0].tolist()\n return {\n \"response\" : output_\n }\n","repo_name":"Faiga91/api-for-classification-task","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29712178762","text":"# I want to make a word counter. it needs to get the number of words, lines and characters in a file.\r\n# First I want to define some parameters: I want the program to tell me the number of words and the number of lines.\r\n\r\n#Function for word count\r\ndef count_words(poem):\r\n words = poem.split(' ') # This splits the text by spaces, thereby counting the number of words in the next\r\n num_words= len(words)\r\n return num_words\r\n\r\n#Function for line count\r\ndef count_lines(poem):\r\n lines = poem.split('\\n') # This splits the text by new lines, thereby counting the number of lines in the text\r\n for l in lines: \r\n if not l: # If the line is empty, we remove it from the list using the remove() command.\r\n lines.remove(l)\r\n \r\n return len(lines)\r\n\r\n\r\nf = open('automate.txt', 'r') # Opens the file 'automate.txt'\r\npoem = f.read()\r\nf.close()\r\n\r\nnum_words = count_words(poem)\r\nnum_lines = count_lines(poem)\r\n\r\nprint(\"The number of words: \", num_words)\r\nprint(\"The number of lines: \", num_lines)","repo_name":"Kody-Hedder/Word-counter","sub_path":"word counter_v2.py","file_name":"word counter_v2.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20549290942","text":"from invoke import task\nfrom pathlib import Path\n\nbasepath = \".\"\nopen_cmd = \"open\"\noverleaf = \"/home/anonymizeduser/Dropbox\\ \\(MLS\\)/Apps/Overleaf/gbi\"\n\nfig_names = {\n \"1\": \"paper/fig1\",\n \"2\": \"paper/fig2\",\n \"3\": \"paper/fig3\",\n \"4\": \"paper/fig4\",\n \"5\": \"paper/fig5\",\n \"6\": \"paper/fig6\",\n}\n\n\n@task\ndef syncOverleaf(c, fig):\n c.run(\n \"cp ./{fn}/fig/*.pdf {ol}/figures/ \".format(\n bp=basepath, fn=fig_names[fig], ol=overleaf\n )\n )\n c.run(\n \"cp ./{fn}/fig/*.png {ol}/figures/ \".format(\n bp=basepath, fn=fig_names[fig], ol=overleaf\n )\n )\n\n\n@task\ndef convert(c, fig):\n _convertsvg2pdf(c, fig)\n _convertpdf2png(c, fig)\n\n\n@task\ndef _convertsvg2pdf(c, fig):\n if fig is None:\n for f in range(len(fig_names)):\n _convert_svg2pdf(c, str(f + 1))\n return\n pathlist = Path(\"{bp}/{fn}/fig/\".format(bp=basepath, fn=fig_names[fig])).glob(\n \"*.svg\"\n )\n for path in pathlist:\n c.run(\"inkscape {} --export-pdf={}.pdf\".format(str(path), str(path)[:-4]))\n\n\n@task\ndef _convertpdf2png(c, fig):\n if fig is None:\n for f in range(len(fig_names)):\n _convert_pdf2png(c, str(f + 1))\n return\n pathlist = Path(\"{bp}/{fn}/fig/\".format(bp=basepath, fn=fig_names[fig])).glob(\n \"*.pdf\"\n )\n for path in pathlist:\n c.run(\n 'inkscape {} --export-png={}.png -b \"white\" --export-dpi=250'.format(\n str(path), str(path)[:-4]\n )\n )\n","repo_name":"amortizedgbi/amortizedgbi","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"4037705040","text":"import hail as hl\nfrom hail_scripts.Filter import *\n\ndef add_variant_type(alt_alleles: hl.expr.ArrayExpression) -> hl.expr.StructExpression:\n \"\"\" \n Get Struct of variant_type and n_alt_alleles from ArrayExpression of Strings (all alleles) \n \"\"\"\n ref = alt_alleles[0]\n alts = alt_alleles[1:]\n non_star_alleles = hl.filter(lambda a: a != '*', alts)\n return hl.struct(variant_type=hl.cond(\n hl.all(lambda a: hl.is_snp(ref, a), non_star_alleles),\n hl.cond(hl.len(non_star_alleles) > 1, \"multi-snv\", \"snv\"),\n hl.cond(\n hl.all(lambda a: hl.is_indel(ref, a), non_star_alleles),\n hl.cond(hl.len(non_star_alleles) > 1, \"multi-indel\", \"indel\"),\n \"mixed\")\n ), n_alt_alleles=hl.len(non_star_alleles))\n\ndef generate_split_alleles(mt: hl.MatrixTable) -> hl.Table:\n\n allele_data = hl.struct(nonsplit_alleles=mt.alleles,\n has_star=hl.any(lambda a: a == '*', mt.alleles))\n\n mt = mt.annotate_rows(allele_data=allele_data.annotate(**add_variant_type(mt.alleles)))\n mt = hl.split_multi_hts(mt,left_aligned=True)\n\n allele_type = (hl.case()\n .when(hl.is_snp(mt.alleles[0], mt.alleles[1]), 'snv')\n .when(hl.is_insertion(mt.alleles[0], mt.alleles[1]), 'ins')\n .when(hl.is_deletion(mt.alleles[0], mt.alleles[1]), 'del')\n .default('complex')\n )\n mt = mt.annotate_rows(allele_data=mt.allele_data.annotate(allele_type=allele_type,\n was_mixed=mt.allele_data.variant_type == 'mixed'))\n return mt\n\n# Read in the matrix table\nmt = hl.read_matrix_table('aatd.mt')\n\n# Left normalize and split alleles\nsplit = generate_split_alleles(mt)\nmts = hl.variant_qc(split)\n\n\n# Hard-filtering germline short variants\nmts = mts.filter_rows(mts.info.QD >= 2)\nmts = mts.filter_rows(mts.info.FS <= 60)\nmts = mts.filter_rows(mts.info.SOR <= 3)\nmts = mts.filter_rows(mts.info.MQ >= 40)\nmts = mts.filter_rows(mts.info.MQRankSum >= -12.5)\nmts = mts.filter_rows(mts.info.ReadPosRankSum >= -8)\n\nmts_mq_40 = mts.filter_rows(mts.info.MQ >= 40)\nmts_mq_60 = mts.filter_rows(mts.info.MQ >= 60)\n\n\n# GT Filtering\n# 0/0\nmts_gt = mts.filter_rows(mts.variant_qc.AC[0] == 6)\n\n# 1/1\n# mts.filter_rows((mts.variant_qc.AC[0] == 0) & (mts.variant_qc.AC[1] == 6))\n\n# 0/1 or 1/0\n# mts.filter_rows((mts.variant_qc.AC[0] == 3) & (mts.variant_qc.AC[1] == 3))\n","repo_name":"jinlab-washu/Jin-lab.manual","sub_path":"hail/examples/allele_splitting_and_filtering/allele_splitting_and_filtering.py","file_name":"allele_splitting_and_filtering.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"5416053035","text":"from copy import copy\nimport pathlib\nimport sys\nimport time\nimport unittest\nfrom unittest.mock import Mock, patch, ANY\n\nBASE_DIR = pathlib.Path(__file__).resolve().parent.parent.parent\nif str(BASE_DIR) not in sys.path:\n sys.path.append(str(BASE_DIR))\n\nfrom kaban.receiver import ReceiverThread\nfrom kaban.database import WebhookDB\nfrom kaban.settings import EXIT_EVENT, NEW_MESSAGES_EVENT, USERS, MASTER_UID\n\nfrom tests.fixtures.fixtures import MockDB, reset_mock, TG_REQUEST\n\n\n@patch('kaban.receiver.exit_signal')\n@patch('kaban.receiver.SQLSession')\n@patch('kaban.receiver.send_message')\nclass SetReceiver(MockDB):\n def test_normal_case(self, mock_sender, mock_session, foo):\n tg_request = copy(TG_REQUEST)\n with self.SQLSession() as session:\n session.add(WebhookDB(data=tg_request))\n session.add(WebhookDB(data=tg_request))\n session.commit()\n USERS.setdefault(MASTER_UID, {'AWAITING_FEED': True, 'POTENTIAL_FEED': None})\n\n mock_session.return_value = self.SQLSession()\n mock_bot = Mock()\n recv = ReceiverThread(mock_bot)\n\n recv.new_messages = Mock()\n recv.new_messages.wait.return_value = True\n recv.new_messages.clear.side_effect = EXIT_EVENT.set\n\n recv.start()\n if EXIT_EVENT.wait(10):\n time.sleep(0.1)\n recv.stop()\n\n self.assertEqual(mock_bot.process_new_updates.call_count, 2)\n mock_sender.assert_called_once()\n mock_sender.assert_called_with(mock_bot, MASTER_UID, ANY)\n\n reset_mock(mock_sender, mock_session, foo)\n\n def test_exception(self, *args):\n recv = ReceiverThread(Mock())\n recv.new_messages = Mock()\n recv.new_messages.wait.side_effect = Exception\n recv.exit = Mock()\n recv.start()\n time.sleep(0.1)\n with self.assertRaises(Exception):\n recv.stop()\n\n reset_mock(*args)\n\n def tearDown(self):\n if EXIT_EVENT.is_set():\n EXIT_EVENT.clear()\n if NEW_MESSAGES_EVENT.is_set():\n NEW_MESSAGES_EVENT.clear()\n if USERS.get(MASTER_UID):\n USERS.pop(MASTER_UID)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"voronokKita/kaban-chan","sub_path":"tests/units/test_receiver.py","file_name":"test_receiver.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23567898651","text":"def put(n, k):\r\n\r\n if n == k:\r\n return (0, 0)\r\n\r\n # n stalls, k people\r\n options = {\r\n n: 1\r\n }\r\n\r\n best = 0\r\n while k > 0:\r\n best = max(options.keys())\r\n if options[best] > 1:\r\n options[best] -= 1\r\n else:\r\n del options[best]\r\n\r\n if best % 2 == 1:\r\n last = best // 2\r\n if last in options:\r\n options[last] += 2\r\n else:\r\n options[last] = 2\r\n else:\r\n last = best // 2\r\n if last in options:\r\n options[last] += 1\r\n else:\r\n options[last] = 1\r\n\r\n if (last - 1) in options:\r\n options[last - 1] += 1\r\n else:\r\n options[last - 1] = 1\r\n\r\n k -= 1\r\n\r\n if best % 2 == 1:\r\n result = (best // 2, best // 2)\r\n else:\r\n result = (best // 2, best // 2 - 1)\r\n\r\n return max(result), min(result)\r\n\r\n\r\nt = int(input()) # read a line with a single integer\r\nfor i in range(1, t + 1):\r\n n, k = input().split(' ')\r\n print('Case #{}: {}'.format(i, ' '.join(map(str, put(int(n), int(k))))))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1623.py","file_name":"1623.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1205102698","text":"import torch\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.metrics import Loss\nfrom pprint import pprint\n\nfrom ckpt import *\nfrom metrics import Ngram, Ldiff_Square\nfrom utils import *\nfrom losses import get_loss\n\n### nlg ngram evaluation, loss engine\ndef get_eval(args, model, loss_fn):\n def infer(evaluator, batch):\n model.eval()\n args._training = model.training\n\n batch = prep_batch(args, batch)\n with torch.no_grad():\n if args.model in ['rnnsearch', 'seq2seq']:\n y_pred = model(batch.src, batch.trg)\n y_pred = y_pred\n results = model.inference(batch.src, beamsize= args.beamsize)\n\n elif args.model == 'transformers':\n pass\n\n else:\n \"\"\"not impl\"\"\"\n pass\n\n\n return {\n 'y_pred': y_pred,\n 'trg_idx': batch.trg,\n 'infer': results,\n }\n\n\n engine_g = Engine(infer)\n\n metrics_g = {\n 'loss': Loss(loss_fn, output_transform=lambda x: (x['y_pred'], x['trg_idx']) ),\n 'ngrams_greed': Ngram(args, make_fake_vocab(), output_transform=lambda x:(x['infer']['greedy']['sentidxs'], x['trg_idx']) ),\n 'ngrams_beam': Ngram(args, make_fake_vocab(), output_transform=lambda x:(x['infer']['beam']['sentidxs'], x['trg_idx']) ),\n 'length_greed': Ldiff_Square(args, output_transform=lambda x:(x['infer']['greedy']['sentidxs'], x['trg_idx']) ),\n 'length_beam': Ldiff_Square(args, output_transform=lambda x:(x['infer']['beam']['sentidxs'], x['trg_idx']) ),\n }\n for name, metric in metrics_g.items():\n metric.attach(engine_g, name)\n\n\n return engine_g\n\n\ndef runeval(evaluator, iterator):\n evaluator.run(iterator)\n return evaluator.state\n\ndef only_eval(args):\n if args.load_path is not 'none':\n saved_dict = get_model_ckpt(args)\n its= saved_dict['its']\n model_ = saved_dict['model']\n\n loss_fn = get_loss(ignore_index=PAD_TOKEN, smooth=args.label_smoothing)\n scheduler = None\n evaluator = get_eval(args, model_, loss_fn)\n\n @evaluator.on(Events.STARTED)\n def on_eval_start(engine):\n pprint(args)\n pprint(args.model)\n pprint(args.load_path)\n @evaluator.on(Events.EPOCH_COMPLETED)\n def after_an_epoch(engine):\n def results(spl, state):\n for key, val in state.metrics.items():\n if isinstance(val, dict):\n for key2, v in val.items():\n print(f\"{spl}/{key}/{key2}: {v}\" )\n else:\n print(f\"{name}/{key}\" , val)\n #printout all metrics\n results('test', engine.state)\n print(f\"trg_eval: {engine.state.output['trg_idx'][0]}\")\n print(f\"greedy: {engine.state.output['infer']['greedy']['sentidxs'][0]}\")\n print(f\"beam: {engine.state.output['infer']['beam']['sentidxs'][0]}\")\n\n evaluator.run(its['val'] if args.debug else its['test'],\n max_epochs=1)\n\n\n else:\n print(f\"need to specify args.load_path (e.g. .pth file or pth containing dir )\")\n print(f\"python main.py --load_path trained_models/rnnsearch/rnnsearch_adam_ep100_labelsmooth0.2_d1111_t2037/\")\n print(f\"if specify dir, it will load the lowest loss model.\")\n","repo_name":"sonsus/papago_test","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27659958932","text":"from typing import Dict, Optional, Union\n\nfrom nibe.exceptions import NoMappingException\n\n\ndef is_coil_boolean(coil):\n if coil.factor != 1:\n return False\n\n if coil.min == 0 and coil.max == 1:\n return True\n\n if coil.mappings and all(k in [\"0\", \"1\"] for k in coil.mappings):\n return True\n\n return False\n\n\nclass Coil:\n mappings: Optional[Dict[str, str]]\n reverse_mappings: Optional[Dict[str, str]]\n _value: Union[int, float, str, None]\n\n def __init__(\n self,\n address: int,\n name: str,\n title: str,\n size: str,\n factor: int = 1,\n info: str = None,\n unit: str = None,\n mappings: dict = None,\n write: bool = False,\n **kwargs,\n ):\n assert isinstance(address, int), \"Address must be defined\"\n assert name, \"Name must be defined\"\n assert title, \"Title must be defined\"\n assert factor, \"Factor must be defined\"\n assert not (\n mappings is not None and factor != 1\n ), \"When mapping is used factor needs to be 1\"\n\n self.size = size\n\n self.address = address\n self.name = name\n self.title = title\n self.factor = factor\n\n self.set_mappings(mappings)\n\n self.info = info\n self.unit = unit\n self.is_writable = write\n\n self.other = kwargs\n\n self.raw_min = self.other.get(\"min\")\n self.raw_max = self.other.get(\"max\")\n\n self.min = self.raw_min / factor if self.raw_min is not None else None\n self.max = self.raw_max / factor if self.raw_max is not None else None\n\n self.is_boolean = is_coil_boolean(self)\n if self.is_boolean and not mappings:\n self.set_mappings({\"0\": \"OFF\", \"1\": \"ON\"})\n\n self._value = None\n\n def set_mappings(self, mappings):\n if mappings:\n self.mappings = {k: v.upper() for k, v in mappings.items()}\n self.reverse_mappings = {v.upper(): k for k, v in mappings.items()}\n else:\n self.mappings = None\n self.reverse_mappings = None\n\n @property\n def value(self) -> Union[int, float, str, None]:\n return self._value\n\n @value.setter\n def value(self, value: Union[int, float, str, None]):\n if value is None:\n self._value = None\n return\n\n if self.reverse_mappings:\n assert isinstance(\n value, str\n ), f\"Provided value '{value}' is invalid type (str is supported) for {self.name}\"\n\n value = value.upper()\n assert (\n value in self.reverse_mappings\n ), f\"Provided value '{value}' is not in {self.reverse_mappings.keys()} for {self.name}\"\n\n self._value = value\n return\n\n assert isinstance(\n value, (int, float)\n ), f\"Provided value '{value}' is invalid type (int and float are supported) for {self.name}\"\n\n self.check_value_bounds(value)\n\n self._value = value\n\n @property\n def has_mappings(self):\n return self.mappings is not None\n\n def get_mapping_for(self, value: int):\n if not self.mappings:\n raise NoMappingException(f\"No mappings defined for {self.name}\")\n\n try:\n return self.mappings[str(value)]\n except KeyError:\n raise NoMappingException(\n f\"Mapping not found for {self.name} coil for value: {value}\"\n )\n\n def get_reverse_mapping_for(self, value: Union[int, float, str, None]):\n if not self.reverse_mappings:\n raise NoMappingException(f\"No reverse mappings defined for {self.name}\")\n\n try:\n return self.reverse_mappings[str(value)]\n except KeyError:\n raise NoMappingException(\n f\"Reverse mapping not found for {self.name} coil for value: {value}\"\n )\n\n def check_value_bounds(self, value):\n if self.min is not None:\n assert (\n value >= self.min\n ), f\"{self.name} coil value ({value}) is smaller than min allowed ({self.min})\"\n\n if self.max is 
not None:\n assert (\n value <= self.max\n ), f\"{self.name} coil value ({value}) is larger than max allowed ({self.max})\"\n\n def check_raw_value_bounds(self, value):\n if self.raw_min is not None:\n assert (\n value >= self.raw_min\n ), f\"value ({value}) is smaller than min allowed ({self.raw_min})\"\n\n if self.raw_max is not None:\n assert (\n value <= self.raw_max\n ), f\"value ({value}) is larger than max allowed ({self.raw_max})\"\n\n def __repr__(self):\n return f\"Coil {self.address}, name: {self.name}, title: {self.title}, value: {self.value}\"\n","repo_name":"elupus/yozik04_nibe","sub_path":"nibe/coil.py","file_name":"coil.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"4915586474","text":"##########################################################################################\n# Utility functions for images\n##########################################################################################\nfrom PIL import Image\nimport numpy as np\nimport os\nimport glob\nfrom matplotlib.image import imread\n\ndef load_image(path, size=None):\n \"\"\"\n Load the image from the given file-path and resize it to the given size if not None.\n Eg: size = (width, height)\n \"\"\"\n img = Image.open(path)\n\n if (size != None) and (size != ''):\n img = img.resize(size=size, resample=Image.LANCZOS)\n\n img = np.array(img)\n\n # Scale image-pixels so they fall between 0.0 and 1.0\n img = img / 255.0\n\n # Convert 2-dim gray-scale array to 3-dim RGB array.\n if (len(img.shape) == 2):\n img = np.repeat(img[:, :, np.newaxis], 3, axis=2)\n\n return np.array(img)\n\n\ndef load_images(image_paths):\n # Load the images from disk.\n images = [imread(path) for path in image_paths]\n\n # Convert to a numpy array and return it.\n return np.asarray(images)\n\n\ndef get_images_path_list_from_dir(dir_path, img_format='jpg'):\n img_regex = os.path.join(dir_path, '*.' + img_format)\n img_paths = glob.glob(img_regex)\n\n # imgs = [load_image(img_path) for img_path in img_paths]\n # return np.array(imgs), img_paths\n\n return img_paths\n\n\ndef save_image(image_np, image_path_name):\n img = Image.fromarray(image_np)\n img.save(image_path_name)\n\n","repo_name":"abhishekrana/Tensorflow_TFRecords_Estimator_Pipeline","sub_path":"utils/utils_image.py","file_name":"utils_image.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"3823414329","text":"import cv2\r\n\r\ndef capture_photo(folder_path, file_name):\r\n # Open the camera\r\n camera = cv2.VideoCapture(0)\r\n\r\n # Capture a frame\r\n _, frame = camera.read()\r\n\r\n # Release the camera\r\n camera.release()\r\n\r\n # Save the image\r\n file_path = f\"{folder_path}/{file_name}.jpg\"\r\n cv2.imwrite(file_path, frame)\r\n\r\n print(f\"Photo saved at: {file_path}\")\r\n\r\n# Prompt the user for folder path and file name\r\nfolder_path = input(\"Enter the folder path to save the photo: \")\r\nfile_name = input(\"Enter the file name for the photo: \")\r\n\r\n# Capture and save the photo\r\ncapture_photo(folder_path, file_name)\r\n","repo_name":"suhanpahari/Photo_taker","sub_path":"photo tker.py","file_name":"photo tker.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"9307971812","text":"from __future__ import absolute_import, division, print_function\n\n# Import Tensorflow and Tensorflow datasets\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n# Helper libraries\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nrml\nimport plotImages\nimport supp\nprint(tf.__version__)\n\n# Build dataset (use only once)\ndataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)\ntrain_dataset, test_dataset = dataset['train'], dataset['test']\n\nclass_names = [\n'T-shirt/top',\n'Trouser ',\n'Pullover ',\n'Dress ',\n'Coat ',\n'Sandal ',\n'Shirt ',\n'Sneaker ',\n'Bag ',\n'Ankle boot '\n]\n\ntrain_numbers = metadata.splits['train'].num_examples\ntest_numbers = metadata.splits['test'].num_examples\nprint('Training number examples {}'.format(train_numbers))\nprint('Test number examples {}'.format(test_numbers))\n\ntrain_dataset = train_dataset.map(nrml.normalize)\ntest_dataset = test_dataset.map(nrml.normalize)\n\n# Iteration parameters\nBATCH_SIZE = 32\nEPOCH = 5\ntrain_dataset = train_dataset.repeat().shuffle(train_numbers).batch(BATCH_SIZE)\ntest_dataset = test_dataset.batch(BATCH_SIZE)\n\n# Create a model\nINPUT_NEURONS = 512\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28, 1)),\n tf.keras.layers.Dense(INPUT_NEURONS, activation=tf.nn.relu),\n tf.keras.layers.Dense(INPUT_NEURONS, activation=tf.nn.relu),\n tf.keras.layers.Dense(INPUT_NEURONS, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nLAYERS = len(model.layers)\n# Compile a model\nmodel.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n# V Train the model V\nmodel.fit(\n train_dataset,\n epochs=EPOCH,\n steps_per_epoch=math.ceil(train_numbers/BATCH_SIZE)\n)\n\n# Evaluation\ntest_loss, test_accuracy = model.evaluate(\n test_dataset,\n steps=math.ceil(test_numbers/BATCH_SIZE)\n)\n\nprint('Accuracy of the model:', test_accuracy)\n\n# Make predictions\nbatchCount = 1\ni = 0\nname = '_LAYERS_' + str(LAYERS) + '_INPUT_NEURON_=_' + str(INPUT_NEURONS) + '_BATCH_SIZE_=_' + str(BATCH_SIZE) + '_Epoch_=_' + str(EPOCH)\nfor test_images, test_labels in test_dataset.take(batchCount):\n test_images = test_images.numpy()\n test_labels = test_labels.numpy()\n predictions = model.predict(test_images)\n i += 1\n plotImages.plotbatch(name,test_loss,test_accuracy,BATCH_SIZE,test_labels,class_names,predictions,test_images, save = True)\n\n\n\n\n\n\n","repo_name":"lukasijus/ItMLu","sub_path":"Lesson_3/fashionmnsitNN.py","file_name":"fashionmnsitNN.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23593362191","text":"import sys\r\nimport re\r\n\r\ndef insertInto(letters,res_map,r,c,m_r,m_c):\r\n isInside = False\r\n minInside = 99999\r\n myL = []\r\n## print \"Before\",letters\r\n for l in letters:\r\n if res_map[m_r][m_c] in l:\r\n isInside = True\r\n myL = l\r\n minInside = min(l)\r\n## print \"Inside! \"+str(minInside)\r\n break\r\n \r\n for l in letters:\r\n if res_map[r][c] in l:\r\n myMin = min(l)\r\n if isInside:\r\n if (myMin > minInside):\r\n myL.extend(l)\r\n del letters[letters.index(l)]\r\n else:\r\n l.extend(myL)\r\n del letters[letters.index(myL)]\r\n else:\r\n l.append(res_map[m_r][m_c])\r\n## print\"After: \",letters\r\n return\r\n \r\n if isInside:\r\n myL.append(res_map[r][c])\r\n else:\r\n letters.append([res_map[r][c],res_map[m_r][m_c]])\r\n## print letters\r\n\r\nfilename = \"C:\\\\Amir\\\\programming\\\\CodeJam\\\\first\\\\2\\\\B-large.in\"\r\nf = open(filename)\r\nlines = f.readlines()\r\nf.close()\r\nlines.reverse()\r\nfirstLine = lines.pop()\r\n#print firstLine\r\nmaps = int(firstLine)\r\nout = open(\"C:\\\\Amir\\\\programming\\\\CodeJam\\\\first\\\\2\\\\out.txt\",'w')\r\nfor z in range(maps):\r\n res_map = []\r\n [rows,cols] = lines.pop().split(\" \")\r\n rows = int(rows)\r\n cols = int(cols)\r\n cur_map = [[20000]*(cols+2)]\r\n for j in range(rows):\r\n cur_map.append([20000]+map(int,lines.pop().split(\" \"))+[20000])\r\n\r\n cur_map.append([20000]*(cols+2))\r\n## print cur_map\r\n cur = 1\r\n for p in range(rows):\r\n res_map.append(range(cur,cur+cols))\r\n cur += cols\r\n\r\n letters = [[1]]\r\n\r\n # finished building map\r\n \r\n # calculating result map\r\n for r in range(1,rows+1):\r\n for c in range(1,cols+1):\r\n## print r,\"-\",c\r\n min_near = min(cur_map[r-1][c],cur_map[r+1][c],cur_map[r][c+1],cur_map[r][c-1])\r\n## print min_near\r\n if (cur_map[r][c] > min_near):\r\n if (min_near == cur_map[r-1][c]):\r\n insertInto(letters,res_map,r-1,c-1,r-2,c-1)\r\n elif (min_near == cur_map[r][c-1]):\r\n insertInto(letters,res_map,r-1,c-1,r-1,c-2)\r\n elif (min_near == cur_map[r][c+1]):\r\n insertInto(letters,res_map,r-1,c-1,r-1,c)\r\n elif (min_near == cur_map[r+1][c]):\r\n insertInto(letters,res_map,r-1,c-1,r,c-1)\r\n else:\r\n # can do it with continue\r\n## print \"not found\"\r\n found = False\r\n for l in letters:\r\n if res_map[r-1][c-1] in l:\r\n found = True\r\n break\r\n if not found:\r\n letters.append([res_map[r-1][c-1]])\r\n\r\n## print \"Result:\"\r\n## print letters\r\n\r\n out.write(\"Case #\"+str(z+1)+\":\\n\")\r\n for i in range(1,cur):\r\n for l in letters:\r\n if i in l:\r\n ind = letters.index(l)\r\n## print ind\r\n out.write(chr(97+ind))\r\n if (i%cols == 0):\r\n out.write(\"\\n\")\r\n else:\r\n out.write(\" \")\r\nout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_35/272.py","file_name":"272.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72565228034","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:\n \n # -------------------------------------\n \n def dfs(node):\n \n if node:\n \n yield from dfs(node.left)\n yield node.val\n yield from dfs(node.right)\n \n # -------------------------------------\n \n l1 = deque( [*dfs(root1)] )\n l2 = deque( [*dfs(root2)] )\n \n result = []\n \n while l1 and l2:\n \n if l1[0] < l2[0]:\n \n result.append( l1.popleft() )\n \n else:\n \n result.append( l2.popleft() )\n \n while l1:\n result.append( l1.popleft() )\n \n while l2:\n result.append( l2.popleft() )\n \n return result\n\n\n\n# m : the number of nodes in the first binary search tree\n# n : the number of nodes in the second binary search tree \n\n## Time Comeplexity: O( m + n )\n#\n# The overhead in time is the cost of DFS and merging process, which is of O( m + n )\n\n## Space Complexity: O( m + n )\n#\n# The overhead in space is the storage for result output, which is of O( m + n )\n\n\nimport unittest\n\nclass Testing( unittest.TestCase ):\n\n def test_case_1( self ):\n\n root1 = TreeNode( 2 )\n root1.left = TreeNode( 1 )\n root1.right = TreeNode( 4 )\n\n root2 = TreeNode( 1 )\n root2.left = TreeNode( 0 )\n root2.right = TreeNode( 3 )\n\n result = Solution().getAllElements(root1=root1, root2=root2)\n self.assertEqual(result, [0, 1, 1, 2, 3, 4])\n\n\n\n def test_case_2( self ):\n\n root1 = TreeNode( 0 )\n root1.left = TreeNode( -10 )\n root1.right = TreeNode( 10 )\n\n root2 = TreeNode( 5 )\n root2.left = TreeNode( 1 )\n root2.right = TreeNode( 7 )\n root2.left.left = TreeNode( 0 )\n root2.left.right = TreeNode( 2 )\n\n result = Solution().getAllElements(root1=root1, root2=root2)\n self.assertEqual(result, [-10,0,0,1,2,5,7,10])\n\n\n def test_case_3( self ):\n\n root1 = None\n\n root2 = TreeNode( 5 )\n root2.left = TreeNode( 1 )\n root2.right = TreeNode( 7 )\n root2.left.left = TreeNode( 0 )\n root2.left.right = TreeNode( 2 )\n\n result = Solution().getAllElements(root1=root1, root2=root2)\n self.assertEqual(result, [0,1,2,5,7])\n\n\n def test_case_4( self ):\n\n root1 = TreeNode( 0 )\n root1.left = TreeNode( -10 )\n root1.right = TreeNode( 10 )\n\n root2 = None\n\n result = Solution().getAllElements(root1=root1, root2=root2)\n self.assertEqual(result, [-10,0,10])\n\n\nif __name__ == '__main__':\n\n unittest.main() ","repo_name":"brianchiang-tw/leetcode","sub_path":"2020_September_Leetcode_30_days_challenge/Week_1_All Elements in Two Binary Search Trees/by_inorder_and_merge.py","file_name":"by_inorder_and_merge.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"}
+{"seq_id":"17074375849","text":"from random import randint, random\n\n\nclass TaxIdGenerator:\n\n @classmethod\n def taxId(cls, chance=0.5):\n if random() > chance:\n return cls.cpf()\n return cls.cnpj()\n\n @classmethod\n def cpf(cls):\n cpf = [randint(0, 9) for _ in range(9)]\n for _ in range(2):\n value = sum([(len(cpf) + 1 - i) * v for i, v in enumerate(cpf)]) % 11\n cpf.append(11 - value if value > 1 else 0)\n return '{}{}{}.{}{}{}.{}{}{}-{}{}'.format(*cpf)\n\n @classmethod\n def cnpj(cls):\n cnpj = [1, 0, 0, 0] + [randint(0, 9) for _ in range(8)]\n for _ in range(2):\n cnpj = [cls._calculateSpecialDigit(cnpj)] + cnpj\n return '{}{}.{}{}{}.{}{}{}/{}{}{}{}-{}{}'.format(*cnpj[::-1])\n\n @classmethod\n def _calculateSpecialDigit(cls, digits):\n digit = 0\n for index, value in enumerate(digits):\n digit += value * (index % 8 + 2)\n digit = 11 - digit % 11\n return digit if digit < 10 else 0\n","repo_name":"starkbank/sdk-python","sub_path":"tests/utils/taxIdGenerator.py","file_name":"taxIdGenerator.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
+{"seq_id":"20013176575","text":"import base64\nimport re\n\nimport requests\nfrom pyquery import PyQuery\n\nfrom bin.run import Comic\n\n\ndef get_info(detail_url):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n }\n try:\n res = requests.get(detail_url, headers=headers).text\n print(res)\n nonce = re.findall('window\\[\"n.*?e\"\\]\\s=\\s(.*?);', res)[0]\n data = re.findall('var DATA.*?\\'(.*?)\\'', res)[0]\n chapter = re.findall('title-comicHeading\\\">(.*?)<', res)[0].replace(' ', '')\n chapters = PyQuery(res)('#catalogueList li .tool_chapters_list_title').text().split()\n chapter = str(chapters.index(chapter) + 1).rjust(3, '0') + chapter\n chapters = [str(chapters.index(chapter) + 1).rjust(3, '0') + chapter for chapter in chapters]\n name = re.findall('《(.*?)》', res)[0]\n return (nonce, data, chapter, chapters, name)\n except Exception as er:\n print(detail_url, er)\n\n\ndef __parse_img(nonce, data):\n T = [i for i in data]\n N = re.findall('\\d+[a-zA-Z]+', nonce)\n length = len(N)\n while length:\n locate = int(re.findall('\\d+', N[length - 1])[0]) & 255\n string = re.sub('\\d+', '', N[length - 1])\n del T[locate:locate + len(string)]\n length -= 1\n T = ''.join(T)\n return base64.b64decode(T.encode())\n\n\ndef ac_qq(detail_url):\n nonce, data, chapter, chapters, name = get_info(detail_url)\n d = __parse_img(nonce, data)\n t = re.findall('\"url\":\"(.*?)\"', str(d))\n l = [str(i).replace(r'\\\\', '') for i in t]\n print(l)\n Comic.download_images({'images_url': l, 'chapter': chapter, 'comic_title': name}, chapters)\n\n\nif __name__ == '__main__':\n urls = [\n 'https://ac.qq.com/ComicView/index/id/635142/cid/3',\n ]\n for url in urls:\n ac_qq(url)\n","repo_name":"Amd794/kanleying","sub_path":"expand/aaaac_qq_com.py","file_name":"aaaac_qq_com.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"61"}
+{"seq_id":"23413051491","text":"def trick(test):\n p1 = int(test[0])\n p2 = int(test[5])\n row1 = set(int(i) for i in str.split(test[p1]))\n row2 = set(int(i) for i in str.split(test[5+p2]))\n result = list(row1&row2)\n if len(result) == 1: return str(result[0])\n elif len(result) > 1: return \"Bad magician!\"\n else: return \"Volunteer cheated!\"\n\ndef main():\n inFile = \"A-small-attempt0.in\"\n outFile = \"output.txt\"\n\n inputL = []\n with open(inFile) as inputFile:\n for i, line in enumerate(inputFile):\n inputL += [line]\n \n f = open(outFile,'w')\n numOfTest = int(inputL[0])\n for i in range(0, numOfTest):\n f.write(\"Case #\" + str(i+1) + \": \" + trick(inputL[1+i*10:1+i*10+10]) + \"\\n\")\n f.close()\n\nif __name__ == '__main__':\n main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2199.py","file_name":"2199.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70451772036","text":"\"\"\"\n545. Boundary of Binary Tree\n\"\"\"\n\nfrom typing import List\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def boundaryOfBinaryTree(self, root: TreeNode) -> List[int]:\n if root == None:\n return []\n ret = [root.val]\n\n self.leftBound(root.left, ret)\n self.visitLeaves(root.left, ret)\n self.visitLeaves(root.right, ret)\n self.rightBound(root.right, ret)\n\n return ret\n\n def leftBound(self, root: TreeNode, ret: List[int]):\n if root == None or ( root.left == None and root.right == None ):\n return\n ret.append(root.val)\n if root.left != None:\n self.leftBound(root.left, ret)\n elif root.right != None:\n self.leftBound(root.right, ret)\n\n def rightBound(self, root: TreeNode, ret: List[int]):\n if root == None or (root.left == None and root.right == None):\n return\n if root.right != None:\n self.rightBound(root.right, ret)\n elif root.left != None:\n self.rightBound(root.left, ret)\n ret.append(root.val)\n\n def visitLeaves(self, root: TreeNode, ret: List[int]):\n if root == None:\n return\n if root.left == None and root.right == None:\n ret.append(root.val)\n\n self.visitLeaves(root.left, ret)\n self.visitLeaves(root.right, ret)\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0545_boundary_of_binary_tree.py","file_name":"0545_boundary_of_binary_tree.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17724419596","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython.core.interactiveshell import InteractiveShell\n\nInteractiveShell.ast_node_interactivity = \"all\"\n\n\n# %%\nimport pandas as pd\nimport numpy as np\n\npd.set_option(\"display.max_rows\", 200, \"display.max_columns\", 200)\n\n# %% [markdown]\n# Reading in the population files - (i) 2000 - 2009 (ii) 2010 - 2020\n\n# %%\nraw_pre_2010 = pd.read_csv(\n \"C:\\\\Users\\\\deeks\\\\Documents\\\\MIDS\\\\IDS 720_Practising Data Science\\\\Mid-Sem project\\\\Gitdata\\\\pds2021-opioids-team-2-ids720\\\\00_source_data\\\\population_data\\\\2000 - 2010.csv\",\n encoding=\"ISO8859-1\",\n)\nraw_post_2010 = pd.read_csv(\n \"C:\\\\Users\\\\deeks\\\\Documents\\\\MIDS\\\\IDS 720_Practising Data Science\\\\Mid-Sem project\\\\Gitdata\\\\pds2021-opioids-team-2-ids720\\\\00_source_data\\\\population_data\\\\2010 - 2020.csv\",\n encoding=\"ISO8859-1\",\n)\n\n\n# %%\nraw_pre_2010.head()\n\n\n# %%\nraw_post_2010.head()\n\n\n# %%\n# raw_post_2010.iloc[:,0:20].columns\n\n\n# %%\nkeepcols_pre = [\n \"STATE\",\n \"COUNTY\",\n \"STNAME\",\n \"CTYNAME\",\n \"CENSUS2000POP\",\n \"POPESTIMATE2001\",\n \"POPESTIMATE2002\",\n \"POPESTIMATE2003\",\n \"POPESTIMATE2004\",\n \"POPESTIMATE2005\",\n \"POPESTIMATE2006\",\n \"POPESTIMATE2007\",\n \"POPESTIMATE2008\",\n \"POPESTIMATE2009\",\n]\n\nkeepcols_post = [\n \"STATE\",\n \"COUNTY\",\n \"STNAME\",\n \"CTYNAME\",\n \"CENSUS2010POP\",\n \"POPESTIMATE2011\",\n \"POPESTIMATE2012\",\n \"POPESTIMATE2013\",\n \"POPESTIMATE2014\",\n \"POPESTIMATE2015\",\n \"POPESTIMATE2016\",\n \"POPESTIMATE2017\",\n \"POPESTIMATE2018\",\n \"POPESTIMATE2019\",\n \"POPESTIMATE2020\",\n]\n\nraw_pre_2010 = raw_pre_2010.loc[:, raw_pre_2010.columns.isin(keepcols_pre)]\nraw_post_2010 = raw_post_2010.loc[:, raw_post_2010.columns.isin(keepcols_post)]\n\n\n# %%\nraw_pre_2010.shape\nraw_post_2010.shape\n\n\n# %%\nraw_popn_merged = raw_pre_2010.merge(\n raw_post_2010,\n on=[\"STATE\", \"COUNTY\", \"STNAME\", \"CTYNAME\"],\n how=\"left\",\n validate=\"1:1\",\n)\nraw_popn_merged.head()\n\n\n# %%\nraw_popn_merged = raw_popn_merged.loc[\n raw_popn_merged[\"STNAME\"] != raw_popn_merged[\"CTYNAME\"]\n]\nraw_popn_merged.shape\n\n\n# %%\nraw_popn_merged = raw_popn_merged.loc[raw_popn_merged[\"STNAME\"] != \"Alaska\"]\nraw_popn_merged.shape\n\n\n# %%\n# keep_states = [\"Florida\",\"Texas\",\"Washington\",\"Maryland\",\"Delaware\",\"New York\"]\n\n\n# %%\nraw_popn_merged.info()\n\n\n# %%\n(raw_popn_merged.isna().sum() / len(raw_popn_merged)).apply(\n lambda x: \"{:.2%}\".format(x)\n)\n\n\n# %%\nraw_popn_merged.loc[raw_popn_merged[\"CENSUS2010POP\"].isnull()]\n\n\n# %%\nraw_popn_merged.rename(\n {\n \"CENSUS2000POP\": \"2000\",\n \"POPESTIMATE2001\": \"2001\",\n \"POPESTIMATE2002\": \"2002\",\n \"POPESTIMATE2003\": \"2003\",\n \"POPESTIMATE2004\": \"2004\",\n \"POPESTIMATE2005\": \"2005\",\n \"POPESTIMATE2006\": \"2006\",\n \"POPESTIMATE2007\": \"2007\",\n \"POPESTIMATE2008\": \"2008\",\n \"POPESTIMATE2009\": \"2009\",\n \"CENSUS2010POP\": \"2010\",\n \"POPESTIMATE2011\": \"2011\",\n \"POPESTIMATE2012\": \"2012\",\n \"POPESTIMATE2013\": \"2013\",\n \"POPESTIMATE2014\": \"2014\",\n \"POPESTIMATE2015\": \"2015\",\n \"POPESTIMATE2016\": \"2016\",\n \"POPESTIMATE2017\": \"2017\",\n \"POPESTIMATE2018\": \"2018\",\n \"POPESTIMATE2019\": \"2019\",\n \"POPESTIMATE2020\": \"2020\",\n },\n axis=1,\n inplace=True,\n)\n\n\n# %%\nraw_popn_merged.head()\n\n\n# %%\nraw_popn_merged_melted = pd.melt(\n 
raw_popn_merged,\n id_vars=[\"STATE\", \"COUNTY\", \"STNAME\", \"CTYNAME\"],\n value_vars=[\n \"2000\",\n \"2001\",\n \"2002\",\n \"2003\",\n \"2004\",\n \"2005\",\n \"2006\",\n \"2007\",\n \"2008\",\n \"2009\",\n \"2010\",\n \"2011\",\n \"2012\",\n \"2013\",\n \"2014\",\n \"2015\",\n \"2016\",\n \"2017\",\n \"2018\",\n \"2019\",\n \"2020\",\n ],\n var_name=\"Year\",\n value_name=\"Population\",\n)\nraw_popn_merged_melted.head()\n\n\n# %%\nraw_popn_merged_melted.isna().sum()\n\n\n# %%\nraw_popn_merged_melted[\"Population\"] = np.where(\n raw_popn_merged_melted[\"Population\"] == \"X\",\n np.nan,\n raw_popn_merged_melted[\"Population\"],\n)\n\n\n# %%\nraw_popn_merged_melted.info()\n\n\n# %%\n# Converting dtype of population from object to float\nraw_popn_merged_melted[\"Population\"] = pd.to_numeric(\n raw_popn_merged_melted[\"Population\"], errors=\"coerce\"\n)\nraw_popn_merged_melted.info()\n\n# Fetching state codes\n# %%\nimport requests\n\nresponse = requests.get(\n \"https://gist.githubusercontent.com/mshafrir/2646763/raw/8b0dbb93521f5d6889502305335104218454c2bf/states_hash.json\"\n)\nstate_abbrevs = {v: k for k, v in response.json().items()}\nstate_abbrevs\n\n\n# %%\nraw_popn_merged_melted[\"State_Code\"] = raw_popn_merged_melted[\"STNAME\"].map(\n state_abbrevs\n)\nraw_popn_merged_melted.head()\n\n\n# %%\n# Checking average population counts for states\n# raw_popn_merged_melted.groupby([\"STNAME\"]).mean(\"Population\").sort_values(by = \"Population\",ascending=False)\n\n# Writing cleaned population file\n# %%\nraw_popn_merged_melted.to_csv(\n \"C:\\\\Users\\\\deeks\\\\Documents\\\\MIDS\\\\IDS 720_Practising Data Science\\\\Mid-Sem project\\\\Gitdata\\\\pds2021-opioids-team-2-ids720\\\\20_intermediate_files\\\\population_2000_2020.csv\"\n)\n","repo_name":"anas14680/Opioid-Regulations-Evaluations","sub_path":"10_code/Cleaning_Population_Data.py","file_name":"Cleaning_Population_Data.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4128481720","text":"import cv2 as cv\r\n\r\nimg = cv.imread(\"maliao.jpg\", cv.IMREAD_UNCHANGED)\r\n\r\n# 获取 ROI 区域\r\nface = img[10:175, 100:260]\r\n# 图像赋值\r\nimg[0:165, 0:160] = face\r\n\r\n# 原始图像显示\r\ncv.imshow(\"demo\", img)\r\n\r\n#等待显示\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()","repo_name":"meteor1993/python-learning","sub_path":"python-opencv/blog3-attribute/demo5-roi.py","file_name":"demo5-roi.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"61"}
+{"seq_id":"28499042561","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['WSSNet']\n\n\nclass conv_block(nn.Module):\n def __init__(self, ch_in, ch_out):\n super(conv_block, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(ch_out)\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass final_conv_block(nn.Module):\n def __init__(self, ch_in, ch_out):\n super(final_conv_block, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass up_conv(nn.Module):\n def __init__(self, ch_in, ch_out):\n super(up_conv, self).__init__()\n self.up = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear'),\n nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.up(x)\n return x\n\n\nclass WSSNet(nn.Module):\n def __init__(self, img_ch=15, output_ch=3):\n \"\"\"\n :param img_ch: 48*48*15\n :param output_ch: 48*48*3\n \"\"\"\n super(WSSNet, self).__init__()\n\n self.Conv1 = conv_block(ch_in=img_ch, ch_out=64)\n self.Conv2 = conv_block(ch_in=64, ch_out=128)\n self.Conv3 = conv_block(ch_in=128, ch_out=256)\n self.bottom4 = conv_block(ch_in=256, ch_out=512)\n self.Up5 = up_conv(ch_in=512, ch_out=256)\n self.Up_conv5 = conv_block(ch_in=512, ch_out=256)\n self.Up6 = up_conv(ch_in=256, ch_out=128)\n self.Up_conv6 = conv_block(ch_in=256, ch_out=128)\n self.Up7 = up_conv(ch_in=128, ch_out=64)\n self.Up_conv7 = final_conv_block(ch_in=128, ch_out=64)\n self.Output_Conv = nn.Conv2d(64, output_ch, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n # encoding path\n x1 = self.Conv1(x)\n x2 = F.max_pool2d(x1, 2)\n # x2 = self.Maxpool(x1)\n x2 = self.Conv2(x2)\n x3 = F.max_pool2d(x2, 2)\n # x3 = self.Maxpool(x2)\n x3 = self.Conv3(x3)\n x4 = F.max_pool2d(x3, 2)\n # x4 = self.Maxpool(x3)\n # neck path\n x4 = self.bottom4(x4)\n # decoding + concat path\n d5 = self.Up5(x4)\n d5 = torch.cat((x3, d5), dim=1)\n d5 = self.Up_conv5(d5)\n d6 = self.Up6(d5)\n d6 = torch.cat((x2, d6), dim=1)\n d6 = self.Up_conv6(d6)\n d7 = self.Up7(d6)\n d7 = torch.cat((x1, d7), dim=1)\n d7 = self.Up_conv7(d7)\n output = self.Output_Conv(d7)\n return output\n","repo_name":"gaze-wu/WSSNet_pytorch_implement","sub_path":"master/models/WSSNet.py","file_name":"WSSNet.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"9058452783","text":"__lic__ = '''\r\n/**\r\n * AS - the open source Automotive Software on https://github.com/parai\r\n *\r\n * Copyright (C) 2015 AS \r\n *\r\n * This source code is free software; you can redistribute it and/or modify it\r\n * under the terms of the GNU General Public License version 2 as published by the\r\n * Free Software Foundation; See .\r\n *\r\n * This program is distributed in the hope that it will be useful, but\r\n * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\r\n * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\r\n * for more details.\r\n */\r\n '''\r\n \r\nfrom .dcm import *\r\nfrom .s19 import *\r\nfrom .xcp import *\r\n\r\n\r\nfrom PyQt5 import QtCore, QtGui\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import *\r\n\r\nimport os\r\nimport glob\r\nimport time\r\n\r\nfrom asserial import *\r\n\r\n__all__ = ['UIFlashloader']\r\n\r\nclass AsFlashloader(QThread):\r\n infor = QtCore.pyqtSignal(str)\r\n progress = QtCore.pyqtSignal(int)\r\n def __init__(self,parent=None):\r\n super(QThread, self).__init__(parent)\r\n self.steps = [ (self.enter_extend_session,True), (self.security_extds_access,True),\r\n (self.enter_program_session,True),(self.security_prgs_access,True),\r\n (self.download_flash_driver,True),(self.check_flash_driver,False),\r\n (self.routine_erase_flash,True), (self.download_application,True),\r\n (self.check_application,False), (self.launch_application,True) ]\r\n self.stepsXcp = [ (self.dummy,False), (self.dummy,False),\r\n (self.enter_program_session_xcp,True),(self.security_prgs_access_xcp,True),\r\n (self.download_flash_driver_xcp,True),(self.check_flash_driver_xcp,False),\r\n (self.routine_erase_flash_xcp,True), (self.download_application_xcp,True),\r\n (self.check_application_xcp,False), (self.launch_application_xcp,True) ]\r\n self.stepsCmd = [ (self.dummy,False), (self.dummy,False),\r\n (self.dummy,False),(self.dummy,False),\r\n (self.download_flash_driver_cmd,True),(self.dummy,False),\r\n (self.routine_erase_flash_cmd,True), (self.download_application_cmd,True),\r\n (self.dummy,False), (self.dummy,False) ]\r\n self.enable = []\r\n for s in self.steps:\r\n self.enable.append(s[1])\r\n self.dcm = dcm(DFTBUS,0x732,0x731)\r\n self.xcp = xcp(DFTBUS, 0x554, 0x555)\r\n self.app = None\r\n self.flsdrv = None\r\n self.protocol = 'UDS'\r\n self.ability = 4096\r\n\r\n def add_progress(self,sz):\r\n self.txSz += sz\r\n self.step_progress((self.txSz*100)/self.sumSz)\r\n\r\n def dummy(self):\r\n return False,None\r\n\r\n def runcmd(self,cmd):\r\n ercd,msg = self.serial.runcmd(cmd)\r\n if(ercd == False):\r\n self.infor.emit('%s\\n%s'%(cmd,msg))\r\n #print(cmd,msg)\r\n return ercd,msg\r\n\r\n def open_cmd(self):\r\n settings = {}\r\n settings['port'] = self.port\r\n settings['baund'] = 115200\r\n settings['bytesize'] = 8\r\n settings['parity']='N'\r\n settings['stopbits']=1\r\n settings['timeout'] = 0.1\r\n self.serial = AsSerial()\r\n ercd,msg = self.serial.open(settings,False)\r\n if(False == ercd):\r\n self.infor.emit(msg)\r\n return ercd,None\r\n\r\n def close_cmd(self):\r\n self.serial.close()\r\n return True,None\r\n\r\n def download_one_section_cmd(self,address,size,data,identifier):\r\n FLASH_WRITE_SIZE = self.writeProperty\r\n left_size = size\r\n pos = 0\r\n # FIXME: according to the shell command implementation\r\n ability = 2000\r\n # round up\r\n size2 = int((left_size+FLASH_WRITE_SIZE-1)/FLASH_WRITE_SIZE)*FLASH_WRITE_SIZE\r\n ercd = 
True\r\n while(left_size>0 and ercd==True):\r\n cmd = '%s %s '%(identifier,hex(address+pos))\r\n if(left_size > ability):\r\n sz = ability\r\n left_size = left_size - ability\r\n else:\r\n sz = int((left_size+FLASH_WRITE_SIZE-1)/FLASH_WRITE_SIZE)*FLASH_WRITE_SIZE\r\n left_size = 0\r\n for i in range(sz):\r\n if((pos+i)>24)&0xFF,8)\r\n req.append((key>>16)&0xFF,8)\r\n req.append((key>>8)&0xFF,8)\r\n req.append((key>>0)&0xFF,8)\r\n self.infor.emit(' send key')\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n if(ercd == True):\r\n self.infor.emit(' == unlock CALPAG ==')\r\n req = xcpbits()\r\n req.append(0xF8,8)\r\n req.append(0x00,8) # normal mode\r\n req.append(0x01,8) # CALPAG\r\n self.infor.emit(' request seed')\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n if(ercd == True):\r\n res = res.toarray()\r\n if((res[1] != 4) or (len(res) < 6)):\r\n self.infor.emit(' invalid seed size')\r\n return False,None\r\n seed = (res[2]<<24)+(res[3]<<16)+(res[4]<<8)+(res[5]<<0)\r\n key = seed\r\n req = xcpbits()\r\n req.append(0xF7,8)\r\n req.append(4,8)\r\n req.append((key>>24)&0xFF,8)\r\n req.append((key>>16)&0xFF,8)\r\n req.append((key>>8)&0xFF,8)\r\n req.append((key>>0)&0xFF,8)\r\n self.infor.emit(' send key')\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n if(ercd == True):\r\n req = xcpbits()\r\n req.append(0xD2,8)\r\n self.infor.emit(' program start')\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n return ercd,res\r\n\r\n def download_one_section_xcp(self,address,size,data,identifier):\r\n req = xcpbits()\r\n req.append(0xF6,8)\r\n req.append(0x00,16) # reserved\r\n req.append(identifier,8) # extensition\r\n req.append(address,32) # address\r\n self.infor.emit(' set MTA address %s, type %s'%(hex(address), identifier))\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n self.infor.emit(' downloading data...')\r\n ability = self.xcp.get_max_cto()-2\r\n left = size\r\n rPos = 0\r\n while((left > 0) and (ercd == True)):\r\n doSz = left\r\n if(doSz > ability):\r\n doSz = ability\r\n req = xcpbits()\r\n req.append(0xF0,8)\r\n req.append(doSz,8)\r\n for i in range(doSz):\r\n req.append(data[rPos+i],8)\r\n rPos += doSz\r\n left -= doSz\r\n ercd,res = self.transmit_xcp(req,True)\r\n self.add_progress(doSz)\r\n if(ercd == True): self.infor.emit(' success')\r\n return ercd,res\r\n\r\n def upload_one_section_xcp(self,address,size,identifier):\r\n req = xcpbits()\r\n req.append(0xF6,8)\r\n req.append(0x00,16) # reserved\r\n req.append(identifier,8) # extensition\r\n req.append(address,32) # address\r\n self.infor.emit(' set MTA address %s, type %s'%(hex(address), identifier))\r\n ercd,res = self.transmit_xcp(req)\r\n \r\n data = []\r\n left = size\r\n\r\n self.infor.emit(' uploading data...')\r\n ability = self.xcp.get_max_cto()-1\r\n left = size\r\n while((left > 0) and (ercd == True)):\r\n doSz = left\r\n if(doSz > ability):\r\n doSz = ability\r\n req = xcpbits()\r\n req.append(0xF5,8)\r\n req.append(doSz,8)\r\n left -= doSz\r\n ercd,res = self.transmit_xcp(req,True)\r\n if(ercd == True):\r\n res = res.toarray()\r\n for i in range(doSz):\r\n data.append(res[1+i])\r\n self.add_progress(doSz)\r\n if(ercd == True): self.infor.emit(' success')\r\n return ercd,res,data\r\n\r\n def download_flash_driver_xcp(self):\r\n flsdrv = self.flsdrvs\r\n ary = flsdrv.getData()\r\n for ss in ary:\r\n ercd,res = self.download_one_section_xcp(ss['address'],ss['size'],ss['data'],0x00)\r\n if(ercd == False):return ercd,res\r\n return ercd,res\r\n\r\n def check_flash_driver_xcp(self):\r\n flsdrv = self.flsdrvs\r\n ary = flsdrv.getData()\r\n 
flsdrvr = s19()\r\n for ss in ary:\r\n ercd,res,up = self.upload_one_section_xcp(ss['address'],ss['size'],0x00)\r\n flsdrvr.append(ss['address'],up)\r\n if(ercd and self.compare(ss['data'], up)):\r\n self.infor.emit(' check flash driver pass!')\r\n else:\r\n self.infor.emit(' check flash driver failed!')\r\n flsdrvr.dump('read_%s'%(os.path.basename(self.flsdrv)))\r\n return False,res\r\n flsdrvr.dump('read_%s'%(os.path.basename(self.flsdrv)))\r\n return ercd,res\r\n\r\n def routine_erase_flash_xcp(self):\r\n saddr, eaddr = self.get_app_erase_range()\r\n req = xcpbits()\r\n req.append(0xF6,8)\r\n req.append(0x00,16) # reserved\r\n req.append(0x01,8) # extensition FLASH\r\n req.append(saddr,32) # address\r\n ercd,res = self.transmit_xcp(req, True)\r\n\r\n if(ercd == True):\r\n self.infor.emit(' erase @ address %s, length %s'%(hex(saddr),hex(eaddr-saddr)))\r\n req = xcpbits()\r\n req.append(0xD1,8)\r\n req.append(0x00,8) # absolute access mode\r\n req.append(0x00,16) # reserved\r\n req.append(eaddr-saddr,32) # length\r\n ercd,res = self.transmit_xcp(req)\r\n\r\n return ercd,res\r\n\r\n def program_one_section_xcp(self,address,size,data,identifier):\r\n # TODO: should use command program instead of download\r\n return self.download_one_section_xcp(address,size,data,identifier)\r\n\r\n def download_application_xcp(self):\r\n app = self.apps\r\n ary = app.getData(True)\r\n for ss in ary:\r\n ercd,res = self.program_one_section_xcp(ss['address'],ss['size'],ss['data'],0x01)\r\n if(ercd == False):return ercd,res\r\n return ercd,res\r\n\r\n def check_application_xcp(self):\r\n app = self.apps\r\n ary = app.getData(True)\r\n appr = s19()\r\n for ss in ary:\r\n ercd,res,up = self.upload_one_section_xcp(ss['address'],ss['size'],0x01)\r\n appr.append(ss['address'],up)\r\n if(ercd and self.compare(ss['data'], up)):\r\n self.infor.emit(' check application pass!')\r\n else:\r\n self.infor.emit(' check application failed!')\r\n appr.dump('read_%s'%(os.path.basename(self.app)))\r\n return False,res\r\n appr.dump('read_%s'%(os.path.basename(self.app)))\r\n return ercd,res\r\n\r\n def launch_application_xcp(self):\r\n req = xcpbits()\r\n req.append(0xCF,8)\r\n return self.transmit_xcp(req, True)\r\n\r\n def set_protocol(self,p):\r\n if(p == 'UDS on CAN'):\r\n self.protocol = 'UDS'\r\n self.dcm = dcm(DFTBUS,0x732,0x731)\r\n self.ability = 4096\r\n elif(p == 'UDS on USBCAN'):\r\n self.protocol = 'UDS'\r\n self.dcm = dcm(DFTBUS,0x732,0x731)\r\n self.ability = 4096\r\n self.dcm.usbcan=True\r\n elif(p == 'UDS on CANFD'):\r\n self.protocol = 'UDS'\r\n self.dcm = dcm(DFTBUS,0x732,0x731)\r\n self.dcm.set_ll_dl(64)\r\n self.ability = 4096\r\n elif(p == 'UDS on DOIP'):\r\n self.protocol = 'UDS'\r\n self.dcm = dcm('172.18.0.200',8989)\r\n self.ability = 1400 # tested okay with this value\r\n elif(p == 'XCP on CAN'):\r\n self.protocol = 'XCP'\r\n elif(p.startswith('CMD on COM')):\r\n self.protocol = 'CMD'\r\n self.port = p.split(' ')[2]\r\n else:\r\n self.protocol = 'unknown protocol %s'%(p)\r\n self.infor.emit(self.protocol)\r\n\r\n def is_check_application_enabled(self):\r\n return self.enable[8]\r\n def is_check_flash_driver_enabled(self):\r\n return self.enable[5]\r\n def setTarget(self,app,flsdrv=None, eraseProperty='512', writeProperty='8', signature='8'):\r\n self.app = app\r\n self.flsdrv = flsdrv\r\n self.eraseProperty = eval(str(eraseProperty))\r\n self.writeProperty = eval(str(writeProperty))\r\n self.flsSignature = eval(str(signature))\r\n\r\n def GetSteps(self):\r\n ss = []\r\n if(self.protocol == 'XCP'):\r\n 
steps = self.stepsXcp\r\n elif(self.protocol == 'CMD'):\r\n steps = self.stepsCmd\r\n else:\r\n steps = self.steps\r\n for s in steps:\r\n ss.append((s[0].__name__.replace('_',' '),s[1]))\r\n return ss\r\n \r\n def SetEnable(self,step,enable):\r\n for id,s in enumerate(self.GetSteps()):\r\n if(step == s[0]):\r\n self.enable[id] = enable\r\n\r\n def step_progress(self,v):\r\n self.progress.emit(v)\r\n\r\n def transmit(self,req,exp,ignore=False):\r\n ercd,res = self.dcm.transmit(req)\r\n if(ercd == True):\r\n if(len(res)>=len(exp)):\r\n for i in range(len(exp)):\r\n if((res[i]!=exp[i]) and (exp[i]!=-1)):\r\n ercd = False\r\n break\r\n else:\r\n ercd = False\r\n if(ercd == True):\r\n if(not ignore): self.infor.emit(' success')\r\n else:\r\n self.infor.emit(' failed')\r\n return ercd,res\r\n\r\n def enter_extend_session(self):\r\n return self.transmit([0x10,0x03], [0x50,0x03])\r\n def security_extds_access(self):\r\n ercd,res = self.transmit([0x27,0x01], [0x67,0x01,-1,-1,-1,-1])\r\n if(ercd):\r\n seed = (res[2]<<24) + (res[3]<<16) + (res[4]<<8) +(res[5]<<0)\r\n key = (seed^0x78934673)\r\n self.infor.emit(' send key %X from seed %X'%(key,seed))\r\n ercd,res = self.transmit([0x27,0x02,(key>>24)&0xFF,(key>>16)&0xFF,(key>>8)&0xFF,(key>>0)&0xFF],[0x67,0x02])\r\n return ercd,res\r\n def enter_program_session(self):\r\n return self.transmit([0x10,0x02], [0x50,0x02])\r\n def security_prgs_access(self):\r\n ercd,res = self.transmit([0x27,0x03], [0x67,0x03,-1,-1,-1,-1])\r\n if(ercd):\r\n seed = (res[2]<<24) + (res[3]<<16) + (res[4]<<8) +(res[5]<<0)\r\n key = (seed^0x94586792)\r\n self.infor.emit(' send key %X from seed %X'%(key,seed))\r\n ercd,res = self.transmit([0x27,0x04,(key>>24)&0xFF,(key>>16)&0xFF,(key>>8)&0xFF,(key>>0)&0xFF],[0x67,0x04])\r\n return ercd,res\r\n def request_download(self,address,size,identifier):\r\n self.infor.emit(' request download')\r\n return self.transmit([0x34,0x00,0x44, \\\r\n (address>>24)&0xFF,(address>>16)&0xFF,(address>>8)&0xFF,(address>>0)&0xFF, \\\r\n (size>>24)&0xFF,(size>>16)&0xFF,(size>>8)&0xFF,(size>>0)&0xFF, \\\r\n identifier],[0x74])\r\n\r\n def request_upload(self,address,size,identifier):\r\n self.infor.emit(' request upload')\r\n return self.transmit([0x35,0x00,0x44, \\\r\n (address>>24)&0xFF,(address>>16)&0xFF,(address>>8)&0xFF,(address>>0)&0xFF, \\\r\n (size>>24)&0xFF,(size>>16)&0xFF,(size>>8)&0xFF,(size>>0)&0xFF, \\\r\n identifier],[0x75])\r\n \r\n def request_transfer_exit(self):\r\n self.infor.emit(' request transfer exit')\r\n return self.transmit([0x37],[0x77])\r\n \r\n def download_one_section(self,address,size,data,identifier):\r\n FLASH_WRITE_SIZE = self.writeProperty\r\n blockSequenceCounter = 1\r\n left_size = size\r\n pos = 0\r\n ability = int(((self.ability-5)/FLASH_WRITE_SIZE)) * FLASH_WRITE_SIZE\r\n # round up\r\n size2 = int((left_size+FLASH_WRITE_SIZE-1)/FLASH_WRITE_SIZE)*FLASH_WRITE_SIZE\r\n ercd,res = self.request_download(address,size2,identifier)\r\n if(ercd == False):return ercd,res\r\n while(left_size>0 and ercd==True):\r\n req = [0x36,blockSequenceCounter,0,identifier]\r\n if(left_size > ability):\r\n sz = ability\r\n left_size = left_size - ability\r\n else:\r\n sz = int((left_size+FLASH_WRITE_SIZE-1)/FLASH_WRITE_SIZE)*FLASH_WRITE_SIZE\r\n left_size = 0\r\n for i in range(sz):\r\n if((pos+i)0 and ercd==True):\r\n req = [0x36,blockSequenceCounter,0,identifier]\r\n ercd,res = self.transmit(req,[0x76,blockSequenceCounter],True)\r\n if(ercd == False):return ercd,res,None\r\n blockSequenceCounter = (blockSequenceCounter + 1)&0xFF\r\n sz 
= len(res)-2\r\n self.add_progress(sz)\r\n if (left_size > sz):\r\n left_size = left_size - sz\r\n else:\r\n left_size = 0\r\n for b in res[2:]:\r\n data.append(b)\r\n ercd,res = self.request_transfer_exit()\r\n if(ercd == False):return ercd,res,None\r\n return ercd,res,data\r\n \r\n def compare(self,d1,d2):\r\n for i,b in enumerate(d1):\r\n if(b!=d2[i]):\r\n return False\r\n return True\r\n\r\n def download_flash_driver(self):\r\n flsdrv = self.flsdrvs\r\n ary = flsdrv.getData()\r\n for ss in ary:\r\n ercd,res = self.download_one_section(ss['address']-ary[0]['address'],ss['size'],ss['data'],0xFD)\r\n if(ercd == False):return ercd,res\r\n return ercd,res\r\n\r\n def check_flash_driver(self):\r\n flsdrv = self.flsdrvs\r\n ary = flsdrv.getData()\r\n flsdrvr = s19()\r\n for ss in ary:\r\n ercd,res,up = self.upload_one_section(ss['address']-ary[0]['address'],ss['size'],0xFD)\r\n if(ercd == True):\r\n flsdrvr.append(ss['address'],up)\r\n if(ercd and self.compare(ss['data'], up)):\r\n self.infor.emit(' check flash driver pass!')\r\n else:\r\n self.infor.emit(' check flash driver failed!')\r\n flsdrvr.dump('read_%s'%(os.path.basename(self.flsdrv)))\r\n return False,res\r\n flsdrvr.dump('read_%s'%(os.path.basename(self.flsdrv)))\r\n return ercd,res\r\n\r\n def get_app_erase_range(self):\r\n app = self.apps\r\n ary = app.getData(True)\r\n saddr = ary[0]['address']\r\n eaddr = ary[0]['address'] + ary[0]['size']\r\n for ss in ary:\r\n if(ss['address']< saddr):\r\n saddr = ss['address']\r\n if(ss['address']+ss['size'] > eaddr):\r\n eaddr = ss['address']+ss['size']\r\n if(type(self.eraseProperty) == list):\r\n for addr in self.eraseProperty:\r\n if(eaddr <= addr):\r\n eaddr = addr\r\n break\r\n else:\r\n eaddr = int((eaddr+self.eraseProperty-1)/self.eraseProperty)*self.eraseProperty\r\n return saddr, eaddr\r\n\r\n def routine_erase_flash(self):\r\n saddr, eaddr = self.get_app_erase_range()\r\n eaddr = eaddr - saddr # get the length\r\n return self.transmit([0x31,0x01,0xFF,0x01,\r\n (saddr>>24)&0xFF,(saddr>>16)&0xFF,(saddr>>8)&0xFF,(saddr>>0)&0xFF,\r\n (eaddr>>24)&0xFF,(eaddr>>16)&0xFF,(eaddr>>8)&0xFF,(eaddr>>0)&0xFF,\r\n 0xFF],[0x71,0x01,0xFF,0x01])\r\n \r\n def download_application(self):\r\n app = self.apps\r\n ary = app.getData(True)\r\n for id,ss in enumerate(ary):\r\n if((id==0) and (self.flsSignature>0)):\r\n assert(ss['size'] >= self.flsSignature)\r\n addr = ss['address']+self.flsSignature\r\n data = ss['data'][self.flsSignature:]\r\n size = ss['size']-self.flsSignature\r\n if(size == 0):\r\n continue\r\n else:\r\n addr = ss['address']\r\n data = ss['data']\r\n size = ss['size']\r\n ercd,res = self.download_one_section(addr,size,data,0xFF)\r\n if(ercd == False):return ercd,res\r\n # write the signature at last\r\n if(self.flsSignature>0):\r\n addr = ary[0]['address']\r\n data = ary[0]['data'][:self.flsSignature]\r\n size = self.flsSignature\r\n ercd,res = self.download_one_section(addr,size,data,0xFF)\r\n return ercd,res\r\n\r\n def check_application(self):\r\n app = self.apps\r\n ary = app.getData(True)\r\n appr = s19()\r\n for ss in ary:\r\n ercd,res,up = self.upload_one_section(ss['address'],ss['size'],0xFF)\r\n if(ercd == True):\r\n appr.append(ss['address'],up)\r\n if(ercd and self.compare(ss['data'], up)):\r\n self.infor.emit(' check application pass!')\r\n else:\r\n self.infor.emit(' check application failed!')\r\n appr.dump('read_%s'%(os.path.basename(self.app)))\r\n return False,res\r\n appr.dump('read_%s'%(os.path.basename(self.app)))\r\n return ercd,res\r\n\r\n def 
launch_application(self):\r\n return self.transmit([0x31,0x01,0xFF,0x03], [0x71,0x01,0xFF,0x03])\r\n\r\n def run_common(self, steps):\r\n def ssz(ss):\r\n sz = 0\r\n for s in ss.getData(True):\r\n sz += s['size']\r\n return sz\r\n self.sumSz = 0\r\n if(os.path.exists(self.flsdrv)):\r\n self.flsdrvs = s19(self.flsdrv)\r\n self.sumSz = ssz(self.flsdrvs)\r\n if(self.is_check_flash_driver_enabled()):\r\n self.sumSz += ssz(self.flsdrvs)\r\n self.apps = s19(self.app)\r\n self.sumSz += ssz(self.apps)\r\n if(self.is_check_application_enabled()):\r\n self.sumSz += ssz(self.apps)\r\n self.txSz = 0\r\n self.infor.emit('summary transfer size is %s bytes(app %s, flsdrv %s)!'%(\r\n self.sumSz,ssz(self.apps),ssz(self.flsdrvs)))\r\n pre = time.time()\r\n for id,s in enumerate(steps):\r\n if((self.enable[id] == True) and (s[0].__name__ != 'dummy')):\r\n self.infor.emit('>> '+s[0].__name__.replace('_',' '))\r\n ercd,res = s[0]()\r\n if(ercd == False):\r\n self.infor.emit(\"\\n\\n >> boot failed <<\\n\\n\")\r\n return\r\n cost = time.time() - pre\r\n speed = int(self.sumSz/cost)\r\n self.infor.emit('cost %ss, speed is %sbps'%(cost, speed))\r\n self.progress.emit(100)\r\n\r\n def run_uds(self):\r\n self.run_common(self.steps)\r\n\r\n def run_xcp(self):\r\n self.run_common(self.stepsXcp)\r\n\r\n def run_cmd(self):\r\n self.open_cmd()\r\n self.run_common(self.stepsCmd)\r\n self.close_cmd()\r\n\r\n def run(self):\r\n self.infor.emit('starting with protocol \"%s\"... '%(self.protocol))\r\n if(self.protocol == 'UDS'):\r\n self.run_uds()\r\n elif(self.protocol == 'CMD'):\r\n self.run_cmd()\r\n elif(self.protocol == 'XCP'):\r\n self.run_xcp()\r\n else:\r\n self.infor.emit('invalid protocol')\r\n\r\nclass AsStepEnable(QCheckBox):\r\n enableChanged=QtCore.pyqtSignal(str,bool)\r\n def __init__(self,text,parent=None):\r\n super(QCheckBox, self).__init__(text,parent)\r\n self.stateChanged.connect(self.on_stateChanged)\r\n def on_stateChanged(self,state):\r\n self.enableChanged.emit(self.text(),state)\r\n \r\nclass UIFlashloader(QWidget):\r\n def __init__(self, parent=None):\r\n super(QWidget, self).__init__(parent)\r\n \r\n self.loader = AsFlashloader()\r\n self.loader.infor.connect(self.on_loader_infor)\r\n self.loader.progress.connect(self.on_loader_progress)\r\n\r\n vbox = QVBoxLayout()\r\n \r\n grid = QGridLayout()\r\n grid.addWidget(QLabel('Application'),0,0)\r\n self.leApplication = QLineEdit()\r\n grid.addWidget(self.leApplication,0,1)\r\n self.btnOpenApp = QPushButton('...')\r\n grid.addWidget(self.btnOpenApp,0,2)\r\n\r\n grid.addWidget(QLabel('Flash Driver'),1,0)\r\n self.leFlsDrv = QLineEdit()\r\n grid.addWidget(self.leFlsDrv,1,1)\r\n self.btnOpenFlsDrv = QPushButton('...')\r\n grid.addWidget(self.btnOpenFlsDrv,1,2)\r\n\r\n grid.addWidget(QLabel('Progress'),2,0)\r\n self.pgbProgress = QProgressBar()\r\n self.pgbProgress.setRange(0,100)\r\n grid.addWidget(self.pgbProgress,2,1)\r\n self.cmbxProtocol = QComboBox()\r\n items = ['UDS on CAN','UDS on CANFD','UDS on DOIP','XCP on CAN','UDS on USBCAN']\r\n for i in search_serial_ports():\r\n items.append('CMD on COM%s'%(i))\r\n self.cmbxProtocol.addItems(items)\r\n self.cmbxProtocol.setEditable(True)\r\n grid.addWidget(self.cmbxProtocol,2,2)\r\n self.btnStart=QPushButton('Start')\r\n grid.addWidget(self.btnStart,2,3)\r\n grid.addWidget(QLabel('aslua bootloader:'),3,0)\r\n self.cmbxCanDevice = QComboBox()\r\n self.cmbxCanDevice.addItems(['socket','serial','vxl','peak','tcp'])\r\n self.cmbxCanPort = QComboBox()\r\n self.cmbxCanPort.addItems(['port 0','port 1','port 
2','port 3','port 4','port 5','port 6','port 7'])\r\n self.cmbxCanBaud = QComboBox()\r\n self.cmbxCanBaud.addItems(['125000','250000','500000','1000000','115200'])\r\n self.btnStartASLUA=QPushButton('Start')\r\n grid.addWidget(self.cmbxCanDevice,3,1)\r\n grid.addWidget(self.cmbxCanPort,3,2)\r\n grid.addWidget(self.cmbxCanBaud,3,3)\r\n grid.addWidget(self.btnStartASLUA,3,4)\r\n vbox.addLayout(grid)\r\n\r\n grid.addWidget(QLabel('Erase Property:'),4,0)\r\n self.leFlsEraseProperty = QLineEdit()\r\n grid.addWidget(self.leFlsEraseProperty,4,1)\r\n self.leFlsEraseProperty.setToolTip('Sector start address list or the smallest sector size\\nfor example:\\n list:[0,128*1024,...]\\n size: 512')\r\n self.leFlsEraseProperty.setText('128*1024')\r\n grid.addWidget(QLabel('Write Property:'),5,0)\r\n self.leFlsWriteProperty = QLineEdit()\r\n grid.addWidget(self.leFlsWriteProperty,5,1)\r\n self.leFlsWriteProperty.setToolTip('the smallest page size')\r\n self.leFlsWriteProperty.setText('8')\r\n grid.addWidget(QLabel('Signature:'),5,2)\r\n self.leFlsSignature = QLineEdit()\r\n self.leFlsSignature.setMaximumWidth(120)\r\n grid.addWidget(self.leFlsSignature,5,3)\r\n self.leFlsSignature.setToolTip('the signature size at the begining of Image')\r\n self.leFlsSignature.setText('8')\r\n hbox = QHBoxLayout()\r\n vbox2 = QVBoxLayout()\r\n self.cbxEnableList = []\r\n for s in self.loader.GetSteps():\r\n cbxEnable = AsStepEnable(s[0])\r\n cbxEnable.setChecked(s[1])\r\n cbxEnable.enableChanged.connect(self.on_enableChanged)\r\n vbox2.addWidget(cbxEnable)\r\n self.cbxEnableList.append(cbxEnable)\r\n hbox.addLayout(vbox2)\r\n self.leinfor = QTextEdit()\r\n self.leinfor.setReadOnly(True)\r\n hbox.addWidget(self.leinfor)\r\n \r\n vbox.addLayout(hbox)\r\n \r\n self.setLayout(vbox)\r\n \r\n self.btnOpenApp.clicked.connect(self.on_btnOpenApp_clicked)\r\n self.btnOpenFlsDrv.clicked.connect(self.on_btnOpenFlsDrv_clicked)\r\n self.btnStart.clicked.connect(self.on_btnStart_clicked)\r\n self.btnStartASLUA.clicked.connect(self.on_btnStartASLUA_clicked)\r\n self.cmbxProtocol.currentIndexChanged.connect(self.on_cmbxProtocol_currentIndexChanged)\r\n self.btnStartASLUA.setDisabled(True)\r\n aspath = os.path.abspath('%s/../../..'%(os.curdir))\r\n default_app='%s/com/as.application/board.mpc56xx/MPC5634M_MLQB80/Project/bin/internal_FLASH.mot'%(aspath)\r\n default_flsdrv='%s/com/as.application/board.mpc56xx/MPC5634M_MLQB80/FlsDrv/bin/internal_FLASH.mot'%(aspath)\r\n if(not os.path.exists(default_app)):\r\n default_app = ''\r\n if(not os.path.exists(default_flsdrv)):\r\n default_flsdrv = ''\r\n if(os.path.exists(aspath)):\r\n for ss in glob.glob('%s/release/ascore/*.s19'%(aspath)):\r\n default_app = ss\r\n self.leFlsEraseProperty.setText('2*1024')\r\n self.leFlsSignature.setText('0x500')\r\n break\r\n for ss in glob.glob('%s/release/asboot/*-flsdrv.s19'%(aspath)):\r\n default_flsdrv = ss\r\n break\r\n if(os.path.exists(default_app)):\r\n self.leApplication.setText(default_app)\r\n if(os.path.exists(default_flsdrv)):\r\n self.leFlsDrv.setText(default_flsdrv)\r\n\r\n def on_cmbxProtocol_currentIndexChanged(self,index):\r\n self.loader.set_protocol(str(self.cmbxProtocol.currentText()))\r\n\r\n for id,s in enumerate(self.loader.GetSteps()):\r\n self.cbxEnableList[id].setText(s[0])\r\n self.cbxEnableList[id].setChecked(s[1])\r\n\r\n def on_enableChanged(self,step,enable):\r\n self.loader.SetEnable(step, enable)\r\n\r\n def on_loader_infor(self,text):\r\n self.leinfor.append(text)\r\n \r\n def on_loader_progress(self,prg):\r\n 
self.pgbProgress.setValue(prg)\r\n\r\n    def on_btnOpenApp_clicked(self):\r\n        rv = QFileDialog.getOpenFileName(None,'application file', '','application (*.s19 *.bin *.mot)')\r\n        self.leApplication.setText(rv[0])\r\n\r\n    def on_btnOpenFlsDrv_clicked(self):\r\n        rv = QFileDialog.getOpenFileName(None,'flash driver file', '','flash driver (*.s19 *.bin *.mot)')\r\n        self.leFlsDrv.setText(rv[0])\r\n\r\n    def on_btnStart_clicked(self):\r\n        if(os.path.exists(str(self.leApplication.text()))):\r\n            self.pgbProgress.setValue(1)\r\n            self.loader.setTarget(str(self.leApplication.text()), str(self.leFlsDrv.text()),\r\n                    str(self.leFlsEraseProperty.text()),str(self.leFlsWriteProperty.text()),\r\n                    str(self.leFlsSignature.text()))\r\n            self.loader.start()\r\n        else:\r\n            QMessageBox.information(self, 'Tips', 'Please load a valid application first!')\r\n\r\n    def on_btnStartASLUA_clicked(self):\r\n        aslua = os.path.abspath('%s/pyas/aslua.exe'%(os.curdir))\r\n        fbl = os.path.abspath('%s/pyas/flashloader.lua'%(os.curdir))\r\n        cmd = '%s %s %s %s %s %s %s'%(aslua, fbl, self.leFlsDrv.text(), self.leApplication.text(),\r\n                self.cmbxCanDevice.currentText(),\r\n                str(self.cmbxCanPort.currentText()).replace('port',''),\r\n                self.cmbxCanBaud.currentText())\r\n        print(cmd)\r\n        self.leinfor.append(cmd+'\\n')\r\n        if(0 == os.system(cmd)):\r\n            self.leinfor.append('aslua bootloader finished successfully!')\r\n        else:\r\n            self.leinfor.append('aslua bootloader failed!')\r\n","repo_name":"hello-yaowq/as-1","sub_path":"com/as.tool/lua/script/flashloader.py","file_name":"flashloader.py","file_ext":"py","file_size_in_byte":34399,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
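The security steps in the flashloader above follow the standard UDS pattern: request a seed with service 0x27, derive a key, and send it back. In this loader the derivation is a plain XOR against a fixed constant (0x78934673 for the extended session, 0x94586792 for programming). A standalone sketch of that derivation, using the extended-session constant taken from the code above:

def derive_key(seed_bytes, const=0x78934673):
    # the positive 0x67 response carries the seed as four big-endian bytes
    seed = (seed_bytes[0] << 24) | (seed_bytes[1] << 16) | (seed_bytes[2] << 8) | seed_bytes[3]
    key = seed ^ const
    # the key is returned after 0x27 0x02, again as four big-endian bytes
    return [(key >> s) & 0xFF for s in (24, 16, 8, 0)]

print([hex(b) for b in derive_key([0x12, 0x34, 0x56, 0x78])])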
+{"seq_id":"7774897896","text":"import matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom sklearn.cluster import AgglomerativeClustering\n\n\ndef run_agglomerative_clustering(data, n_clusters=None, metric=\"euclidean\", linkage=\"ward\", distance_treshold=None):\n \"\"\" Apply Agglomerative hierarchial clustering on a dataset. \n For more see: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html#sklearn.cluster.AgglomerativeClustering\n\n Args:\n data (pd.DataFrame): The used dataset\n n_clusters (int, optional): The number of clusters as stopping criterion. Defaults to None.\n metric (str, optional): The distance metric. Defaults to \"euclidean\".\n linkage (str, optional): The linkage method. Defaults to \"ward\".\n distance_treshold ([type], optional): [The linkage distance threshold above which, \n clusters will not be merged. Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\"\n clustering = AgglomerativeClustering(n_clusters=n_clusters, affinity=metric, linkage=linkage,\n distance_threshold=distance_treshold).fit(data)\n return clustering.n_clusters_, clustering.labels_\n\n\ndef dendrogram_plot(data):\n \"\"\" Plot the dendrogram for a given dataset\n\n Args:\n data (pd.DataFrame): The used dataset\n \"\"\"\n cluster_linkage_array = linkage(data, 'ward')\n fig = plt.figure(figsize=(25, 10))\n plt.title('Hierarchical Clustering Dendrogram')\n dendrogram(cluster_linkage_array, p=8, orientation='top', distance_sort='descending',\n show_leaf_counts=True, show_contracted=True)\n ax = plt.gca()\n ax.axes.xaxis.set_ticks([])\n plt.xlabel('sample index')\n plt.ylabel('distance')\n plt.show()\n fig.savefig(\"Dendrogram.svg\", format=\"svg\")\n","repo_name":"lstappen/MuSe-Toolbox","sub_path":"src/muse-toolbox/diarisation/clustering_algorithms/AgglomerativeClustering.py","file_name":"AgglomerativeClustering.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"}
+{"seq_id":"7576461911","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom email.message import EmailMessage\nimport ssl\nimport smtplib\nimport sqlite3\nfrom dotenv import load_dotenv\nload_dotenv()\nimport os\nimport MySQLdb\n\nconnection = MySQLdb.connect(\n host= os.getenv(\"HOST\"),\n user=os.getenv(\"USERNAME\"),\n passwd= os.getenv(\"PASSWORD\"),\n db= os.getenv(\"DATABASE\"),\n autocommit = True,\n ssl_mode = \"VERIFY_IDENTITY\",\n ssl = {\n \"ca\": \"/etc/ssl/certs/ca-certificates.crt\"\n }\n)\n\ntry:\n cur = connection.cursor()\nexcept:\n print(\"error\")\n os.exit()\n\n\nemail_sender=\"ptracking968@gmail.com\"\n#main_password = \"Paudha77_L\"\npassword = \"snilqrhdolrbcrgy\"\nsubject = \"Price change!\"\n\n\n\n\n\n#email_receiver=\"daksh.srivastava.10@gmail.com\"\n#link = \"https://www.amazon.com/RK-ROYAL-KLUDGE-Mechanical-Ultra-Compact/dp/B089GN2KBT/?th=1\"\n\n\n#Fix scope problem with email_reciever and link\n\ndef run():\n\n \n\n HEADERS = {\n 'User-Agent': ('Mozilla/5.0 (X11; Linux x86_64)'\n 'AppleWebKit/537.36 (KHTML, like Gecko)'\n 'Chrome/44.0.2403.157 Safari/537.36'),\n 'Accept-Language': 'en-US, en;q=0.5'\n }\n\n\n threshold = 0.8\n\n while True:\n cur.execute(\"SELECT link, email, prevPrice FROM Products\")\n lst = cur.fetchall()\n print(lst)\n if lst == [] or lst == ():\n time.sleep(5)\n continue\n\n try:\n for l in lst:\n prevprice = l[2]\n print(prevprice)\n\n \n \n link = l[0]\n email_receiver = l[1]\n em = EmailMessage()\n em['From'] = email_sender\n em['Subject'] = subject\n em['To'] = email_receiver\n \n\n page=requests.get(link, headers=HEADERS)\n mybytes = page.text\n #print(mybytes)\n mystr = mybytes#.decode(\"utf8\")\n\n #print(mystr)\n\n soup = BeautifulSoup(mystr, features=\"html.parser\")\n #print(soup.prettify())\n price = float(soup.find('span', class_='a-price-whole').get_text() + soup.find('span', class_='a-price-fraction').get_text())\n print(price)\n if price <= prevprice*threshold:\n print(\"New low price!\")\n body = f\"\"\"\n The following product now has a lower price: {link} \n \"\"\"\n em.set_content(body)\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:\n smtp.login(email_sender, password)\n smtp.sendmail(email_sender, email_receiver, em.as_string()) \n cur.execute(\"UPDATE Products SET prevPrice=? WHERE link=? AND email=?\", (price, link, email_receiver))\n except:\n continue\n\nrun()","repo_name":"dakshsriv/Price_tracker","sub_path":"backend/Price_checker.py","file_name":"Price_checker.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13932474551","text":"from time import sleep\n\nimport pytest\n\nfrom pages.search_results import SearchResults\nfrom pages.start_screen import *\n\n\nclass TestSearch:\n @pytest.mark.search\n @pytest.mark.button\n def test_search_button_available(self):\n '''\n test that the search button is available\n '''\n assert StartScreen().search_button.should(be.clickable)\n\n @pytest.mark.search\n @pytest.mark.input\n def test_search_input_available(self):\n '''\n test that the search input available\n '''\n assert StartScreen().search_input.should(be.visible)\n \n \n @pytest.mark.debug\n def test_search_for_keyword(self):\n '''\n search for keyword \"Testing\"\n '''\n StartScreen().search_input.\\\n send_keys('testing')\n StartScreen().search_button.click()\n \n assert SearchResults().num_of_posts == 20\n\n\n\n","repo_name":"vanderloos/qa_workshop","sub_path":"pytest/tests/test_search_results.py","file_name":"test_search_results.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13696370625","text":"import logging\nimport os\nimport time\nfrom typing import List\n\nfrom xinference.client import Client\nfrom xinference.model.llm import BUILTIN_LLM_FAMILIES\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.getLevelName(\"INFO\".upper()))\n\n\n# Parameters\nendpoint = \"http://127.0.0.1:9997\"\nNUM_ITER = 5\nGPU_ID = 0\nSKIP_MODELS: List[str] = []\n\n\ndef get_gpu_mem_info(gpu_id=GPU_ID):\n \"\"\"\n Obtain gpu memory usage information according to gpu id, in MB.\n Returns\n -------\n total: all gpu memory\n used: currently used gpu memory\n free: available gpu memory\n \"\"\"\n try:\n import pynvml\n except ImportError:\n raise ImportError(\"Failed to import module 'pynvml', Please make sure 'pynvml' is installed.\\n\")\n\n pynvml.nvmlInit()\n if gpu_id < 0 or gpu_id >= pynvml.nvmlDeviceGetCount():\n logger.info(f\"gpu_id {gpu_id} does not exist!\")\n return 0, 0, 0\n\n handler = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)\n meminfo = pynvml.nvmlDeviceGetMemoryInfo(handler)\n total = round(meminfo.total / 1024 / 1024, 2)\n used = round(meminfo.used / 1024 / 1024, 2)\n free = round(meminfo.free / 1024 / 1024, 2)\n return total, used, free\n\n\ndef get_cpu_mem_info():\n \"\"\"\n Get the memory information of the current machine, in MB\n Returns\n -------\n mem_total: all memory of the current machine\n mem_free: available memory of the current machine\n mem_process_used: memory used by the current process\n \"\"\"\n try:\n import psutil\n except ImportError:\n raise ImportError(\"Failed to import module 'psutil', Please make sure 'psutil' is installed.\\n\")\n\n mem_total = round(psutil.virtual_memory().total / 1024 / 1024, 2)\n mem_free = round(psutil.virtual_memory().available / 1024 / 1024, 2)\n mem_process_used = round(\n psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024, 2\n )\n return mem_total, mem_free, mem_process_used\n\n\ndef get_speed_for_chat_model(model, prompt, chat_history):\n t1 = time.time()\n a = model.chat(prompt, chat_history, generate_config={\"max_tokens\": 1024})\n t2 = time.time()\n t = t2 - t1\n completion_tokens = a[\"usage\"][\"completion_tokens\"]\n speed = completion_tokens / t\n logger.info(f\"text: {a}\")\n logger.info(f\"tokens: {completion_tokens}\")\n logger.info(f\"time: {t}\")\n logger.info(f\"speed: {speed}\")\n return speed\n\n\ndef get_speed_for_generate_model(model, prompt):\n t1 = time.time()\n a = model.generate(prompt, generate_config={\"max_tokens\": 1024})\n t2 = time.time()\n t = t2 - t1\n completion_tokens = a[\"usage\"][\"completion_tokens\"]\n speed = completion_tokens / t\n logger.info(f\"text: {a}\")\n logger.info(f\"tokens: {completion_tokens}\")\n logger.info(f\"time: {t}\")\n logger.info(f\"speed: {speed}\")\n return speed\n\n\ndef run_model(endpoint):\n client = Client(endpoint)\n\n for model_family in BUILTIN_LLM_FAMILIES:\n model_name = model_family.model_name\n if model_name in SKIP_MODELS:\n continue\n for model_spec in model_family.model_specs:\n model_format = model_spec.model_format\n model_size = model_spec.model_size_in_billions\n quantizations = model_spec.quantizations\n if model_format == \"ggmlv3\":\n # only test 1 quantization for ggml model\n quantizations = [\"q4_0\"] if \"q4_0\" in quantizations else quantizations[:1]\n for quantization in quantizations:\n logger.info(\n f\"Model: {model_name}-{model_format}-{model_size}b-{quantization}\"\n )\n try:\n model_uid = client.launch_model(\n model_name=model_name,\n model_format=model_format,\n 
model_size_in_billions=model_size,\n quantization=quantization,\n )\n logger.info(\n f\"model launch success: {model_name}-{model_format}-{model_size}b-{quantization}\"\n )\n except Exception:\n # raise f\"model launch failed: {model_name}-{model_format}-{model_size}b-{quantization}\"\n logger.info(\n f\"model launch failed: {model_name}-{model_format}-{model_size}b-{quantization}\"\n )\n continue\n\n try:\n logger.info(\"After launch model:\")\n gpu_mem_total, gpu_mem_used, gpu_mem_free = get_gpu_mem_info(\n gpu_id=GPU_ID\n )\n logger.info(\n f\"Current GPU memory usage: Total {gpu_mem_total} MB, Used {gpu_mem_used} MB, Remaining {gpu_mem_free} MB\"\n )\n\n (\n cpu_mem_total,\n cpu_mem_free,\n cpu_mem_process_used,\n ) = get_cpu_mem_info()\n logger.info(\n f\"Current machine memory usage:Total {cpu_mem_total} MB, Used {cpu_mem_process_used} MB by the current process, Remaining {cpu_mem_free} MB\"\n )\n\n model = client.get_model(model_uid)\n\n if \"chat\" in model_family.model_ability:\n chat_history = []\n prompt = \"What't the top 10 largest animals in the world?\"\n\n list_speed = []\n for _ in range(NUM_ITER):\n s = get_speed_for_chat_model(model, prompt, chat_history)\n list_speed.append(s)\n logger.info(\n f\"average speed: {sum(list_speed[1:]) / len(list_speed[1:])}\"\n )\n else:\n prompt = \"Once upon a time, there was a very old computer.\"\n\n list_speed = []\n for _ in range(NUM_ITER):\n s = get_speed_for_generate_model(model, prompt)\n list_speed.append(s)\n logger.info(\n f\"average speed: {sum(list_speed[1:]) / len(list_speed[1:])}\"\n )\n\n logger.info(\"\\nAfter chat:\")\n gpu_mem_total, gpu_mem_used, gpu_mem_free = get_gpu_mem_info(\n gpu_id=GPU_ID\n )\n logger.info(\n f\"Current GPU memory usage: Total {gpu_mem_total} MB, Used {gpu_mem_used} MB, Remaining {gpu_mem_free} MB\"\n )\n\n (\n cpu_mem_total,\n cpu_mem_free,\n cpu_mem_process_used,\n ) = get_cpu_mem_info()\n logger.info(\n f\"Current machine memory usage:Total {cpu_mem_total} MB, Used {cpu_mem_process_used} MB by the current process, Remaining {cpu_mem_free} MB\"\n )\n except Exception:\n logger.info(\n f\"model chat failed: {model_name}-{model_format}-{model_size}b-{quantization}\"\n )\n\n client.terminate_model(model_uid)\n logger.info(\n f\"\\n{model_name}-{model_format}-{model_size}b-{quantization} is terminated\\n\\n\\n\"\n )\n\n\nif __name__ == \"__main__\":\n run_model(endpoint)\n","repo_name":"ChengjieLi28/inference","sub_path":"benchmark/benchmark_script.py","file_name":"benchmark_script.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
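The speed figure logged in the benchmark above is simply completion tokens divided by wall-clock seconds. A minimal, model-free sketch of the same measurement pattern (the stub below stands in for model.chat/model.generate):

import time

def measure_speed(call):
    """Time a generation call whose result reports usage.completion_tokens."""
    t0 = time.time()
    out = call()
    elapsed = time.time() - t0
    return out["usage"]["completion_tokens"] / elapsed

def fake_generate():
    time.sleep(0.05)  # stand-in for real generation latency
    return {"usage": {"completion_tokens": 128}}

print(f"{measure_speed(fake_generate):.1f} tokens/s")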
+{"seq_id":"23324574580","text":"from http import HTTPStatus\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom starlette.exceptions import HTTPException as StarletteHTTPException\nfrom starlette.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_401_UNAUTHORIZED,\n HTTP_403_FORBIDDEN,\n HTTP_404_NOT_FOUND,\n HTTP_405_METHOD_NOT_ALLOWED,\n HTTP_500_INTERNAL_SERVER_ERROR,\n HTTP_503_SERVICE_UNAVAILABLE,\n)\n\n\nclass StarLiteException(Exception):\n def __init__(self, *args: Any, detail: str = \"\", **kwargs: Dict[str, Any]):\n self.detail = detail\n super().__init__(*(str(arg) for arg in args if arg), detail, **kwargs)\n\n def __repr__(self) -> str:\n if self.detail:\n return f\"{self.__class__.__name__} - {self.detail}\"\n return self.__class__.__name__\n\n def __str__(self) -> str:\n return \" \".join(self.args).strip()\n\n\nclass MissingDependencyException(StarLiteException, ImportError):\n pass\n\n\nclass HTTPException(StarletteHTTPException, StarLiteException):\n status_code = HTTP_500_INTERNAL_SERVER_ERROR\n extra: Optional[Union[Dict[str, Any], List[Any]]] = None\n\n def __init__(\n self,\n *args: Any,\n detail: Optional[str] = None,\n status_code: Optional[int] = None,\n extra: Optional[Union[Dict[str, Any], List[Any]]] = None,\n **kwargs: Dict[str, Any],\n ):\n if not detail:\n detail = args[0] if len(args) > 0 else HTTPStatus(status_code or self.status_code).phrase\n self.extra = extra\n super().__init__(status_code or self.status_code, *args, **kwargs) # type: ignore\n self.detail = detail\n self.args = (f\"{self.status_code}: {self.detail}\", *args)\n\n def __repr__(self) -> str:\n return f\"{self.status_code} - {self.__class__.__name__} - {self.detail}\"\n\n\nclass ImproperlyConfiguredException(HTTPException, ValueError):\n pass\n\n\nclass ValidationException(HTTPException, ValueError):\n status_code = HTTP_400_BAD_REQUEST\n\n\nclass NotAuthorizedException(HTTPException):\n status_code = HTTP_401_UNAUTHORIZED\n\n\nclass PermissionDeniedException(HTTPException):\n status_code = HTTP_403_FORBIDDEN\n\n\nclass NotFoundException(HTTPException, ValueError):\n status_code = HTTP_404_NOT_FOUND\n\n\nclass MethodNotAllowedException(HTTPException):\n status_code = HTTP_405_METHOD_NOT_ALLOWED\n\n\nclass InternalServerException(HTTPException):\n status_code = HTTP_500_INTERNAL_SERVER_ERROR\n\n\nclass ServiceUnavailableException(HTTPException):\n status_code = HTTP_503_SERVICE_UNAVAILABLE\n\n\nclass TemplateNotFound(InternalServerException):\n def __init__(self, *args: Any, template_name: str, **kwargs: Dict[str, Any]):\n super().__init__(*args, detail=f\"Template {template_name} not found.\", **kwargs) # type: ignore\n","repo_name":"185504a9/starlite","sub_path":"starlite/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"15473008019","text":"import numpy as np\nimport os\nimport cv2 as cv\nfrom tqdm import tqdm\nfrom Render.camera import Camera\nfrom Render.gl.color_render import ColorRender\nfrom Render.gl.normal_render import NormalRender\nfrom utils.ObjIO import *\nfrom utils.cam_util import *\nfrom Render.mesh import compute_normal\nview_num = 1\ncam_f = 5000\ncam_dist = 10\npre_cam=np.array([0.8845,-0.0299,0.1250])\ncam_t=np.array([0,0,10])#pred_cam[:, 0:1] * cam_c * cam_tz / cam_f\nimg_resw = 512\nimg_resh = 512\ncam_type='p'\nclass SmplVtx(object):\n \"\"\"\n Local class used to load and store SMPL's vertices coordinate at rest pose\n with mean shape\n \"\"\"\n def __init__(self):\n self.smpl_vtx_std = np.loadtxt('vertices.txt')\n min_x = np.min(self.smpl_vtx_std[:, 0])\n max_x = np.max(self.smpl_vtx_std[:, 0])\n min_y = np.min(self.smpl_vtx_std[:, 1])\n max_y = np.max(self.smpl_vtx_std[:, 1])\n min_z = np.min(self.smpl_vtx_std[:, 2])\n max_z = np.max(self.smpl_vtx_std[:, 2])\n\n self.smpl_vtx_std[:, 0] = (self.smpl_vtx_std[:, 0]-min_x)/(max_x-min_x)\n self.smpl_vtx_std[:, 1] = (self.smpl_vtx_std[:, 1]-min_y)/(max_y-min_y)\n self.smpl_vtx_std[:, 2] = (self.smpl_vtx_std[:, 2]-min_z)/(max_z-min_z)\n\n\n_smpl_vtx = SmplVtx()\n\n\ndef get_smpl_semantic_code():\n \"\"\"gets semantic code definition on SMPL model\"\"\"\n return _smpl_vtx.smpl_vtx_std\ndef main():\n mesh=load_obj_data('dataset_example/FRONT_smpl_normalized.obj')\n # mesh=load_obj_data('test/pre_smpl2.obj')\n # mesh['v'][:,0]=(mesh['v'][:,0]+pre_cam[1])*pre_cam[0]/(5000/(256))*10\n # mesh['v'][:,1]=(mesh['v'][:,1]+pre_cam[2])*pre_cam[0]/(5000/(256))*10\n # mesh['v'][:,2]=(mesh['v'][:,2])*pre_cam[0]/(5000/(256))*10\n # mesh=save_obj_data(mesh,'test/pre_smpl1.obj')\n if mesh['vn'] is None or mesh['vn'].shape!=mesh['v'].shape:\n mesh['vn']=compute_normal(mesh['v'],mesh['f'])\n # color=0.5*(norm+1)\n mesh['vc']=get_smpl_semantic_code()\n rndr = ColorRender(width=img_resw, height=img_resh)\n rndr_noraml=NormalRender(width=img_resw, height=img_resh)\n # mesh['vc']= np.array([[0.65098039, 0.74117647, 0.85882353]]).repeat(mesh['v'].shape[0],0)\n # rndr.set_mesh(vertices=vertices,faces=faces,color=color,faces_clr=faces)\n rndr.set_mesh(vertices=mesh['v'],faces=mesh['f'],color=mesh['vc'],faces_clr=mesh['f'],norms=mesh['vn'],faces_nml=mesh['f'])\n rndr_noraml.set_mesh(vertices=mesh['v'],faces=mesh['f'],norms=mesh['vn'],face_normals=mesh['f'])\n rndr.set_norm_mat(axis=np.array([1,-1,-1]))\n rndr_noraml.set_norm_mat(axis=np.array([1,1,1]))\n cam = Camera(width=img_resw, height=img_resh,focal=cam_f,camera_type=cam_type)\n cam.center=cam_t\n # cam_params = generate_cameras(dist=cam_dist, view_num=view_num)\n # sh_list = []\n for view_id in tqdm(range(0,view_num)):\n R=make_rotate(0,view_id*np.pi/180,0)\n rndr.rot_matrix=R\n rndr_noraml.rot_matrix=R\n cam.sanity_check()\n rndr.set_camera(cam)\n rndr.display()\n rndr_noraml.set_camera(cam)\n rndr_noraml.display()\n out_all_f = rndr.get_color(0)\n out_mask = out_all_f[:, :, 3]\n out_all_f = cv.cvtColor(out_all_f, cv.COLOR_RGBA2BGR)\n cv.imwrite(os.path.join('./test/%04d_rgb.png' % view_id), np.uint8(out_all_f * 255))\n # out_all_f = rndr_noraml.get_color(0)\n # out_mask = out_all_f[:, :, 3]\n # out_all_f = cv.cvtColor(out_all_f, cv.COLOR_RGBA2BGR)\n # cv.imwrite(os.path.join('./test/pn%04d.jpg' % view_id), np.uint8(out_all_f * 255))\n cv.imwrite(os.path.join('./test/%04d_mask.png' % view_id), np.uint8(out_mask * 255))\n # index=4\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"sunjc0306/3DRender","sub_path":"render_images.py","file_name":"render_images.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24502014479","text":"import socket\nimport sys\nimport time\nimport datetime\nimport random\nimport os\n\nimport logging\nimport argparse\n\nENCODE = 'utf-8'\n\nIP_SERVER = ''\nPORT_SERVER = 12345\n\nDEFAULT_LOG_LEVEL = logging.INFO\nTIME_FORMAT = '%Y-%m-%d,%H:%M:%S'\n\ndef main():\n\n\tparser = argparse.ArgumentParser(description='sistema de vendas')\n\n\tparser.add_argument(\"--purchase\", \"-p\", help='amount of purchases', type=int)\n\tparser.add_argument(\"--ip\", \"-i\", help=\"ip server\", type=str)\n\tparser.add_argument(\"--servers\", \"-ns\", help=\"number servers\", type=str)\n\tparser.add_argument(\"--timetoken\", \"-tt\", help=\"time token\", type=str)\n\n\n\thelp_msg = \"Logging level (INFO=%d DEBUG=%d)\" % (logging.INFO, logging.DEBUG)\n\tparser.add_argument(\"--log\", \"-l\", help=help_msg, default=DEFAULT_LOG_LEVEL, type=int)\n\t\n\targs = parser.parse_args()\n\n\tif args.log == logging.DEBUG:\n\t\tlogging.basicConfig(format='%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s', datefmt=TIME_FORMAT, level=args.log)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s.%(msecs)03d %(message)s', datefmt=TIME_FORMAT, level=args.log)\n\n\targs = parser.parse_args()\n\n\tglobal IP_SERVER \n\n\tIP_SERVER = args.ip\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\ts.connect((IP_SERVER, PORT_SERVER))\n\n\tdirs = 'times_servers_'+args.servers\n\t\n\tif not os.path.exists(dirs):\n\t\tos.makedirs(dirs)\n\n\tsFile = 'client'+IP_SERVER[7:]+'_'+args.timetoken+'.txt'\n\tfile = open(dirs+'/'+sFile, 'w')\n\n\ttimes = []\n\tfor i in range(args.purchase):\n\n\t\ttime_begin = datetime.datetime.now()\n\n\t\ts.send(bytes('purchase', ENCODE))\n\n\t\tmsg = s.recv(1024).decode(ENCODE)\n\n\t\t_, _, qnt = msg.split(' ')\n\n\t\tlogging.info(msg)\n\n\t\ts.send(bytes(str(random.randint(1,9)), ENCODE))\n\n\t\tresponse = s.recv(1024).decode(ENCODE)\n\n\t\tlogging.info(response)\n\n\t\ttime_end = datetime.datetime.now()\n\t\ttime_diff = (time_end - time_begin)\n\t\ttime_diff_seconds = time_diff.seconds + (time_diff.microseconds/1000000.0)\n\t\ttimes.append(time_diff_seconds)\n\t\ttime.sleep(0.01)\n\n\ts.send(bytes('disconnect', ENCODE))\n\n\tfor t in times:\n\t\tfile.write(str(args.servers)+' '+str(args.timetoken)+' '+str(t)+'\\n')\n\n\tfile.close()\t\n\ts.close()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"fanhenrique/distributed-sales-system","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15964839554","text":"#!/usr/bin/env python3\n\nimport re\n\nimport requests\nimport whitelist\n\nAGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0'\nHEADERS = {'Referer': 'http://forum.xda-developers.com/', 'User-Agent': AGENT}\nYOYO_DATA = {\"mimetype\": \"plaintext\", \"hostformat\": \"unixhosts\"}\n\nHOST_LIKE_LISTS = [\n ('http://www.malwaredomainlist.com/hostslist/hosts.txt', None),\n ('http://adblock.gjtech.net/?format=unix-hosts', None),\n ('http://someonewhocares.org/hosts/hosts', None),\n ('http://winhelp2002.mvps.org/hosts.txt', None),\n ('http://adaway.org/hosts.txt', None),\n #('http://adblock.mahakala.is/', HEADERS),\n]\n\nHOST = \"0.0.0.0\"\nOUTPUT_FILE = 'hosts-adblock-alobbs'\nCOMMENT = (\"alobbs hosts adblock\", \"https://github.com/alobbs/hosts-adblock\")\n\n\ndef bad_domain(d):\n if not d or not d.strip():\n return True\n if any([n in d for n in ['127.0.0.1', '::1', '#', ' ', '\\\\', '$']]):\n return True\n if any([re.match(ex, d) for ex in whitelist.allow]):\n return True\n\n\ndef hosts_entry(d):\n d = (d or \"\").strip().replace('\\t', ' ')\n if not d or d[0] == '#':\n return\n d = ' '.join([w for w in d.split(' ') if w])\n return d.split(' ')[1]\n\n\ndef get(url, **kw):\n if 'data' in kw:\n print(\"[POST] {}\".format(url))\n return requests.post(url, **kw)\n print(\"[GET] {}\".format(url))\n return requests.get(url, **kw)\n\n\ndef get_hosts(url, **kw):\n tmp = [hosts_entry(l) for l in get(url, **kw).text.split('\\n')]\n return [e for e in tmp if not bad_domain(e)]\n\n\nr = get('http://pgl.yoyo.org/adservers/serverlist.php?', data=YOYO_DATA)\ndomains = set([l.strip() for l in r.text.split('\\n') if not bad_domain(l)])\n\nfor (url, headers) in HOST_LIKE_LISTS:\n domains |= set(get_hosts(url, headers=headers))\n\nwith open(OUTPUT_FILE, 'w+') as f:\n f.write(''.join(['# {}\\n'.format(l) for l in COMMENT]))\n f.write(''.join(['{} {}\\n'.format(HOST, n) for n in sorted(domains)]))\n\nprint(\"{} creater with {} entries\".format(OUTPUT_FILE, len(domains)))\n","repo_name":"alobbs/hosts-adblock","sub_path":"hosts-adblock.py","file_name":"hosts-adblock.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"29183811588","text":"# coding:utf-8\nimport re\nimport os\n\n\ndef Tran2Str(src):\n res = ''\n for i in src.split(\"\\\\\"):\n if len(i) > 0:\n res += chr(int(i))\n return res\n\n\ndef ReplaceAllStr(src):\n p = re.compile(r'\\\\(1\\d\\d|2[01234]\\d|25[0123456]|[1-9]\\d?)')\n return p.sub(lambda m: Tran2Str(m.group(1)), src)\n\n\ndef ReplaceFile(filePath):\n baseFilename = os.path.basename(filePath)\n dirName = os.path.dirname(filePath)\n (shotname, extension) = os.path.splitext(baseFilename)\n outputFilePath = os.path.join(dirName, shotname + \"_decode\" + extension)\n\n with open(filePath, 'r') as fr:\n content = fr.read()\n content = ReplaceAllStr(content)\n\n with open(outputFilePath, 'w') as fw:\n fw.write(content)\n\n\na = \"\\\"\\\\s2\\\\175\\\\183\\\\232\\\\190\\\\147\\\\229\\\\133\\\\165\\\\229\\\\174\\\\137\\\\229\\\\190\\\\189\\\\231\\\\148\\\\181\\\\228\\\\191\\\\161\\\\230\\\\137\\\\139\\\\230\\\\156\\\\186\\\\229\\\\143\\\\183\\\\231\\\\160\\\\129\\\"\"\n\n# print re.findall(r'\\\".*?(\\\\[0-9]{1,3}).*?\\\"',a)\n\nprint(ReplaceAllStr(a))\n# print \"\\\\232\\\\175\\\\183\\\\232\\\\190\\\\147\".decode('unicode-escape')\n# ReplaceFile('common/util_dec.lua')\n","repo_name":"uncleheart/PublicFunction","sub_path":"python/NormalLibrary/deocdeLua.py","file_name":"deocdeLua.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41948684730","text":"# -*- coding=utf8 -*-\n\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtNetwork import *\n\nfrom constants import ICON_PATH, PLAYLIST_FAVORITE, PLAYLIST_MINE\n\n\nclass SpreadWidget(QWidget):\n \"\"\"支持展开和收缩动画的widget\"\"\"\n def __init__(self, parent=None):\n super().__init__(parent)\n self._layout = QVBoxLayout()\n\n self.fold_animation = QPropertyAnimation(self, QByteArray(b'maximumHeight'))\n self.spread_animation = QPropertyAnimation(self, QByteArray(b'maximumHeight'))\n self.maximum_height = 2000 # 大点,字不会挤到一起\n\n self.Qss = False\n\n self._init_signal_binding()\n self._set_prop()\n self.set_widget_prop()\n self.set_layout_prop()\n self.setLayout(self._layout)\n\n def paintEvent(self, event):\n option = QStyleOption()\n option.initFrom(self)\n painter = QPainter(self)\n style = self.style()\n style.drawPrimitive(QStyle.PE_Widget, option, painter, self)\n\n def _init_signal_binding(self):\n self.spread_animation.finished.connect(self.show)\n self.fold_animation.finished.connect(self.hide)\n\n def _set_prop(self):\n self.fold_animation.setDuration(300)\n self.fold_animation.setStartValue(self.maximum_height)\n self.fold_animation.setEndValue(0)\n\n self.spread_animation.setDuration(300)\n self.spread_animation.setStartValue(0)\n self.spread_animation.setEndValue(self.maximum_height)\n\n def fold_spread_with_animation(self):\n\n if self.fold_animation.state() == QAbstractAnimation.Running:\n self.fold_animation.stop()\n\n self.spread_animation.setStartValue(self.height())\n self.spread_animation.start()\n return\n\n if self.isVisible(): # hide the widget\n if self.spread_animation.state() == QAbstractAnimation.Running:\n self.spread_animation.stop()\n self.fold_animation.setStartValue(self.height())\n else:\n self.fold_animation.setStartValue(self.maximum_height)\n self.fold_animation.start()\n else:\n self.spread_animation.setStartValue(0)\n self.show()\n\n def showEvent(self, event):\n self.spread_animation.start()\n\n def hideEvent(self, event):\n self.parent().update()\n\n def set_widget_prop(self):\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n\n def set_layout_prop(self):\n self._layout.setContentsMargins(0, 0, 0, 0)\n self._layout.setSpacing(0)\n\n self._layout.addStretch(1)\n\n def add_widget(self, *args, **kw):\n self._layout.addWidget(*args, **kw)\n\n\nclass _BaseItem(QFrame):\n \"\"\"左边是图案,右边是文字按钮的widget\"\"\"\n\n signal_text_btn_clicked = pyqtSignal()\n\n active_item = []\n\n active_qss = \"\"\"\n QFrame#playlist_container {\n border-top: 0px;\n border-bottom: 0px;\n padding-left: 11px;\n border-left:4px solid #993333;\n background-color: #333;\n }\n \"\"\"\n normal_qss = \"\"\"\n QFrame#playlist_container {\n border-top: 0px;\n border-bottom: 0px;\n padding-left: 15px;\n border: 0px solid #993333;\n }\n QFrame#playlist_container:hover{\n background-color: #333;\n border-left:8px solid #993333;\n border-top: 10px solid #333;\n border-bottom: 10px solid #333;\n }\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._icon_label = QLabel(self)\n self._text_btn = QPushButton(self)\n self._layout = QHBoxLayout(self)\n self.setLayout(self._layout)\n\n self._icon_width = 16\n self._whole_width = 200\n self._whole_height = 30\n\n self._icon_label.setFixedSize(self._icon_width, self._icon_width)\n self.setFixedSize(self._whole_width, self._whole_height)\n self._text_btn.setFixedSize(self._whole_width-self._icon_width-10,\n self._whole_height)\n\n 
self._layout.setContentsMargins(0, 0, 0, 0)\n        self._layout.setSpacing(0)\n        self._layout.addWidget(self._icon_label)\n        self._layout.addSpacing(10)\n        self._layout.addWidget(self._text_btn)\n\n        self._text_btn.clicked.connect(self.on_text_btn_clicked)\n        self._text_btn.setObjectName('playlist_name')  # in order to apply css\n        self.setObjectName('playlist_container')\n\n    @classmethod\n    def set_active(cls, w):\n        \"\"\"Control which playlist item is currently active\n\n        :param w: the playlist item that should become active\n        :return: False if this item is already the active one\n        \"\"\"\n        if len(cls.active_item) != 0:\n            if w is cls.active_item[0]:  # ignore a repeated click on the active item\n                return False\n            cls.active_item[0].setStyleSheet(cls.normal_qss)\n            cls.active_item.pop()\n        w.setStyleSheet(cls.active_qss)\n        cls.active_item.append(w)\n        return True\n\n    @classmethod\n    def de_active_all(cls):\n        # restyle every item first, then clear the list; removing items while\n        # iterating over the same list would skip entries\n        for item in cls.active_item:\n            item.setStyleSheet(cls.normal_qss)\n        cls.active_item.clear()\n\n    @pyqtSlot()\n    def on_text_btn_clicked(self):\n        if _BaseItem.set_active(self):\n            self.signal_text_btn_clicked.emit()\n\n    def set_btn_text(self, text):\n        self._text_btn.setText(text)\n\n    def set_icon_pixmap(self, pixmap):\n        self._icon_label.setPixmap(pixmap)\n\n\nclass PlaylistItem(_BaseItem):\n    signal_text_btn_clicked = pyqtSignal([int], name='text_btn_clicked')\n\n    def __init__(self, parent=None):\n        super().__init__(parent)\n\n        self.data = {}\n\n    @pyqtSlot()\n    def on_text_btn_clicked(self):\n        if PlaylistItem.set_active(self):\n            self.signal_text_btn_clicked.emit(self.data['id'])\n\n    def set_playlist_item(self, playlist_model):\n        self.data = playlist_model\n        if playlist_model['type'] == 5:\n            self._icon_label.setObjectName('playlist_img_favorite')\n        else:\n            self._icon_label.setObjectName('playlist_img_mine')\n\n        metrics = QFontMetrics(self._text_btn.font())\n        text = metrics.elidedText(playlist_model['name'], Qt.ElideRight,\n                                  self._text_btn.width()-40)\n        self._text_btn.setToolTip(playlist_model['name'])\n        self._text_btn.setText(text)\n\n\nclass RecommendItem(_BaseItem):\n    def __init__(self, parent=None, text=None, pixmap=None):\n        super().__init__(parent)\n        if text:\n            self.set_btn_text(text)\n        if pixmap:\n            self.set_icon_pixmap(pixmap)\n","repo_name":"justsoso8/FeelUOwn","sub_path":"src/widgets/playlist_widget.py","file_name":"playlist_widget.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"34258695717","text":"import csv\nimport ctypes.wintypes\nimport re\nimport os\n\n\ndef documents_path():\n CSIDL_PERSONAL = 5 # My Documents\n SHGFP_TYPE_CURRENT = 0 # Get current, not default value\n\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n ctypes.windll.shell32.SHGetFolderPathW(None, CSIDL_PERSONAL, None, SHGFP_TYPE_CURRENT, buf)\n\n path = buf.value + r'\\SAP\\SAP GUI'\n return path\n\n\ndef run_update():\n sk38 = 0\n seat = 0 \n bmw = 0 \n audi = 0 \n br223 = 0 \n tmp = 0\n if os.path.isfile(documents_path() + r'\\data.txt'):\n pass\n else:\n with open(documents_path() + r'\\data.txt', 'w') as f:\n f.close()\n wb = documents_path() + r'\\data.txt'\n with open(wb, newline='') as File: \n reader = csv.reader(File)\n for row in reader:\n try:\n tmp = row[0].split(\"|\")[4]\n tmp = int(tmp)\n try:\n if int(tmp) > 1:\n for i in row:\n try:\n if re.search(r'seat', i.split('|')[2].lower()) and not re.search(r'bmw', i.split('|')[2].lower()):\n seat += float(i.split('|')[3])\n if re.search(r'bmw', i.split('|')[2].lower()):\n bmw += float(i.split('|')[3])\n if re.search(r'br223', i.split('|')[2].lower()):\n br223 += float(i.split('|')[3])\n if re.search(r'sk38', i.split('|')[2].lower()):\n sk38 += float(i.split('|')[3])\n except IndexError:\n continue\n if int(tmp) > 2:\n for i in row:\n try:\n if re.search(r'audi', i.split('|')[2].lower()):\n audi += float(i.split('|')[3])\n except IndexError:\n continue \n except ValueError:\n continue \n except IndexError:\n continue\n except ValueError:\n continue\n return seat, bmw, audi, br223, sk38 \n","repo_name":"evheniu/automated_graph","sub_path":"data_parser.py","file_name":"data_parser.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20097629053","text":"import numpy as np\r\nfrom scipy.stats import mode\r\nfrom decision_tree import *\r\nfrom utilities import CE, information_gain, split, best_split\r\nimport pandas as pd\r\n\r\nclass randomforest(object):\r\n # initialise the attributes of the class\r\n # same as before apart from num_trees= number of trees in the forest\r\n # bootstrap, a variable in (0,1) from which we consider (number of samples)*(boostrap value) within our tree - this helps reduce overfitting\r\n def __init__(self, num_trees, num_predictors, max_depth,\r\n min_samples_leaf, bootstrap):\r\n\r\n self.num_trees = num_trees\r\n self.num_predictors = num_predictors\r\n self.max_depth = max_depth\r\n self.min_samples_leaf = min_samples_leaf\r\n self.bootstrap = bootstrap\r\n self.forest = []\r\n\r\n # training function for the forest\r\n def train(self, X, y):\r\n # initialising variables and creating the number of sub samples as mentioned\r\n self.forest = []\r\n num_samples = X.shape[0]\r\n num_sub_samples = int(round(num_samples*self.bootstrap))\r\n \r\n # iterate through the number of trees, each time building a tree and appending this to the forest for which we will later use to predict\r\n for i in range(self.num_trees):\r\n # generate reduced X_train and y_train\r\n X_subset = X[:num_sub_samples]\r\n y_subset = y[:num_sub_samples]\r\n\r\n # build the tree, calling the decisiontree object\r\n tree = decisiontree(self.num_predictors, self.max_depth, self.min_samples_leaf)\r\n tree.train(X_subset, y_subset)\r\n self.forest.append(tree)\r\n\r\n # make predictions from the forest \r\n def predict(self, X):\r\n num_samples = X.shape[0]\r\n num_trees = self.num_trees\r\n # prediction matrix to store values\r\n preds = np.zeros([num_trees, num_samples])\r\n for i in range(num_trees):\r\n preds[i,:] = self.forest[i].predict(X)\r\n\r\n return mode(preds)[0][0]\r\n\r\n # score function to determine accuracy or the confusion matrix\r\n def score(self, X_test, y_test, confusion=False):\r\n # y prediction\r\n y_pred = self.predict(X_test)\r\n if confusion==False:\r\n # accuracy\r\n return np.float(sum(y_pred==y_test))/float(len(y_test))\r\n else:\r\n # confusion matrix\r\n y_pred = pd.Series(y_pred, name='Predicted')\r\n y_true = pd.Series(y_test, name='Actual')\r\n confusion_mat = pd.crosstab(y_true, y_pred)\r\n return confusion_mat\r\n\r\n\r\n","repo_name":"gwhilton/Random-Forest","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"9487107107","text":"\nfrom scipy import signal\nfrom scipy.io import wavfile\nimport sklearn.neural_network as nn\nimport sklearn.model_selection as ms\nimport sklearn.preprocessing as pp\nimport sklearn.metrics as mt\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\ndef write_to_csv(file_names,output_file,data_array):\n output = open(output_file, 'w')\n output.write(\",\")\n for i in file:\n output.write(i+',')\n for i in range(67):\n output.write('\\n'+file_names[i])\n for j in range(67):\n output.write(','+str(data_array[i,j]))\n output.close()\n\ndef prepare_and_print(data_array,labels_array,metric_type):\n data_array = (data_array-np.min(data_array))/(np.max(data_array)-np.min(data_array))#normalize data\n data_array_t = np.transpose(data_array) #transpose to get a second array in a pair KL(x,y)\n data_array =np.triu(data_array) #remove duplicate pairs\n data_array_t =np.triu(data_array_t) \n data_array_label1 = conditions_label* data_array \n data_array_label2 = data_array-data_array_label1\n data_array_t_label1 = conditions_label* data_array_t\n data_array_t_label2 = data_array_t-data_array_t_label1 #get arrays of pairs for each label\n \n plt.figure()\n plt.xlabel(metric_type+'(a,b)')\n plt.ylabel(metric_type+'(b,a)')\n plt.scatter(data_array_label1, data_array_t_label1,s=0.5)\n plt.scatter(data_array_label2, data_array_t_label2,s=0.5)\n plt.show()\n \ndef make_class_labels(file_names):\n class_labels = []\n for i in file_names:\n label_tmp=0\n if 'iphone' in i:\n label_tmp+=2\n if 'kitchen' in i:\n label_tmp+=1 #iphone-kitchen=3 iphone-room=2 android kitchen = 1 android room = 0\n class_labels.append(label_tmp)\n return np.array(class_labels) \n\ndef separate_by_conditions(class_labels):\n conditions_label = []\n for i in range(class_labels.shape[0]):\n for j in range(i,class_labels.shape[0]):\n if class_labels[i]==class_labels[j]:\n conditions_label.append(1)\n else:\n conditions_label.append(0)\n return conditions_label\n\ndef get_metrics(y_true, y_pred):\n acc = mt.accuracy_score(y_true, y_pred)\n precision = mt.precision_score(y_true, y_pred)\n rcall = mt.recall_score(y_true, y_pred)\n f1 = mt.f1_score(y_true, y_pred)\n return acc,f1,rcall,precision\n\nwav_files=[]\nfile =[]\nwritten=0\nfor r, d, f in os.walk('dataset1/'):\n for i in f:\n wav_files.append(i)\n file.append(i)\n for i in file:\n samprate,data = wavfile.read(os.path.join(r,i))\n if (len(np.shape(data))==2):\n data_size = np.shape(data)[0]\n data = np.ravel(data)\n data=data[:data_size]\n written=written+1\n wavfile.write(os.path.join(r,i),samprate,data)\n\n \nwav_files=[]\nfreq_and_pow = {}\nfor r, d, f in os.walk('my_set/'):\n for i in f:\n wav_files.append(i)\nfor i in wav_files:\n samprate,data = wavfile.read(os.path.join(r,i))\n freq,Pow = signal.welch(data,samprate,nperseg=2048)\n # plt.semilogy(freq,Pow)\n # plt.xlabel('Частота в Hz')\n # plt.ylabel('Спектральная мощность')\n # plt.show()\n freq_and_pow[i]={}\n freq_and_pow[i]['Sample rate']=samprate\n freq_and_pow[i]['frequency_size']=np.size(freq)\n freq_and_pow[i]['Power_size']=np.size(Pow)\n freq_and_pow[i]['Data_size']=np.shape(data)\n \noutput = open('dataset2.csv', 'w')\noutput.write(\"Название файла,\")\noutput.write('Частота дискретизации,')\noutput.write('Формат по частоте,')\noutput.write('Формат по мощности')\noutput.write('Формат данных')\nfor key in freq_and_pow.keys():\n output.write('\\n'+key+',')\n for column in freq_and_pow[key].keys():\n 
output.write(str(freq_and_pow[key][column])+',')\noutput.close()\n\nwav_files=[]\npower = []\nfreqs = []\nfor r, d, f in os.walk('my_set/'):\n for i in f:\n wav_files.append(os.path.join(i))\n for i in wav_files:\n samprate,data = wavfile.read(os.path.join(r,i))\n freq,Pow = signal.welch(data,samprate)\n Pow = Pow/np.sum(Pow)\n power.append(Pow)\n freqs.append(freq)\n \npower=np.array(power)\n\nKL_np = np.zeros([67,67])\nKL_val = 0\nfor i in range(67):\n for j in range(67):\n KL_val=np.sum(power[i]*np.log2(power[i]/power[j]))\n KL_np[i,j]=KL_val \n KL_val = 0\n\n\nIS_np = np.zeros([67,67])\nIS_val = 0\nfor i in range(67):\n for j in range(67):\n IS_val=np.sum(power[i]/power[j]+np.log2(power[i]/power[j])-1) #правильно?\n IS_np[i,j] = IS_val\n IS_val =0\n \nclass_label = make_class_labels(file) \nconditions_label = separate_by_conditions(class_label)\n \nwrite_to_csv(file, 'results_KL.csv', KL_np)\nwrite_to_csv(file, 'results_IS.csv', IS_np)\n\n#prepare_and_print(KL_np,conditions_label,'KL')\n#prepare_and_print(IS_np,conditions_label,'IS')\n\nKL_np_ab=[]\nKL_np_ba=[]\nIS_np_ab=[]\nIS_np_ba=[]\n\nfor i in range(67):\n for j in range(i,67):\n KL_np_ab.append(KL_np[i,j])\n KL_np_ba.append(KL_np[j,i])\n IS_np_ab.append(IS_np[i,j])\n IS_np_ba.append(IS_np[j,i])\n\nKL_np_ab=np.array(KL_np_ab)\nKL_np_ba=np.array(KL_np_ba)\nIS_np_ab=np.array(IS_np_ab)\nIS_np_ba=np.array(IS_np_ba)\n\n\nKL_np_ab_train,KL_np_ab_test,KL_np_ba_train,KL_np_ba_test,IS_np_ab_train,IS_np_ab_test,IS_np_ba_train,IS_np_ba_test,label_train,label_test = ms.train_test_split(np.ravel(KL_np_ab),\n np.ravel(KL_np_ba),\n np.ravel(IS_np_ab),\n np.ravel(IS_np_ba),\n np.ravel(conditions_label))\nfeatures_train = np.vstack((KL_np_ab_train,KL_np_ba_train,IS_np_ab_train,IS_np_ba_train))\nfeatures_train = np.transpose(features_train)\nfeatures_test = np.vstack((KL_np_ab_test,KL_np_ba_test,IS_np_ab_test,IS_np_ba_test))\nfeatures_test = np.transpose(features_test)\n#label_train = label_train.reshape(-1,1)\n#label_test = label_test.reshape(-1,1)\n\npp.normalize(features_train)\npp.normalize(features_test)\n\nMLP = nn.MLPClassifier().fit(features_train,label_train)\nprint(MLP.get_params())\nprint(MLP.score(features_test,label_test))\nmetrics = get_metrics(label_test, MLP.predict(features_test))\npredict = MLP.predict(features_test)\nroc_x,roc_y, threshold = mt.roc_curve(label_test, MLP.predict_proba(features_test)[:,1],pos_label=1)\nplt.figure()\nplt.title('ROC-кривая')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.plot(roc_x,roc_y)\nplt.plot([0,1],[0,1])\nplt.show()\nclass_report = mt.classification_report(label_test, MLP.predict(features_test))\nprint(class_report)","repo_name":"vfkon/siamese-net","sub_path":"untitled5.py","file_name":"untitled5.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28908347456","text":"# Ported from QSvip by SineStriker\nimport abc\nimport dataclasses\nimport enum\nimport math\nimport struct\nfrom itertools import chain\nfrom typing import Generic, Literal, NamedTuple, Optional, TypeVar\n\nfrom more_itertools import chunked\n\nXSItem = TypeVar(\"XSItem\")\n\n\ndef to_backing_field(key: str) -> str:\n return f\"<{key}>k__BackingField\"\n\n\nclass XSLineParamNode(NamedTuple):\n pos: int = 0\n value: int = 0\n\n\n@dataclasses.dataclass\nclass XSLineParam:\n \"\"\"SingingTool.Model.Line.LineParam\"\"\"\n\n line_param: bytes = dataclasses.field(\n default=b\"\",\n metadata={\n \"alias\": \"LineParam\",\n },\n )\n nodes: list[XSLineParamNode] = dataclasses.field(init=False)\n\n def __post_init__(self):\n self.nodes = []\n if len(self.line_param) >= 4:\n (node_count,) = struct.unpack(\" firerate:\n self.fire_time -= firerate\n self.projectiles.add(Projectile(self.Pla.circle.pos, (0,-0.5), (250, 50, 50), 50 ))\n\n if self.spawn_time > spawn_rate:\n self.spawn_time -= spawn_rate\n self.zombies.add(Zombie((random.randint(50,width-50), 0), (0, 0.05), (50, 250, 50), 100))\n\n\n for zombie in self.zombies:\n for bullet in self.projectiles:\n bullet.collide(zombie)\n\n\n self.Pla.update(dt, controller)\n\n remove_zombies = set()\n\n for zombie in self.zombies:\n if (zombie.update(dt)):\n remove_zombies.add(zombie)\n\n for remove_ in remove_zombies:\n self.zombies.remove(remove_)\n\n remove_proj = set()\n\n for projec in self.projectiles:\n if (projec.update(dt)):\n remove_proj.add(projec)\n\n for remove in remove_proj:\n self.projectiles.remove(remove)\n\n print(len(remove_proj),len(self.projectiles))\n\n\n\n def render(self,screen):\n self.Pla.render(screen)\n self.timer.render(screen)\n for projec in self.projectiles:\n projec.render(screen)\n\n for zombie in self.zombies:\n zombie.render(screen)\n","repo_name":"DortyTheGreat/Code-Samples","sub_path":"Pyhton/PyGameEvo/GameField.py","file_name":"GameField.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23390950451","text":"import math\nimport itertools\n\ndef check_palindrome(strng):\n if (len(strng) == 1):\n return True\n if (len(strng) == 2):\n return strng[0] == strng[1]\n if (strng[0] == strng[-1]):\n return check_palindrome(strng[1:-1])\n return False\n\n\ndef check_square(num):\n sqr = num * num\n return check_palindrome(str(sqr)), sqr\n\n\ndef perf_palindrome_kd(k, A, B, cnt):\n digits = [str(n) for n in range(10)]\n if (k % 2 == 0):\n d = itertools.combinations(digits, k/2)\n else:\n d = itertools.combinations(digits, (k+1)/2)\n while True:\n try:\n num_str = d.next()\n if (num_str[0] == '0'):\n continue\n if (k % 2 == 0):\n num = int(''.join(num_str + num_str[::-1]))\n else:\n num = int(''.join(num_str + num_str[:-1][::-1]))\n sts, val = check_square(num)\n if (sts):\n if (A <= val <= B):\n cnt = cnt + 1\n except StopIteration:\n return cnt\n\ndef palindrome(A, B):\n a = math.sqrt(A)\n b = math.sqrt(B)\n na = len(str(int(a)))\n nb = len(str(int(b)))\n cnt = 0\n for k in range(na, nb + 1):\n cnt = perf_palindrome_kd(k, A, B, cnt)\n return cnt\n\n\nfid_ip = open('C-small-attempt2.in', 'r')\nfid_op = open('output.in', 'w')\n\nnum_cases = int(fid_ip.readline())\n\nfor case in range(num_cases):\n [A, B] = [int(x) for x in fid_ip.readline().split()]\n fid_op.write('Case #' + str(case+1) + ': ' + str(palindrome(A, B))+\"\\n\")\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1177.py","file_name":"1177.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16923776065","text":"from DyCommon.Ui.DyTableWidget import *\n\nfrom EventEngine.DyEvent import *\n\n\nclass DyStockBackTestingStrategyResultPositionWidget(DyTableWidget):\n\n header = ['代码','名称','可用数量/总数量','成本价/现价','盈亏(%)','最大盈/亏(%)','持有期','除权除息']\n\n def __init__(self, dataViewer):\n super().__init__(None, True, False)\n\n self._dataViewer = dataViewer\n \n self.setColNames(self.header)\n self.setAutoForegroundCol('盈亏(%)')\n\n def update(self, pos):\n # remove non-existing codes\n rows = self.getAll()\n\n for row in rows:\n if row[0] not in pos:\n self.removeRow(row[0])\n\n # update new positions\n for code, pos_ in pos.items():\n self[code] = [pos_.code, pos_.name,\n '%.2f/%.2f'%(pos_.availVolume, pos_.totalVolume),\n '%.3f/%.3f'%(pos_.cost, pos_.price),\n pos_.pnlRatio,\n '%.2f/%.2f'%(pos_.maxPnlRatio, pos_.minPnlRatio),\n pos_.holdingPeriod,\n '是' if pos_.xrd else '否'\n ]\n","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/BackTesting/Ui/Basic/Strategy/DyStockBackTestingStrategyResultPositionWidget.py","file_name":"DyStockBackTestingStrategyResultPositionWidget.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"}
+{"seq_id":"46029000410","text":"from ..models import Image, Product, Care\nfrom django import template\n\nregister = template.Library()\n\n@register.simple_tag\ndef get_image(product):\n image = (product.image_set.all())[0]\n return str(image.image)\n\n@register.simple_tag\ndef sub_categories(category):\n products = Product.objects.filter(category=category)\n categories = []\n for item in products:\n if item.sub_category not in categories:\n categories.append(item.sub_category)\n return categories\n\n@register.simple_tag\ndef get_care(product):\n return product.care_set.all()\n \n\n","repo_name":"Rafia-khuram/Ecommerce_website-python-django-","sub_path":"Web_App/templatetags/webApp_tags.py","file_name":"webApp_tags.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22006036233","text":"import sys; sys.stdin = open(\"27159.txt\", \"r\")\n\nN = int(input())\narr = list(map(int, input().split()))\n\ntotal = arr[0]\nfor i in range(1, N):\n if arr[i] - arr[i-1] > 1:\n total += arr[i]\n\nprint(total)","repo_name":"vreez/APS","sub_path":"boj/boj_27159_노 땡스!.py","file_name":"boj_27159_노 땡스!.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16929064668","text":"from flask import Blueprint, request, jsonify\nfrom models.user_model import User, UserSchema\nfrom utils.db import db\n\nusers = Blueprint('users', __name__)\n\n@users.route(\"/users\")\ndef get_users():\n all_users = User.query.all()\n user_schema = UserSchema(many=True)\n all_users = user_schema.dump(all_users)\n print(all_users)\n\n return jsonify(all_users)\n\n@users.route(\"/users/\", methods=[\"GET\"])\ndef get_user(document):\n user_exist = User.query.get(document)\n if(not user_exist):\n return \"That user does not exist\"\n \n user_schema = UserSchema()\n user = user_schema.dump(user_exist)\n return jsonify(user)\n\n@users.route(\"/users\", methods=['POST'])\ndef new_user():\n document_id = request.form[\"document_id\"]\n first_name = request.form[\"first_name\"]\n last_name = request.form[\"last_name\"]\n document_type = request.form[\"document_type\"]\n user_type = request.form[\"user_type\"]\n phone = request.form[\"phone\"]\n gender = request.form[\"gender\"]\n email = request.form[\"email\"]\n password = request.form[\"password\"] \n\n new_user = User(document_id, first_name, last_name, document_type, user_type, phone,gender, email, password)\n\n db.session.add(new_user)\n db.session.commit()\n\n return first_name\n\n@users.route(\"/users/update/\", methods=[\"PUT\"])\ndef update_user(document):\n user_exist = User.query.get(document)\n\n if(not user_exist):\n return \"That user does not existe\"\n\n user_exist.document_id = request.form[\"document_id\"]\n user_exist.first_name = request.form[\"first_name\"]\n user_exist.last_name = request.form[\"last_name\"]\n user_exist.document_type = request.form[\"document_type\"]\n user_exist.user_type = request.form[\"user_type\"]\n user_exist.phone = request.form[\"phone\"]\n user_exist.gender = request.form[\"gender\"]\n user_exist.email = request.form[\"email\"]\n user_exist.password = request.form[\"password\"] \n\n db.session.commit()\n user_schema = UserSchema()\n user = user_schema.dump(user_exist)\n\n return jsonify(user)\n\n@users.route(\"/users/delete/\", methods=[\"DELETE\"])\ndef delete_user(document):\n user_exist = User.query.get(document)\n if(not user_exist):\n return \"That User does not exist\"\n\n db.session.delete(user_exist)\n db.session.commit()\n return f'User with document: {document} was removed'","repo_name":"JoseMMC99/WolfCub","sub_path":"usersApi/controllers/user_controllers.py","file_name":"user_controllers.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23613324681","text":"in_file = open(\"sin.txt\", \"r\")\nout_file = open(\"sout.txt\", \"w\")\n\ntest_cases = int(in_file.readline())\ninput_line = []\n\nfor test_case_count in range(1, test_cases+1):\n input_line = in_file.readline().split(' ')\n\n move_count = 0\n\n curr_robot_prev_button = 1\n prev_robot_prev_button = 1\n\n prev_robot = ''\n curr_delta = 0\n prev_delta = 0\n\n buttons_to_press = int(input_line.pop(0))\n\n for button_press_count in range(1, buttons_to_press+1):\n #button_press = in_file.readline()\n robot = input_line.pop(0)\n button = int(input_line.pop(0))\n #(robot, button) = button_press.split(' ')\n \n # If this is the first robot,\n # Set current delta move\n if len(prev_robot) == 0:\n move = button # as it is starting in position 1\n curr_robot_prev_button = button\n \n #print(\"Robot \" + robot + \" moves \" + str(move) + \" steps\")\n \n # If same as prev robot\n # Add the move to delta\n elif robot == prev_robot:\n move = abs(curr_robot_prev_button - button)\n move = move + 1\n #move = move + curr_delta\n curr_robot_prev_button = button\n\n #print(\"Robot \" + robot + \" should move \" + str(move) + \" steps\")\n\n if move <= prev_delta:\n #prev_delta = prev_delta - move\n move = 1\n else:\n move = move - prev_delta\n #prev_delta = 0\n\n #print(\"Robot \" + robot + \" moves \" + str(move) + \" steps\")\n\n # If robot has changed\n elif robot != prev_robot:\n move = abs(prev_robot_prev_button - button)\n move = move + 1\n #move = move + prev_delta \n\n #print(\"Robot \" + robot + \" should move \" + str(move) + \" steps\")\n\n # interchange current and prev robot's prev buttons\n prev_robot_prev_button = curr_robot_prev_button\n curr_robot_prev_button = button\n\n # interchange delta\n prev_delta = curr_delta\n curr_delta = 0\n \n # calc delta\n if move <= prev_delta:\n #prev_delta = prev_delta - move\n move = 1\n else:\n move = move - prev_delta\n #prev_delta = 0\n\n #print(\"Robot \" + robot + \" moves \" + str(move) + \" steps\")\n\n prev_delta = 0\n curr_delta = curr_delta + move\n #print(\"curr_delta = \" + str(curr_delta))\n move_count = move_count + move\n #print(\"move_count = \" + str(move_count))\n prev_robot = robot\n\n #move_count = move_count + curr_delta\n print(\"Case #\" + str(test_case_count) + \": \" + str(move_count), \\\n file=out_file)\n\nin_file.close()\nout_file.close()\n \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_74/1270.py","file_name":"1270.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20813583445","text":"import glob\nimport numpy as np\nimport os\nfrom shutil import copyfile\n\n\ndef create_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n\n\ndef split_to_train_test_val(in_path, train_path, val_path, test_path, train_frac=0.6, test_frac=0.2, val_frac=0.2):\n\n\n files = glob.glob(in_path + \"/*.png\") # impurities\n random = np.random.permutation(files)\n num_files = len(files)\n train_files = random[0:int(num_files*train_frac)]\n val_files = random[-int(num_files*val_frac):]\n\n if test_path is not None:\n test_files = random[int(num_files * train_frac): int(num_files * train_frac + num_files * test_frac)]\n\n for train_file in train_files:\n base = os.path.basename(train_file)\n copyfile(train_file, train_path + \"/\" + base)\n for val_file in val_files:\n base = os.path.basename(val_file)\n copyfile(val_file, val_path + \"/\" + base)\n if test_path is not None:\n for test_file in test_files:\n base = os.path.basename(test_file)\n copyfile(test_file, test_path + \"/\" + base)\n\n\ndef split_to_classes(input_data_path, test_path=None, out_two_classes=None, out_one_class=None,\n train_frac=0.6, test_frac=0.2, val_frac=0.2):\n if out_one_class is None and out_two_classes is None:\n return\n if out_one_class is not None and out_two_classes is not None:\n print(\"Nothing has changed: Please choose only one out method! One class / Two classes.\")\n return\n\n if out_one_class is not None:\n create_dir(out_one_class)\n base_path = out_one_class\n else:\n create_dir(out_two_classes)\n base_path = out_two_classes\n\n train_path = base_path + \"/train\"\n train_path_normal = train_path + \"/normal\"\n create_dir(train_path)\n create_dir(train_path_normal)\n\n val_path = base_path + \"/validation\"\n val_path_normal = val_path + \"/normal\"\n create_dir(val_path)\n create_dir(val_path_normal)\n\n test_path_normal_class = None\n test_path_anomaly_class = None\n if test_path is not None:\n test_path_normal = test_path + \"/normal\"\n test_path_anomaly = test_path + \"/anomaly\"\n test_path_normal_class = test_path_normal + \"/test\"\n test_path_anomaly_class = test_path_anomaly + \"/test\"\n create_dir(test_path)\n create_dir(test_path_normal)\n create_dir(test_path_anomaly)\n create_dir(test_path_normal_class)\n create_dir(test_path_anomaly_class)\n\n if out_one_class is not None:\n split_to_train_test_val(in_path=input_data_path+\"/normal\", train_path=train_path_normal,\n val_path=val_path_normal, test_path=test_path_normal_class)\n\n if out_two_classes is not None:\n train_path_anomaly = train_path + \"/anomaly\"\n create_dir(train_path_anomaly)\n\n val_path_anomaly = val_path + \"/anomaly\"\n create_dir(val_path_anomaly)\n\n split_to_train_test_val(in_path=input_data_path+\"/normal\", train_path=train_path_normal,\n val_path=val_path_normal, test_path=test_path_normal_class)\n split_to_train_test_val(in_path=input_data_path+\"/anomaly\", train_path=train_path_anomaly,\n val_path=val_path_anomaly, test_path=test_path_anomaly_class)\n\n# split_to_classes(input_data_path=\"./data/rescaled_extended\", test_path=\"./data/test_rescaled_extended\",\n# out_two_classes=\"./data/rescaled_extended_2_classes\", out_one_class=None)\nsplit_to_classes(input_data_path=\"./data/rescaled_extended\", test_path=None, # need to create test dir only once\n out_two_classes=None, 
out_one_class=\"./data/rescaled_extended_1_class\")\n","repo_name":"Scientific-Computing-Lab-NRCN/MLography","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"22782004396","text":"import os\nimport numpy as np\nimport astropy.units as u\nimport astropy.constants as c\nimport matplotlib.pyplot as plt\nfrom scipy.special import erf\nfrom scipy.interpolate import interp1d\n\nfrom pypeit import io\nfrom pypeit import specobjs\nfrom pypeit import spec2dobj\nfrom pypeit.sensfunc import IRSensFunc\nfrom pypeit.images.detector_container import DetectorContainer\n\nnarrow_spec1df = os.path.join('../arxiv/LRIS_2203/LRIS_220305/reduced/all/Science/', \n 'spec1d_r220305_00206-GD153_LRISr_20220305T155330.106.fits')\nnarrow_spec2df = os.path.join('../arxiv/LRIS_2203/LRIS_220305/reduced/all/Science/',\n 'spec2d_r220305_00206-GD153_LRISr_20220305T155330.106.fits')\nwide_spec1df = os.path.join('../arxiv/LRIS_2203/LRIS_220305/reduced/long_8.7/Science',\n 'spec1d_r220305_00207-GD153_LRISr_20220305T155549.814.fits')\nwide_spec2df = os.path.join('../arxiv/LRIS_2203/LRIS_220305/reduced/long_8.7/Science',\n 'spec2d_r220305_00207-GD153_LRISr_20220305T155549.814.fits')\ndetname = DetectorContainer.get_name(det=1)\nnarrow_sobjs = specobjs.SpecObjs.from_fitsfile(narrow_spec1df, chk_version=False)\nnarrow_spec2DObj = spec2dobj.Spec2DObj.from_file(narrow_spec2df, detname, chk_version=False)\n\nwide_sobjs = specobjs.SpecObjs.from_fitsfile(wide_spec1df, chk_version=False)\nwide_spec2DObj = spec2dobj.Spec2DObj.from_file(wide_spec2df, detname, chk_version=False)\n\npixel_scale = 0.123 # arcsec/pixel\nbinning = 2 # in spatial direction\nwide_fwhm_func = interp1d(wide_sobjs[0].OPT_WAVE, wide_sobjs[0].FWHMFIT, fill_value=\"extrapolate\")\nwide_fwhmfit = wide_fwhm_func(narrow_sobjs[0].OPT_WAVE)\nnarrow_fwhmfit = narrow_sobjs[0].FWHMFIT\n\nratio = erf((8.7/2)/(np.sqrt(2)*wide_fwhmfit*pixel_scale*binning/2.355)) / \\\n erf((1/2)/(np.sqrt(2)*narrow_fwhmfit*pixel_scale*binning/2.355))\n\n# ratio = erf((8.7/2)/(np.sqrt(2)*wide_sobjs[0].FWHM*pixel_scale*binning/2.355)) / \\\n# erf((1/2)/(np.sqrt(2)*narrow_sobjs[0].FWHM*pixel_scale*binning/2.355))\nprint(ratio)\nplt.plot(narrow_sobjs[0].OPT_WAVE, narrow_sobjs[0].OPT_COUNTS*ratio, label='Narrow (corrected)')\nplt.plot(narrow_sobjs[0].OPT_WAVE, narrow_sobjs[0].OPT_COUNTS, label='Narrow', alpha=0.8)\nplt.plot(wide_sobjs[0].OPT_WAVE, wide_sobjs[0].OPT_COUNTS, label='Wide')\nplt.legend()\nplt.show()\n\nplt.plot(narrow_sobjs[0].OPT_WAVE, ratio)\nplt.show()\n# wide_func = interp1d(wide_sobjs[0].OPT_WAVE, wide_sobjs[0].OPT_COUNTS, fill_value=\"extrapolate\")\n# plt.plot(narrow_sobjs[0].OPT_WAVE, narrow_sobjs[0].OPT_COUNTS/wide_func(narrow_sobjs[0].OPT_WAVE))\n# plt.show()","repo_name":"enigma-igm/highz_qso_arxiv","sub_path":"highz_qso_arxiv/script/plot_standard_slitloss.py","file_name":"plot_standard_slitloss.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"75066600194","text":"import re\n \n \nwith open('big_word.txt', encoding='utf-8') as f:\n words = f.read().strip()\n \narr = [word for word in re.split('[ ,.\\\"\\n]', words) if word]\nmax_len_word = max(arr, key=len)\ncount_word = arr.count(max_len_word)\n\nprint(f'Самое длинное слово: \"{max_len_word}\"\\n{count_word} раз встречается в тексте')","repo_name":"avdivo/lesson_1","sub_path":"my_tasks/big_word.py","file_name":"big_word.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18869358984","text":"\r\nimport re\r\n\r\n\r\nclass Reg_Exp:\r\n pattern_punctuation = r\"\"\"[!?,.:;\"#$£€%&'()+-/<≤=≠≥>@[\\]^_{|},。、—‘’“”:;【】¥…《》?!()]\"\"\"\r\n pattern_url = r\"[(http(s)?):\\/\\/(www\\.)?a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\r\n pattern_email = r\"[\\w\\-\\.]+@([\\w\\-]+\\.)+[\\w\\-]{2,4}\"\r\n pattern_arabic = r\"[\\u0600-\\u06FF]\"\r\n pattern_chinese = r\"[\\u4e00-\\u9fff]\"\r\n pattern_tamil = r\"[\\u0B80-\\u0BFF]\"\r\n pattern_thai = r\"[\\u0E00-\\u0E7F]\"\r\n pattern_russian = r\"[\\u0400-\\u04FF]\"\r\n pattern_korean = r\"[\\uac00-\\ud7a3]\"\r\n pattern_japanese = r\"[\\u3040-\\u30ff\\u31f0-\\u31ff]\"\r\n pattern_vietnamese = r\"[àáãạảăắằẳẵặâấầẩẫậèéẹẻẽêềếểễệđìíĩỉịòóõọỏôốồổỗộơớờởỡợùúũụủưứừửữựỳỵỷỹýÀÁÃẠẢĂẮẰẲẴẶÂẤẦẨẪẬÈÉẸẺẼÊỀẾỂỄỆĐÌÍĨỈỊÒÓÕỌỎÔỐỒỔỖỘƠỚỜỞỠỢÙÚŨỤỦƯỨỪỬỮỰỲỴỶỸÝ]\"\r\n pattern_emoji = r'[\\U0001F1E0-\\U0001F1FF\\U0001F300-\\U0001F64F\\U0001F680-\\U0001FAFF\\U00002702-\\U000027B0]'\r\n\r\n\r\ndef unwanted_character_detected(text_for_detect):\r\n matchs = re.search(\r\n r'[^a-zA-Z0-9\\s\\t{}{}{}{}{}{}{}{}{}{}]'.format(\r\n Reg_Exp.pattern_punctuation[1:-1],\r\n Reg_Exp.pattern_arabic[1:-1],\r\n Reg_Exp.pattern_chinese[1:-1],\r\n Reg_Exp.pattern_tamil[1:-1],\r\n Reg_Exp.pattern_thai[1:-1],\r\n Reg_Exp.pattern_russian[1:-1],\r\n Reg_Exp.pattern_korean[1:-1],\r\n Reg_Exp.pattern_japanese[1:-1],\r\n Reg_Exp.pattern_vietnamese[1:-1],\r\n Reg_Exp.pattern_emoji[1:-1],\r\n ), text_for_detect, re.I)\r\n if matchs:\r\n return True\r\n return False\r\n\r\n\r\ndef detokenize_zh(text):\r\n text = re.sub(r'\\s?([\\u4e00-\\u9fff,。、—‘’“”:;【】¥…《》?!()])\\s?', r'\\1', text)\r\n return text\r\n\r\n\r\ndef detokenize_en(text):\r\n if not text.strip():\r\n return\r\n step1 = text.replace(\"`` \", '\"').replace(\r\n \" ''\", '\"').replace('. . .', '...')\r\n step2 = step1.replace(\" ( \", \" (\").replace(\" ) \", \") \")\r\n step3 = re.sub(r' ([.,:;?!%]+)([ \\'\"`])', r\"\\1\\2\", step2)\r\n step4 = re.sub(r' ([.,:;?!%]+)$', r\"\\1\", step3)\r\n step5 = step4.replace(\" '\", \"'\").replace(\" n't\", \"n't\").replace(\r\n \"can not\", \"cannot\")\r\n step6 = step5.replace(\" ` \", \" '\")\r\n return step6.strip()\r\n\r\n\r\ndef tokenize_by_char_zh(sent):\r\n chars = re.split(r'([\\u4e00-\\u9fff\\W])', sent)\r\n chars = [w for w in chars if len(w.strip()) > 0]\r\n return chars\r\n\r\n\r\ndef recaser_vi_en_ms(sent):\r\n\r\n if not sent or not sent.strip():\r\n return\r\n words_list = sent.split()\r\n for i, word in enumerate(words_list):\r\n if i == 0 or words_list[i-1] in '.?!\"':\r\n words_list[i] = word[0].upper()+word[1:]\r\n return ' '.join(words_list)\r\n\r\n\r\n","repo_name":"zouxunlong/web_crawl","sub_path":"scripts/dags_python_scripts/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40356777636","text":"from speak import say\nfile = open('text.txt','r',encoding=\"utf8\")\ntext=\"Hello there mate, how are you\"\nsay(text,'en')\n\"\"\"\nfor each in file:\n text+=each\n print(text)\n #text=eval(input(\"Enter urdu text:\"))\n say(text,'ur')\n\"\"\"","repo_name":"furkhan67/codes","sub_path":"projects/python/speech/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23550206521","text":"from datetime import datetime, time, timedelta\n\ndef add_time(value, T):\n departure = datetime.combine(datetime.today(), time(int(value[0:2]), int(value[3:])))\n ready = departure + timedelta(minutes=T)\n if ready.date() == departure.date():\n return ready.strftime(\"%H:%M\")\n return \"99:99\" # Too long to wait, the departure is not today\n\ndef insert_train(trains, train, i):\n if i == len(trains) - 1: return i + 1\n if train > trains[-1]: return len(trains)\n j = i + 1\n k = len(trains) - 1\n while True:\n if j >= k - 1:\n if train > trains[j]:\n return j + 1\n return j\n l = (j+k)//2\n if trains[l] >= train: k = l\n else: j = l \n\ndef send_train(trains, T, i):\n train = trains[i]\n arrival = train[6:11]\n # Construct the new train line.\n new_train = add_time(arrival, T) + \" \"*7\n if train[-1] == \"A\": new_train += \"B\"\n else: new_train += \"A\"\n # Insert this line into the list.\n trains.insert(insert_train(trains, new_train, i), new_train)\n return trains\n\ndef solve(trains, T):\n max_a, max_b = 0, 0\n cur_a, cur_b = 0, 0\n i = 0\n while i < len(trains):\n train = trains[i] # train is a string: \"HH:MM HH:MM X\"\n if train[6:11] == \" \"*5:\n # This train came from the other station.\n if train[-1] == \"A\": cur_a += 1\n else: cur_b += 1\n else:\n # This train must go on schedule.\n if train[-1] == \"A\":\n if cur_a > 0: cur_a -= 1\n else: max_a += 1\n else:\n if cur_b > 0: cur_b -= 1\n else: max_b += 1\n trains = send_train(trains, T, i)\n i += 1\n return (max_a, max_b)\n\nf = open(\"b-large.txt\")\nN = int(f.readline())\nfor n in range(0, N):\n T = int(f.readline())\n NAB = f.readline().split()\n NA = int(NAB[0])\n NB = int(NAB[1])\n trains = [f.readline().rstrip() + \" A\" for i in range(0, NA)]\n trains += [f.readline().rstrip() + \" B\" for i in range(0, NB)]\n trains.sort()\n print(\"Case #%d: %d %d\" % ((n + 1,) + solve(trains, T)))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_2/202.py","file_name":"202.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70322019075","text":"try:\n from typing import Optional, List\nexcept ImportError:\n pass\nfrom adafruit_display_shapes.multisparkline import MultiSparkline\n\n\nclass Sparkline(MultiSparkline):\n \"\"\"A sparkline graph.\n\n :param int width: Width of the sparkline graph in pixels\n :param int height: Height of the sparkline graph in pixels\n :param int max_items: Maximum number of values housed in the sparkline\n :param bool dyn_xpitch: (Optional) Dynamically change xpitch (True)\n :param int|None y_min: Lower range for the y-axis. Set to None for autorange.\n :param int|None y_max: Upper range for the y-axis. Set to None for autorange.\n :param int x: X-position on the screen, in pixels\n :param int y: Y-position on the screen, in pixels\n :param int color: Line color, the default value is 0xFFFFFF (WHITE)\n\n Note: If dyn_xpitch is True (default), the sparkline will allways span\n the complete width. Otherwise, the sparkline will grow when you\n add values. Once the line has reached the full width, the sparkline\n will scroll to the left.\n \"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n width: int,\n height: int,\n max_items: int,\n dyn_xpitch: Optional[bool] = True, # True = dynamic pitch size\n y_min: Optional[int] = None, # None = autoscaling\n y_max: Optional[int] = None, # None = autoscaling\n x: int = 0,\n y: int = 0,\n color: int = 0xFFFFFF, # line color, default is WHITE\n ) -> None:\n super().__init__(\n width, height, max_items, [color], dyn_xpitch, [y_min], [y_max], x, y\n )\n\n # pylint: enable=too-many-arguments\n\n def add_value(self, value: float, update: bool = True) -> None:\n \"\"\"Add a value to the sparkline.\n\n :param float value: The value to be added to the sparkline\n :param bool update: trigger recreation of primitives\n\n Note: when adding multiple values it is more efficient to call\n this method with parameter 'update=False' and then to manually\n call the update()-method\n \"\"\"\n\n self.add_values([value], update)\n\n def update(self) -> None:\n \"\"\"Update the drawing of the sparkline.\"\"\"\n\n self.update_line(0)\n\n def values(self) -> List[float]:\n \"\"\"Returns the values displayed on the sparkline.\"\"\"\n\n return self.values_of(0)\n\n @property\n def y_top(self) -> float:\n \"\"\"\n :return: The actual maximum value of the vertical scale, will be updated if autorange\n \"\"\"\n return self.y_tops[0]\n\n @property\n def y_bottom(self) -> float:\n \"\"\"\n :return: The actual minimum value of the vertical scale, will be updated if autorange\n \"\"\"\n return self.y_bottoms[0]\n","repo_name":"DJDevon3/My_Circuit_Python_Projects","sub_path":"Boards/espressif/Adafruit MatrixPortal S3/128x96 RGB Matrix/lib/adafruit_display_shapes/sparkline.py","file_name":"sparkline.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"106022561","text":"# -*- coding: utf-8 -*-\nfrom cmath import phase\nimport os\nfrom tkinter.font import names\nimport warnings\nimport time\nimport pandas as pd\nimport scipy.stats\nimport scipy.io\nfrom scipy.optimize import curve_fit\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import RandomizedSearchCV\n# ignore all warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef read_mat(mat_path):\n mat = scipy.io.loadmat(mat_path)\n mat = np.asarray(mat['feats_mat'], dtype=np.float)\n mat[np.isnan(mat)] = 0\n mat[np.isinf(mat)] = 0\n return mat\n\n\ndef compute_metrics(y_pred, y):\n\n SRCC = scipy.stats.spearmanr(y, y_pred)[0]\n PLCC = scipy.stats.pearsonr(y, y_pred)[0]\n RMSE = np.sqrt(mean_squared_error(y, y_pred))\n return [SRCC, PLCC, RMSE]\n\ndef formatted_print(snapshot, phase, params, duration):\n print('======================================================')\n \n if not params == None:\n print('params: ', params)\n \n print('SRCC-'+phase+':', snapshot[0])\n print('PLCC-'+phase+':', snapshot[1])\n print('RMSE-'+phase+':', snapshot[2])\n print('======================================================')\n \n if not duration == None:\n print(' -- ' + str(duration) + ' seconds elapsed...\\n\\n')\n\ndef train_single_epoch(feats, mos, param_grid, rnd_seed):\n t_start = time.time()\n \n # grid search for parameter\n grid = RandomizedSearchCV(SVR(), param_grid, cv=3, n_jobs=-1, random_state=rnd_seed)\n scaler = MinMaxScaler().fit(feats)\n feats = scaler.transform(feats)\n \n grid.fit(feats, mos)\n best_params = grid.best_params_\n\n # retrain model with the best parameters\n regressor = SVR(C=best_params['C'], gamma=best_params['gamma'])\n regressor.fit(feats, mos)\n \n # predict\n mos_pred = regressor.predict(feats)\n # evaluate train acc\n metrics = compute_metrics(mos_pred, mos)\n\n t_end = time.time()\n formatted_print(metrics, phase='train', params=best_params, duration=(t_end - t_start))\n\n return regressor\n\ndef test_single_epoch(feats_test, regressor, output_pred=True):\n\n mos_test_pred = regressor.predict(feats_test)\n \n if output_pred:\n return mos_test_pred\n\n\nif __name__=='__main__':\n\n \n feature_path = r'./feats/selected' \n label_path = r'./label'\n out_path = r'./pred'\n algo_name = 'DFGC1st_withstd_feats360'\n\n # load training sets\n feats_train = read_mat(os.path.join(feature_path, 'DFGC-train_'+algo_name+'.mat'))\n\n df_train = pd.read_csv(os.path.join(label_path, 'train_set.csv'), skiprows=[])\n names_train = list(df_train['file'])\n mos_train = np.array(list(df_train['mos']), dtype=np.float)\n\n # load test sets\n phase = ['1', '2', '3']\n feats_test = []\n names_test = []\n for p in phase:\n feats = read_mat(os.path.join(feature_path, 'DFGC-test'+p+'_'+algo_name+'.mat'))\n df_test = pd.read_csv(os.path.join(label_path, 'test_set'+p+'.txt'), sep=',', names=['file'])\n feats_test.append(feats)\n names_test.append(list(df_test['file']))\n\n # train SVR model\n # param_grid and rnd_seed are used when grid searching for the best hyper parameters of SVR\n # you may modify them for a better result\n param_grid = {'C': np.logspace(1, 10, 10, base=2), 'gamma': np.logspace(-8, 1, 10, base=2)}\n rnd_seed = 42\n model = train_single_epoch(feats_train, mos_train, param_grid, rnd_seed)\n \n # predict and save\n for p in phase:\n # set output_pred=True to retun prediction results\n pred_test = test_single_epoch(feats_test[int(p)-1], 
regressor=model, output_pred=True)\n out_df = pd.DataFrame({'file':names_test[int(p)-1], 'pred_mos':pred_test})\n out_df.to_csv(os.path.join(out_path, 'DFGC-test'+p+'_'+algo_name+'_pred.txt'), index=None, header=None)\n\n","repo_name":"bomb2peng/DFGC-VRA-starterkit","sub_path":"train_and_pred.py","file_name":"train_and_pred.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"71206761795","text":"from random import randint\n\n\ndef perfectGetal(getal):\n som = 0\n\n for i in range(1, getal):\n if getal % i == 0:\n som += i\n\n if som == getal:\n return \"Perfect\"\n else:\n return \"Niet perfect\"\n\n\ndef genereer_getal():\n random_getal1 = randint(1, 3)\n random_getal2 = randint(1, 3)\n random_getal3 = randint(1, 3)\n random_getal4 = randint(1, 3)\n\n string = f\"{random_getal1}{random_getal2}{random_getal3}{random_getal4}\"\n\n return int(string)\n\n\ndef main():\n random_getal = randint(1, 100)\n\n getal = int(input(\"Geef een geheel getal in: \"))\n print(f\"Is {getal} een perfect getal? {perfectGetal(getal)}\")\n print(f\"Is {random_getal} een perfect getal? {perfectGetal(random_getal)}\")\n print(f\"Is {genereer_getal()} een perfect getal? {perfectGetal(genereer_getal())}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"WoutGrovenPXL/IT-Essentials","sub_path":"5_functies/extra_oefeningen/oef5.3.py","file_name":"oef5.3.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1777817149","text":"from exe.export.exportmediaconverter import ExportMediaConverter\nfrom exe.export.xmlpage import XMLPage\n\"\"\"\nHangmanBlock can render and process HangmanIdevices as XHTML and Javascript to\nmake a game\n\"\"\"\n\nimport logging\nfrom exe.webui.block import Block\nfrom exe.webui.element import TextAreaElement\nfrom exe.webui.element import ImageElement\nfrom exe.webui import common\nfrom exe.webui.element import TextElement\nfrom exe.engine.extendedfieldengine import field_engine_is_delete_request\n\nlog = logging.getLogger(__name__)\n\n\n# ===========================================================================\nclass HangmanBlockInc(Block):\n \"\"\"\n ExampleBlock can render and process ExampleIdevices as XHTML\n GenericBlock will replace it..... one day\n \"\"\"\n def __init__(self, parent, idevice):\n Block.__init__(self, parent, idevice)\n self.titleElement = TextElement(idevice.titleField)\n self.contentElement = TextAreaElement(idevice.content)\n self.contentElement.height = 250\n self.chanceImageElements = []\n\n #go through all image fields in the list and create an image element linked to that field\n for chanceImageField in idevice.chanceImageFields:\n newImgElement = ImageElement(chanceImageField)\n self.chanceImageElements.append(newImgElement)\n\n self.wordElements = []\n self.hintElements = []\n #go through all of the word fields and hint fields and create an \n for wordIndex, word in enumerate(idevice.wordTextFields):\n newWordElement = TextElement(word)\n self.wordElements.append(newWordElement)\n newHintElement = TextElement(idevice.hintTextFields[wordIndex])\n self.hintElements.append(newHintElement)\n\n #make an element for the alphabet\n self.alphabetElement = TextElement(idevice.alphabet)\n\n #element for the messages that are shown to the player\n self.wrongGuessTextElement = TextAreaElement(self.idevice.wrongGuessMessageField)\n self.lostLevelTextElement = TextAreaElement(self.idevice.lostLevelMessageField)\n self.levelPassedTextElement = TextAreaElement(self.idevice.levelPasssedMessageField)\n self.gameWonTextElement = TextAreaElement(self.idevice.gameWonMessageField)\n \n self.letterButtonStyleElement = TextElement(self.idevice.letterButtonStyle)\n self.wrongLetterButtonStyleElement = TextElement(self.idevice.wrongLetterButtonStyle)\n self.rightLetterButtonStyleElement = TextElement(self.idevice.rightLetterButtonStyle)\n\n self.hintFieldStyleElement = TextElement(self.idevice.hintFieldStyle)\n self.wordAreaStyleElement = TextElement(self.idevice.wordAreaStyle)\n\n self.resetButtonTextElement = TextElement(self.idevice.resetButtonText)\n self.resetButtonStyleElement = TextElement(self.idevice.resetButtonStyle)\n \n\n def process(self, request):\n \"\"\"\n Process the request arguments from the web server to see if any\n apply to this block\n \"\"\"\n \n #Make sure that we don't do anything when it's time to die...\n Block.process(self, request)\n self.idevice.message = \"\"\n \n if field_engine_is_delete_request(request):\n return\n \n self.idevice.addGameScript()\n\n self.titleElement.process(request)\n self.idevice.title = self.titleElement.renderView()\n self.alphabetElement.process(request)\n self.wrongGuessTextElement.process(request)\n self.lostLevelTextElement.process(request)\n self.levelPassedTextElement.process(request)\n self.gameWonTextElement.process(request)\n\n self.letterButtonStyleElement.process(request)\n self.wrongLetterButtonStyleElement.process(request)\n 
self.rightLetterButtonStyleElement.process(request)\n self.hintFieldStyleElement.process(request)\n self.wordAreaStyleElement.process(request)\n\n self.resetButtonTextElement.process(request)\n self.resetButtonStyleElement.process(request)\n \n #see if we need to delete a word\n blankWords = False\n for wordIndex in range(0, len(self.wordElements)):\n if self.wordElements[wordIndex].renderView() == \"\":\n blankWords = True\n elif self.hintElements[wordIndex].renderView() == \"\":\n blankWords = True\n \n if blankWords is True:\n self.idevice.message = _(\"One or more words or hints are blank. Please do not have any blank hints or words - you can delete unused ones.\")\n self.idevice.edit = True\n \n \n #see if we need to add another chance\n if (\"addChance\"+unicode(self.id)) in request.args: \n self.idevice.addChance()\n self.idevice.edit = True\n # disable Undo once a question has been added:\n self.idevice.undo = False\n \n if(\"addWord\"+unicode(self.id)) in request.args:\n self.idevice.addWord()\n self.idevice.edit = True\n self.idevice.undo = False\n\n content = self.contentElement.process(request)\n for imgElement in self.chanceImageElements:\n imgElement.process(request)\n if \"action\" in request.args and request.args[\"action\"][0] == imgElement.id:\n self.idevice.chanceImageFields.remove(imgElement.field)\n imgElement.field.idevice.undo = False\n imgElement.field.idevice.edit = True\n \n\n for wordElement in self.wordElements:\n wordElement.process(request)\n if \"action\" in request.args and request.args[\"action\"][0] == wordElement.id:\n wordIdx = self.wordElements.index(wordElement)\n self.idevice.wordTextFields.remove(wordElement.field)\n self.idevice.hintTextFields.remove(self.hintElements[wordIdx].field)\n wordElement.field.idevice.undo = False\n wordElement.field.idevice.edit = True\n \n for hintElement in self.hintElements:\n hintElement.process(request)\n\n if content:\n self.idevice.content = content\n\n #\n # Get an TextArea render back according to mode\n def _renderHTMLElement(self, mode, element, containerId = None):\n retVal = \"\"\n idStr = \"\"\n if containerId is not None:\n idStr = \" id='%s' \" % containerId\n retVal += \"\" % idStr\n if mode == \"preview\":\n retVal += element.renderPreview()\n else:\n retVal += element.renderView()\n \n retVal += \"
\"\n return retVal\n #\n # This will generate the HTML elements and javascript that will be required\n # for this to be shown as a Javascript game in the web browser\n # \n def _renderGame(self, style, mode = \"view\"):\n hangmanGameId = \"hangman\" + self.id\n \n resPath = \"\"\n if mode == \"preview\":\n resPath = \"/templates/\" \n \n html = u\"\\n\"\n html += common.ideviceHeader(self, style, mode)\n html += \"\" % {\"gameId\" : hangmanGameId}\n html += self._renderHTMLElement(mode, self.wrongGuessTextElement, \"hmwrong\" + hangmanGameId)\n html += self._renderHTMLElement(mode, self.lostLevelTextElement, \"hmlost\" + hangmanGameId)\n html += self._renderHTMLElement(mode, self.levelPassedTextElement, \"hmpassed\" + hangmanGameId)\n html += self._renderHTMLElement(mode, self.gameWonTextElement, \"hmwon\" + hangmanGameId)\n \n html += \"
\"\n html += u\"\n \"\"\" % {\"gameid\" : hangmanGameId }\n \n html += messagesStr\n \n\n html += \"\"\n #render view of these images\n for imgElement in self.chanceImageElements:\n if imgElement.field.imageResource and imgElement.field.imageResource is not None:\n html += \"
\"\n \n if mode == \"view\":\n html += imgElement.renderView()\n else: \n html += imgElement.renderPreview()\n html += \"
\"\n \n html += \"
\"\n\n messageTopMargin = (imgMaxHeight - 30) / 2\n gameWidth = max(600, imgMaxWidth)\n gameAreaHTML = \"\"\"\n\n
\n \n
\n
\n
\n\n
\n
\n
\n
\n
\n
\n\n \"\"\" % { \"gameId\" : hangmanGameId, \"width\" : gameWidth, \"height\": imgMaxHeight, \\\n \"messagetopmargin\" : messageTopMargin, 'hintStyle' : self.hintFieldStyleElement.renderView(), \\\n 'wordStyle' : self.wordAreaStyleElement.renderView(), 'resetText' : self.resetButtonTextElement.renderView(), \\\n 'resetStyle' : self.resetButtonStyleElement.renderView() }\n html += gameAreaHTML\n html += \"\" % hangmanGameId\n\n return html\n\n\n\n def renderEdit(self, style):\n \"\"\"\n Returns an XHTML string with the form element for editing this block\n \"\"\"\n html = u\"\\n\"\n html += common.ideviceShowEditMessage(self)\n \n \n html += self.titleElement.renderEdit()\n html += self.contentElement.renderEdit()\n html += self.alphabetElement.renderEdit()\n\n #messages to show the user for different events\n html += self.wrongGuessTextElement.renderEdit()\n html += self.lostLevelTextElement.renderEdit()\n html += self.levelPassedTextElement.renderEdit()\n html += self.gameWonTextElement.renderEdit()\n html += self.resetButtonTextElement.renderEdit()\n\n divId = \"fieldtype_advanced\" + self.id\n html += \"
\"\n \n html += _(\"Show Advanced Options\") + \"
\"\n html += \"
\"\n \n #styles for buttons\n html += self.letterButtonStyleElement.renderEdit()\n html += self.wrongLetterButtonStyleElement.renderEdit()\n html += self.rightLetterButtonStyleElement.renderEdit()\n\n #style of the text fields\n html += self.hintFieldStyleElement.renderEdit()\n html += self.wordAreaStyleElement.renderEdit()\n\n html += self.resetButtonStyleElement.renderEdit()\n html += \"
\"\n \n #render edit of these images\n for imgElement in self.chanceImageElements:\n html += imgElement.renderEdit()\n html += common.submitImage(imgElement.id, imgElement.field.idevice.id, \n \"/images/stock-cancel.png\",\n _(\"Remove This Life\")) + \"
\"\n\n addChanceButtonLabel = _(\"Add Chance\")\n html += common.submitButton(\"addChance\"+unicode(self.id), addChanceButtonLabel)\n html += \"
\"\n\n #show words to be guessed\n html += _(\"
Words to Guess \")\n for wordIndex in range(0, len(self.wordElements)):\n word = self.wordElements[wordIndex]\n html += word.renderEdit()\n html += self.hintElements[wordIndex].renderEdit()\n html += \"
\"\n if wordIndex > 0:\n html += common.submitImage(word.id, word.field.idevice.id, \n \"/images/stock-cancel.png\",\n _(\"Remove This Word\")) + \"
\"\n \n html += common.submitButton(\"addWord\"+unicode(self.id), _(\"Add Word\")) \n html += \"
\"\n html += self.renderEditButtons()\n html += u\"
\\n\"\n return html\n\n\n def renderPreview(self, style):\n \"\"\"\n Returns an XHTML string for previewing this block\n \"\"\"\n html = u\"\\n\"\n html += self.contentElement.renderView()\n html += self._renderGame(style, mode = \"preview\")\n\n html += self.renderViewButtons()\n html += \"
\\n\"\n return html\n\n def renderXML(self, style):\n xml = u\"\"\n \n mediaConverter = ExportMediaConverter.getInstance()\n width = mediaConverter.getProfileWidth()\n height = mediaConverter.getProfileHeight()\n \n if mediaConverter is not None:\n for imgElement in self.chanceImageElements:\n if imgElement.field.imageResource is not None:\n mediaConverter.resizeImg(XMLPage.currentOutputDir/imgElement.field.imageResource.storageName, \\\n width, height, {}, {\"resizemethod\" : \"stretch\"})\n \n xml += \"\\n\" % self.idevice.id\n xml += \"\\n\"\n for imgElement in self.chanceImageElements:\n if imgElement.field.imageResource is not None:\n xml += \" \\n\" % imgElement.field.imageResource.storageName\n \n xml += \" \\n\"\n \n xml += \"%s \\n\" % self.alphabetElement.renderView()\n xml += \" \\n\" % self.wrongGuessTextElement.renderView()\n xml += \" \\n\" % self.lostLevelTextElement.renderView()\n xml += \" \\n\" % self.levelPassedTextElement.renderView()\n xml += \" \\n\" % self.gameWonTextElement.renderView()\n \n xml += \"\"\n for wordIndex in range(0, len(self.wordElements)):\n word = self.wordElements[wordIndex]\n if word != \"\":\n xml += \"\\n%(hint)s \\n%(answer)s \\n \\n\" \\\n % {\"answer\" : word.renderView() , \"hint\" : self.hintElements[wordIndex].renderView()}\n \n xml += \" \\n\"\n \n xml += \" \\n\"\n return xml\n\n\n def renderView(self, style):\n \"\"\"\n Returns an XHTML string for viewing this block\n \"\"\"\n html = u\"\\n\"\n html += self.contentElement.renderView()\n html += self._renderGame(style, mode = \"view\")\n html += u\"
\\n\"\n return html\n \n\n# ===========================================================================\n\"\"\"Register this block with the BlockFactory\"\"\"\nfrom exe.engine.hangmanidevice import HangmanIdeviceInc\nfrom exe.webui.blockfactory import g_blockFactory\ng_blockFactory.registerBlockType(HangmanBlockInc, HangmanIdeviceInc) \n\n# ===========================================================================\n","repo_name":"exelearning/iteexe","sub_path":"exe/webui/hangmanblock.py","file_name":"hangmanblock.py","file_ext":"py","file_size_in_byte":19283,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"61"}
+{"seq_id":"74196132993","text":"\nfrom flask import Flask\nimport logging\nimport os\n\n#import src.api.config as config\n#import src.api.utils as utils\n\nimport config\nimport utils\n\napp = Flask(__name__)\n\nlogger = utils.setup_logger()\n\n\n\n\n@app.route('/')\ndef hello_world():\n return 'Running Correctly!'\n\n\n\n\n\nif __name__ == '__main__':\n\n if config.STARTUP[\"DOWNLOAD\"]:\n utils.downloadAllData()\n\n if config.STARTUP[\"EXTRACT\"]:\n utils.extractData()\n\n if config.STARTUP[\"PARSE\"]:\n data = utils.parseData()\n\n if config.STARTUP[\"REBUILD_DB\"]:\n utils.buildDB(data)\n\n #app.run(debug=config.DEBUG, host = config.HOST)\n app.run( host=config.HOST)","repo_name":"maattfox/COT-Dashboard","sub_path":"src/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17613567513","text":"import logging\n\nfrom unittest import TestCase\nfrom mock import patch, Mock, create_autospec, MagicMock, sentinel, ANY\n\nfrom mock_interface import create_interface_mock\n\nfrom Products.ZenHub.zenhub import ZenHub\nfrom Products.ZenHub.invalidationmanager import (\n coroutine,\n DeviceComponent,\n FILTER_EXCLUDE,\n FILTER_INCLUDE,\n filter_obj,\n IInvalidationFilter,\n IInvalidationProcessor,\n InvalidationManager,\n InvalidationPipeline,\n oid_to_obj,\n POSKeyError,\n PrimaryPathObjectManager,\n set_sink,\n transform_obj,\n)\n\n\nPATH = {\"src\": \"Products.ZenHub.invalidationmanager\"}\n\n\nclass InvalidationManagerTest(TestCase):\n def setUp(t):\n logging.disable(logging.CRITICAL)\n\n t.get_utility_patcher = patch(\n \"{src}.getUtility\".format(**PATH), autospec=True\n )\n t.getUtility = t.get_utility_patcher.start()\n t.addCleanup(t.get_utility_patcher.stop)\n\n t.dmd = Mock(\n name=\"dmd\", spec_set=[\"getPhysicalRoot\", \"pauseHubNotifications\"]\n )\n t.log = Mock(name=\"log\", spec_set=[\"debug\", \"warn\", \"info\"])\n t.syncdb = Mock(name=\"ZenHub.async_syncdb\", spec_set=[])\n t.poll_invalidations = Mock(\n name=\"ZenHub.storage.poll_invalidations\", spec_set=[]\n )\n\n t.send_event = Mock(ZenHub.sendEvent, name=\"ZenHub.sendEvent\")\n t.im = InvalidationManager(\n t.dmd, t.log, t.syncdb, t.poll_invalidations, t.send_event\n )\n\n def test___init__(t):\n t.assertEqual(t.im._InvalidationManager__dmd, t.dmd)\n t.assertEqual(t.im.log, t.log)\n t.assertEqual(t.im._InvalidationManager__syncdb, t.syncdb)\n t.assertEqual(\n t.im._InvalidationManager__poll_invalidations, t.poll_invalidations\n )\n t.assertEqual(t.im._InvalidationManager__send_event, t.send_event)\n\n t.assertEqual(t.im._currently_paused, False)\n t.assertEqual(t.im.totalEvents, 0)\n t.assertEqual(t.im.totalTime, 0)\n t.getUtility.assert_called_with(IInvalidationProcessor)\n t.assertEqual(t.im.processor, t.getUtility.return_value)\n\n @patch(\"{src}.getUtilitiesFor\".format(**PATH), autospec=True)\n def test_initialize_invalidation_filters(t, getUtilitiesFor):\n MockIInvalidationFilter = create_interface_mock(IInvalidationFilter)\n filters = [MockIInvalidationFilter() for i in range(3)]\n # Weighted in reverse order\n for i, fltr in enumerate(filters):\n fltr.weight = 10 - i\n getUtilitiesFor.return_value = [\n (\"f%s\" % i, f) for i, f in enumerate(filters)\n ]\n\n initialized_filters = t.im.initialize_invalidation_filters(t.dmd)\n\n for fltr in filters:\n fltr.initialize.assert_called_with(t.dmd)\n\n # check sorted by weight\n filters.reverse()\n t.assertListEqual(initialized_filters, filters)\n\n @patch(\"{src}.time\".format(**PATH), autospec=True)\n def test_process_invalidations(t, time):\n \"\"\"synchronize with the database, and poll invalidated oids from it,\n filter the oids, send them to the invalidation_processor\n \"\"\"\n timestamps = [10, 20]\n time.side_effect = timestamps\n t.im._paused = create_autospec(t.im._paused, return_value=False)\n t.poll_invalidations.return_value = [sentinel.oid]\n\n def process_oid(oid):\n t.im._queue.add(oid)\n\n invalidation_pipeline = create_autospec(t.im.invalidation_pipeline)\n t.im.invalidation_pipeline = invalidation_pipeline\n t.im.invalidation_pipeline.side_effect = process_oid\n\n t.im.process_invalidations()\n\n t.syncdb.assert_called_with()\n t.poll_invalidations.assert_called_with()\n t.im.invalidation_pipeline.run.assert_called_with(sentinel.oid)\n t.im.processor.processQueue.assert_called_with(t.im._queue)\n 
t.assertEqual(t.im._queue, set())\n\n t.assertEqual(t.im.totalTime, timestamps[1] - timestamps[0])\n t.assertEqual(t.im.totalEvents, 1)\n\n def test__syncdb(t):\n t.im._syncdb()\n t.syncdb.assert_called_with()\n\n def test__paused_pause(t):\n t.im._currently_paused = False\n t.im._InvalidationManager__dmd.pauseHubNotifications = True\n\n ret = t.im._paused()\n\n t.assertEqual(ret, True)\n t.send_event.assert_called_with(t.im._invalidation_paused_event)\n\n def test__paused_currently_paused(t):\n t.im._currently_paused = True\n t.im._InvalidationManager__dmd.pauseHubNotifications = True\n\n ret = t.im._paused()\n\n t.assertEqual(ret, True)\n t.send_event.assert_not_called()\n\n def test__paused_unpause(t):\n t.im._currently_paused = True\n t.im._InvalidationManager__dmd.pauseHubNotifications = False\n\n ret = t.im._paused()\n\n t.assertEqual(ret, False)\n t.send_event.assert_called_with(t.im._invalidation_unpaused_event)\n\n def test_poll_invalidations(t):\n ret = t.im._poll_invalidations()\n t.assertEqual(ret, t.poll_invalidations.return_value)\n\n def test__send_event(t):\n t.im._send_event(sentinel.event)\n t.send_event.assert_called_with(sentinel.event)\n\n\nclass InvalidationPipelineTest(TestCase):\n \"\"\"A Pipeline that filters and transforms an invalidated oid,\n before sending it to IInvalidationProcessor\n \"\"\"\n\n def setUp(t):\n t.mocks = {}\n for obj in [\"subscribers\", \"getUtility\"]:\n patcher = patch(\"{src}.{}\".format(obj, **PATH), autospec=True)\n t.mocks[obj] = patcher.start()\n t.addCleanup(patcher.stop)\n\n # constructor parameters\n t.app = MagicMock(name=\"dmd.root\", spec_set=[\"_p_jar\", \"zport\"])\n t.filters = [Mock(name=\"filter_a\"), Mock(name=\"filter_b\")]\n t.sink = set()\n # Environment, and args\n t.device = MagicMock(PrimaryPathObjectManager, __of__=Mock())\n t.device_obj = sentinel.device_obj\n t.device.__of__.return_value.primaryAq.return_value = t.device_obj\n\n t.oid = 111\n t.app._p_jar = {t.oid: t.device}\n adapter = Mock(name=\"transform adapter\", spec_set=[\"transformOid\"])\n adapter.transformOid.side_effect = lambda x: x\n adapters = [adapter]\n t.mocks[\"subscribers\"].return_value = adapters\n\n t.invalidation_pipeline = InvalidationPipeline(\n t.app, t.filters, t.sink\n )\n\n def test_invalidation_pipeline(t):\n t.invalidation_pipeline.run(t.oid)\n\n t.assertEqual(t.sink, set([t.oid]))\n\n def test__build_pipeline(t):\n __pipeline = t.invalidation_pipeline._build_pipeline()\n __pipeline.send(t.oid)\n\n t.assertEqual(t.sink, set([t.oid]))\n\n @patch(\"{src}.log\".format(**PATH), autospec=True)\n def test_run_handles_exceptions(t, log):\n \"\"\"An exception in any of the coroutines will first raise the exception\n then cause StopIteration exceptions on subsequent runs.\n we handle the first exception and rebuild the pipeline\n \"\"\"\n x = \"invalid key\"\n with t.assertRaises(KeyError):\n t.invalidation_pipeline._InvalidationPipeline__pipeline.send(x)\n\n t.invalidation_pipeline.run(x) # causes an exception\n t.invalidation_pipeline.run(t.oid)\n\n log.exception.assert_called_with(ANY)\n t.assertEqual(t.sink, set([t.oid]))\n # ensure the dereferenced pipeline is cleaned up safely\n import gc\n\n gc.collect()\n\n\nclass coroutine_Test(TestCase):\n def test_coroutine_decorator(t):\n \"\"\"Used to create our pipe segments.\n parameters configure the segment\n call .send() to provide input through yield\n \"\"\"\n\n @coroutine\n def magnitude(mag, output):\n while True:\n input = yield\n output.send(mag * input)\n\n output = 
Mock(spec_set=[\"send\"])\n mag10 = magnitude(10, output)\n\n mag10.send(1)\n output.send.assert_called_with(10)\n mag10.send(2)\n output.send.assert_called_with(20)\n\n\nclass oid_to_obj_Test(TestCase):\n def setUp(t):\n t.sink = Mock(name=\"sink\", spec_set=[\"send\"])\n t.out_pipe = Mock(name=\"output_pipe\", spec_set=[\"send\"])\n\n def test_oid_to_obj(t):\n device = MagicMock(PrimaryPathObjectManager, __of__=Mock())\n device_obj = sentinel.device_obj\n device.__of__.return_value.primaryAq.return_value = device_obj\n app = sentinel.dmd_root\n app.zport = sentinel.zport\n app.zport.dmd = sentinel.dmd_root\n app._p_jar = {111: device}\n\n oid_to_obj_pipe = oid_to_obj(app, t.sink, t.out_pipe)\n oid_to_obj_pipe.send(111)\n\n t.out_pipe.send.assert_called_with((111, device_obj))\n\n def test__oid_to_object_poskeyerror(t):\n \"\"\"oids not found in dmd are considered deletions,\n and sent straight to the output sink\n \"\"\"\n app = MagicMock(name=\"dmd.root\", spec_set=[\"_p_jar\"])\n app._p_jar.__getitem__.side_effect = POSKeyError()\n\n oid_to_obj_pipe = oid_to_obj(app, t.sink, t.out_pipe)\n oid_to_obj_pipe.send(111)\n\n t.sink.send.assert_called_with([111])\n\n def test__oid_to_object_deleted_primaryaq_keyerror(t):\n \"\"\"objects without a primaryAq are considered deletions,\n and sent straight to the output sink\n \"\"\"\n deleted = MagicMock(DeviceComponent, __of__=Mock())\n deleted.__of__.return_value.primaryAq.side_effect = KeyError\n app = sentinel.dmd_root\n app._p_jar = {111: deleted}\n\n oid_to_obj_pipe = oid_to_obj(app, t.sink, t.out_pipe)\n oid_to_obj_pipe.send(111)\n\n t.sink.send.assert_called_with([111])\n\n def test__oid_to_object_exclude_unsupported_types(t):\n \"\"\"Exclude any unspecified object types\"\"\"\n unsupported = MagicMock(name=\"unsupported type\", __of__=Mock())\n app = sentinel.dmd_root\n app._p_jar = {111: unsupported}\n\n oid_to_obj_pipe = oid_to_obj(app, t.sink, t.out_pipe)\n oid_to_obj_pipe.send(111)\n\n t.sink.send.assert_not_called()\n t.out_pipe.send.assert_not_called()\n\n\nclass filter_obj_Test(TestCase):\n \"\"\"Run the given object through each registered IInvalidationFilter;\n drop any that are specifically excluded by a filter\n \"\"\"\n\n def setUp(t):\n MockIInvalidationFilter = create_interface_mock(IInvalidationFilter)\n t.filter = MockIInvalidationFilter()\n\n t.included = sentinel.included\n t.excluded = sentinel.excluded\n\n def include(obj):\n if obj is t.included:\n return FILTER_INCLUDE\n elif obj is t.excluded:\n return FILTER_EXCLUDE\n else:\n return \"FILTER_CONTINUE\"\n\n t.filter.include = include\n\n t.out_pipe = Mock(name=\"output_pipe\", spec_set=[\"send\"])\n t.filter_object_pipe = filter_obj([t.filter], t.out_pipe)\n\n def test__filters_object(t):\n t.filter_object_pipe.send((111, t.included))\n t.out_pipe.send.assert_called_with((111, t.included))\n\n def test__filters_object_exclude(t):\n t.filter_object_pipe.send((111, t.excluded))\n t.out_pipe.send.assert_not_called()\n\n def test__filters_object_fallthrough(t):\n t.filter_object_pipe.send((111, sentinel.other))\n t.out_pipe.send.assert_called_with((111, sentinel.other))\n\n\nclass transform_obj_Test(TestCase):\n @patch(\"{src}.IInvalidationOid\".format(**PATH), autospec=True)\n @patch(\"{src}.subscribers\".format(**PATH), autospec=True)\n def test__transform_obj(t, subscribers, IInvalidationOid):\n \"\"\"given an oid: object pair\n gets a list of transforms for the object\n executes the transforms given the oid\n returns a set of oids returned by the transforms\n \"\"\"\n target = Mock(name=\"target\", spec_set=[\"send\"])\n adapter_a = Mock(\n name=\"adapter_a\",\n spec_set=[\"transformOid\"],\n transformOid=lambda x: x + \"0\",\n )\n subscribers.return_value = [adapter_a]\n adapter_b = Mock(\n name=\"adapter_b\",\n spec_set=[\"transformOid\"],\n transformOid=lambda x: [x + \"1\", x + \"2\"],\n )\n IInvalidationOid.return_value = adapter_b\n oid = \"oid\"\n obj = sentinel.object\n\n transform_pipe = transform_obj(target)\n transform_pipe.send((oid, obj))\n\n target.send.assert_called_with({\"oid0\", \"oid1\", \"oid2\"})\n\n\nclass set_sink_Test(TestCase):\n def test_set_sink_accepts_a_set(t):\n output = set()\n set_sink_pipe = set_sink(output)\n set_sink_pipe.send({\"a\", \"a\", \"b\", \"c\"})\n t.assertEqual(output, {\"a\", \"b\", \"c\"})\n\n def test_set_sink_accepts_a_tuple(t):\n output = set()\n set_sink_pipe = set_sink(output)\n set_sink_pipe.send((\"a\",))\n t.assertEqual(output, {\"a\"})\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenHub/tests/test_invalidationmanager.py","file_name":"test_invalidationmanager.py","file_ext":"py","file_size_in_byte":12899,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
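The `coroutine_Test` above drives `magnitude` with `.send()` immediately after construction, which only works if the `coroutine` decorator primes the generator. A minimal sketch of such a decorator, inferred from the test's behaviour rather than copied from the Zenoss source:

```python
import functools

def coroutine(func):
    """Prime a generator-based pipeline stage so .send() works immediately."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # advance to the first `yield` before handing the stage back
        return gen
    return wrapper
```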
+{"seq_id":"73998048","text":"def hasValidPath(self, grid: List[List[int]]) -> bool:\n \n directions = {1 : [1, 4], 2:[2, 3], 3:[1, 3], 4:[4, 3], 5:[2, 1], 6:[2, 4]}\n final_directions = { (3, 1): 2, (3, 3): 4, (4,3): 1, (4,4): 2, (5,2): 4, (5,1): 3, (6,2): 1, (6,4): 3}\n \n m, n = len(grid), len(grid[0])\n \n x, y = 0, 0\n \n dir1, dir2 = directions[grid[0][0]][0], directions[grid[0][0]][1]\n \n curr_dir = None\n \n if grid[0][0] in [1, 6]:\n curr_dir = 1\n elif grid[0][0] in [2, 3, 4]:\n curr_dir = 2\n else: # 5\n return False\n \n \n while 0<=x D2n_1\n APU = np.roll(np.identity(2**total_qubits), 1, axis=1)\n # APU means Amplitude Permutation Unitary matrix\n # Create the horizontal and vertical scan circuits\n circ_list = build_image_circuits(total_qubits, normalized_horizontal, normalized_vertical, APU, backend)\n\n # STEP 3: codigo para obtener el vector de estado de cada uno de los circuitos obtenidos\n # backend = Aer.get_backend('statevector_simulator')\n # extract the results and the statevectors\n job = backend.run(circ_list)\n result = job.result()\n statevector_horizontal = result.get_statevector(circ_list[0])\n statevector_vertical = result.get_statevector(circ_list[1])\n\n # STEP 4: obtención del borde de la imagen\n # Defining a lambda function for\n # thresholding to binary values\n threshold = lambda amp: (amp > 10**(-3) or amp < -10**(-3))\n # threshold = lambda amp: (amp > 10**(-4) or amp < -10**(-4))\n\n # Selecting odd states from the raw statevector and\n # reshaping column vector of size 64 to an 8x8 matrix\n edge_scan_h = np.abs(np.array([1 if threshold(statevector_horizontal[2*i+1].real) else 0 for i in range(2**data_qubits)])).reshape(dimension, dimension)\n edge_scan_v = np.abs(np.array([1 if threshold(statevector_vertical[2*i+1].real) else 0 for i in range(2**data_qubits)])).reshape(dimension, dimension).T\n\n\n # STEP 5: union en la imagen original\n # Combining the horizontal and vertical component of the result\n edge_scan_sim = edge_scan_h | edge_scan_v\n\n return edge_scan_sim\n\n\n\n\n\n# -------------------------------------------------------------\n# CROPPING THE ORIGINAL IMAGE:\nprint(\"CROPPING THE ORIGINAL IMAGE\")\n\n\nimage_splits = split_image(image, image_crop_size)\n\n# probamos a representar una de las imagenes para ver que esta correcta y es la imagen correspondiente al cuadrante 0-32 vertical 64-96 horizontal\nplt.imshow(image_splits[2], extent=[0, image_splits[2].shape[0], image_splits[2].shape[1], 0], cmap='viridis')\nplt.show()\n\n\n# -------------------------------------------------------------\n# EXTRACTING QUBIT QUANTITY:\nprint(\"EXTRACTING QUBIT QUANTITY\")\n\nfrom math import log2\n# Initialize some global variable for number of qubits\ndata_qubits = int(log2(image_crop_size*image_crop_size))\nanc_qubits = 1\ntotal_qubits = data_qubits + anc_qubits\n\n# -------------------------------------------------------------\n# TRYING WITH JUST A FRACTION:\nprint(\"TRYING WITH JUST A FRACTION\")\nimage_split_edges = image_edge_finder(image=image_splits[2], total_qubits=total_qubits, data_qubits=data_qubits, dimension=image_crop_size, backend=backend)\n\n# Plotting the original and edge-detected images\nplot_image(image_splits[2], 'Original image')\nplot_image(image_split_edges, 'Edge Detected image')\n\n# -------------------------------------------------------------\n# TRYING WITH THE WHOLE IMG:\n\nsmaller_array_edges = []\n# calculation of all the final image representations\nfor element in image_splits:\n 
smaller_array_edges.append(image_edge_finder(image=element, total_qubits=total_qubits, data_qubits=data_qubits, dimension=image_crop_size, backend=backend))\n\n# reconstructing of the image and plotting\nimage_edges = reconstruct_image(smaller_arrays=smaller_array_edges, split_size=8)\n# plotting both the original image and the new one\nplot_image(image, 'Original image')\nplot_image(image_edges, 'Edge Detected image')","repo_name":"Quintanaaalberto/ciclab23","sub_path":"src/algorithms/EdgeDetection_v3_bigger_better_buggier.py","file_name":"EdgeDetection_v3_bigger_better_buggier.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
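The scan above hinges on `APU = np.roll(np.identity(2**total_qubits), 1, axis=1)`, described in the comments as the amplitude permutation unitary. A quick numpy check, on a toy dimension, that this matrix is the cyclic decrement, mapping each basis state |k> to |k-1 mod n> so neighbouring pixel amplitudes can be compared:

```python
import numpy as np

n = 8  # toy dimension standing in for 2**total_qubits
APU = np.roll(np.identity(n), 1, axis=1)

v = np.zeros(n)
v[3] = 1.0                      # all amplitude on basis state |3>
print(int(np.argmax(APU @ v)))  # -> 2, i.e. |k> is mapped to |k-1 mod n>
```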
+{"seq_id":"5699611186","text":"# Baekjoon Online Judge - 2589번. 보물섬\n\nfrom collections import deque\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef bfs(x, y):\n visited[x][y] = 0\n queue = deque()\n queue.append((x, y))\n temp = 0\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or nx >= N or ny < 0 or ny >= M:\n continue\n else:\n if visited[nx][ny] == -1 and treasure_map[nx][ny] == 'L':\n queue.append((nx, ny))\n visited[nx][ny] = visited[x][y] + 1\n # 가장 최근의 것이 긴 것\n temp = visited[nx][ny]\n return temp\n\n\nN, M = map(int, input().split())\ntreasure_map = [list(input()) for _ in range(N)]\nans = 0\nfor i in range(N):\n for j in range(M):\n if treasure_map[i][j] == 'L':\n visited = [[-1] * M for _ in range(N)]\n max_dist = bfs(i, j)\n if ans < max_dist:\n ans = max_dist\nprint(ans)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_2589.py","file_name":"BOJ_2589.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33946464601","text":"import numpy as np\nimport pandas as pd\nfrom scipy.odr import *\n\nTHERMOGENESYS = 1/7700 # kilos weight gain per calorie, (Katan & Ludwig, 2010)\n\n# HELIUS study\ndf1 = pd.read_csv('data/data.csv', decimal=',', delimiter=';').set_index('Heliusnr')\n# Additional study on energy intake in calories done by Mary Nicolaou\n# for participants of the HELIUS study\ndf2 = pd.read_csv(\n 'data/energy_intake.csv', decimal=',', delimiter=';'\n ).set_index('Heliusnr')\n# Additional study on calories burnt per SQUASH minute exercise\n# for participants of the HELIUS study (Nicolaou et al., 2016)\ndf3 = pd.read_csv(\n 'data/squash_data.csv', delimiter=','\n ).set_index('Heliusnr')\ndf = df1.join(df2)\n\n### ==================\n### Defining functions\n### ==================\n\ndef reformat(l):\n \"\"\"\n Identifies missing data\n \"\"\"\n return np.array([i if i != -1 else np.nan for i in l])\n\ndef toFloat(l):\n \"\"\"\n Changes comma-seperated strings from first list to dot-seperated strings\n Changes strings to floats\n \"\"\"\n new = l.str.replace(',', '.')\n new = pd.to_numeric(new, errors='coerce')\n return new\n\ndef r(l, select):\n \"\"\"\n Selects data points according to select array\n \"\"\"\n return np.array([i for n, i in enumerate(l) if not select[n]])\n\ndef f(p, q):\n \"\"\"\n Fitting function for linear BMI regression\n \"\"\"\n dummy_q = np.vstack(([1 for n in range(len(q))],q))\n return np.dot(dummy_q.T, p)\n\ndef fitBMI(x0, x1):\n \"\"\"\n Linear BMI regression on body-image pictures\n \"\"\"\n input_data = x0\n output_data = x1\n\n # Set up ODR with the model and data.\n data = RealData(input_data, output_data)\n odr = ODR(data, Model(f), beta0=[0, 0])\n\n # Run the regression.\n return odr.run().beta\n\n### ==============\n### Defining data\n### ==============\n\n\"\"\" Ethnicity \"\"\"\nethnicities = reformat(df[\"H1_EtnTotaal\"])\n\n\"\"\" State determinants \"\"\"\nstress = df[\"H1_PsychStress\"]\nweight = (df[\"H1_LO_GemGewicht\"], df[\"H1_Gewicht\"])\nsleep = df[\"H1_SlaapInUren\"]\nenergy_intake = df[\"ENKcal_Sum\"] # QUITE A LOT OF DATA MISSING FOR THIS\nincome = df[\"inkomen\"].apply(lambda x: x/365.0)\nmax_exercise = max(df[\"H1_Squash_totmwk\"])\nexercise = df[\"H1_Squash_totmwk\"].apply(lambda x: x*960/max_exercise) # IN MINUTES PER DAY\ndiscrimination = df[\"H1_Discr_meanscore\"]\n\n\"\"\" Weight values \"\"\"\nsex = df[\"H1_geslacht\"]\nage = df[\"H1_lft\"]\nlength = (df[\"H1_LO_GemLengte\"], df[\"H1_Lengte\"])\nbmi_picture = df[\"H1_LichGelijk_unjumbled\"]\nideal_body_image = df[\"H1_LichWens_unjumbled\"]\n\n### ======================\n### Missing weight and length measurement are filled with self-reported value\n### ======================\n\ntemp_weight = toFloat(weight[0])\nrepl_weight = weight[1]\nweight = temp_weight.fillna(repl_weight)\n\ntemp_length = toFloat(length[0])\nrepl_length = length[1]\nlength = temp_length.fillna(repl_length)\ninv_length = length.apply(lambda x: 1/((.01*x)**2)) # in 1/m^2, for BMI\n\n### ======================\n### BMI is calculated\n### ======================\n\nbmi = weight*inv_length\n\n### ======================\n### Moving to numpy data structure\n### ======================\n\npd_variables = [\n stress, weight, sleep, energy_intake, income,\n bmi, exercise, discrimination\n ]\nvariables = np.array([reformat(d) for d in pd_variables])\n\npd_weight_values = [sex, age, length, inv_length, bmi_picture, ideal_body_image]\nweight_values = np.array([reformat(a) for a in pd_weight_values])\n\n### 
======================\n### Removing incomplete data entries\n### Seperating data that only misses calory intake data\n### ======================\n\ndata = np.copy(np.vstack([variables, weight_values]))\ntotalPoints = len(df)\nselect = np.zeros(totalPoints)\nfor n, d in enumerate(data):\n h = d.copy()\n h[~np.isnan(h)] = 0\n h[np.isnan(h)] = 1\n select += h\nselect[select>1] = 1\n\nethn_data = r(ethnicities, select)\n\nstress = r(data[0], select)\nweight = r(data[1], select)\nsleep = r(data[2], select)\nenergy_intake = r(data[3], select)\nincome = r(data[4], select)\nbmi = r(data[5], select)\nexercise = r(data[6], select)\ndiscrimination = r(data[7], select)\n\nsex = r(data[8], select)\nage = r(data[9], select)\nlength = r(data[10], select)\ninv_length = r(data[11], select)\nbmi_picture = r(data[12], select)\nideal_body_image = r(data[13], select)\n\n### ======================\n### FITTING PERCEIVED FATNESS ON BMI UNITS\n###\n### 1) Fitting BMI's to self-reported antropometry picture\n### 2) Using fit to determine ideal BMI\n### 3) Perceived fatness is defined as the difference between 1) and 2)\n### (Clearification: The numbers from 'bmi_picture' and 'ideal_body_image'\n### are self-reported numbers selected on images from the HELIUS study)\n### ======================\n\np = fitBMI(bmi_picture, bmi) # 1)\nideal_body_image = f(p, ideal_body_image) # 2)\nperceived_fatness = bmi - ideal_body_image # 3)\n\n### ======================\n### Determining squash-energy-expenditure\n### Additional study on calories burnt per SQUASH minute exercise\n### for participants of the HELIUS study (Nicolaou et al., 2016)\n### ======================\n\nsquash_p = df3['H1_Squash_totmwk']\nAEE_p = df3['AEE_mean']\nsquash, AEE = [], []\nfor s, a in zip(squash_p, AEE_p):\n if s != ' ' and s != '0' and \\\n a != ' ' and a != '0':\n squash.append(float(s))\n AEE.append(float(a.replace(',','.')))\n\ncal_min = np.mean(np.array(AEE)/np.array(squash))\nAEE = [cal_min for i in range(len(select[select==0]))]\n\n### ======================\n### Predicting energy expenditure and returning data\n### ======================\n\n# Energy expenditure in rest from Schofield equations\n# \"Human energy requirements\" - Report of a Joint FAO/WHO/UNU Expert\n# Consultation, Rome, 17–24 October 2001\nmale, female = 1, 2\nconstant_rest_energy, variable_rest_energy = [], []\nfor s, a in zip(sex, age):\n if s == male:\n if a <= 30:\n variable_rest_energy.append(15.057)\n constant_rest_energy.append(692.2)\n elif a <= 60:\n variable_rest_energy.append(11.472)\n constant_rest_energy.append(873.1)\n else:\n variable_rest_energy.append(11.711)\n constant_rest_energy.append(587.7)\n\n if s == female:\n if a <= 30:\n variable_rest_energy.append(14.818)\n constant_rest_energy.append(486.6)\n elif a <= 60:\n variable_rest_energy.append(8.126)\n constant_rest_energy.append(845.6)\n else:\n variable_rest_energy.append(9.082)\n constant_rest_energy.append(658.5)\n\n# Defining cleaned data\nvariables_data = np.array([\n perceived_fatness, stress, weight, sleep, energy_intake, income,\n exercise, discrimination\n ])\n\nthermogenesys = np.array([THERMOGENESYS for i in range(len(select[select==0]))])\nweights_data = np.array([\n ideal_body_image, constant_rest_energy, inv_length, variable_rest_energy,\n AEE, thermogenesys\n ])\n\n# Defining functions to request data from this module\ndef selectOnEthnicity(eth):\n \"\"\"\n Selects all data for one of three ethnic groups\n\n Input: One of three strings, 'NL', 'HIND' or 'MAROK'\n Returns: Tuple of two 
numpy arrays\n One array with all data on the variables\n One array with all data on some of the weights\n \"\"\"\n ethnicDict = {'NL':1, 'HIND':2, 'MAROK':8}\n num = ethnicDict[eth]\n\n select = [0 if e == num else 1 for e in ethn_data]\n\n return (r(variables_data.T, select),\n r(weights_data.T, select))\n\ndef selectRandom(amount):\n \"\"\"\n Selects all data for a given number of participants, selected at random.\n\n Input: The amount of participants that should be returned\n Returns: Tuple of two numpy arrays\n One array with all data on the variables\n One array with all data on some of the weights\n \"\"\"\n # variables_data has one column per complete participant record,\n # so its column count is the population we sample from\n select = np.zeros(variables_data.shape[1])\n select[amount:] = 1\n np.random.shuffle(select)\n\n return (r(variables_data.T, select),\n r(weights_data.T, select))\n","repo_name":"jj1993/SEGV","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
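`fitBMI` above fits a straight line through the body-image scores with orthogonal distance regression. A self-contained sketch of the same `scipy.odr` pattern on synthetic data (the numbers are made up; only the API usage mirrors `f` and `fitBMI`):

```python
import numpy as np
from scipy.odr import ODR, Model, RealData

def linear(p, q):
    # p[0] + p[1]*q, written in the same dummy-column style as f() above
    dummy_q = np.vstack(([1 for _ in range(len(q))], q))
    return np.dot(dummy_q.T, p)

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = 2.0 + 0.5 * x  # synthetic, noise-free line
fit = ODR(RealData(x, y), Model(linear), beta0=[0.0, 0.0]).run()
print(fit.beta)  # ~ [2.0, 0.5]
```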
+{"seq_id":"75158289795","text":"\nimport pandas as pd\nimport seaborn as sns \nfrom bs4 import BeautifulSoup\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport warnings; warnings.simplefilter('ignore')\nfrom chromedriver_py import binary_path # this will get you the path variable\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium.webdriver.common.by import By\nimport numpy as np\n\n\ndef scrape_site(park):\n service = Service()\n options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(service=service, options=options)\n\n link = \"https://www.nps.gov/\"+park+\"/planyourvisit/directions.htm\"\n driver.get(link)\n #driver.find_element_by_xpath('//*[@id=\"anch_15\"]').click()\n page_source = driver.page_source\n soup = BeautifulSoup(page_source, 'lxml')\n elems = driver.find_elements(By.TAG_NAME, \"a\")\n website_list=[]\n raw_list=[]\n website_content=[]\n raw_soup_list = []\n website_location = []\n\n for elem in elems:\n try:\n if \"planyourvisit\" in str(elem.get_attribute('href')) and str(elem.get_attribute('href')) not in raw_list: #only want plan your visit sites\n z= str(elem.get_attribute('href'))\n raw_list.append(z)\n website_list.append(z)\n except:\n pass\n\n for x in website_list:\n try:\n driver.get(x)\n \n page_location = driver.find_element(By.ID, \"breadcrumbs\")\n website_location.append(page_location.text)\n page_source = driver.page_source\n elems = driver.find_elements(By.TAG_NAME, \"a\")\n soup = BeautifulSoup(page_source, 'lxml')\n raw_content = soup.get_text(strip=True) #all text fields are scraped\n website_content.append(raw_content) #raw content added to list of all content\n raw_soup_list.append(soup)\n except:\n #This means that the webpage doesn't exist\n website_content.append(\"page doesn't exist\")\n raw_soup_list.append(\"page doesn't exist\")\n website_location.append(\"page doesn't exist\")\n\n driver.close() #close driver link at end of scrape\n dict = {'website page': website_list, 'content': website_content, 'soup': raw_soup_list, \"website location\": website_location} #create dataframe for park data\n park_data = pd.DataFrame(dict)\n park_data['park']=park\n # print(\"done with scrape\")\n return park_data\n\n\ndef Traveler_Info_Finder(park):\n \"\"\"\n Find the following fields:\n #Public transportation information\n #Alternative Fueling Stations\n #Bike/Pedestrian Information\n #Driving directions\n \"\"\"\n\n\n AFS_list = []\n Bike_Ped_count = []\n Directions_page_count = []\n Pub_Transit_count = []\n Direction_majorcount = []\n Direction_count = []\n Congestion_count = []\n Travel_dist_count = []\n Travel_dist_other_count=[]\n Accessibility_count=[]\n Parking_count=[]\n Parking_plan_count=[]\n Accessibility_information_count = []\n\n\n Directions_Words = [\"Entrance\",\"Center\",\"street\",\"Visitor\"\n \"Street\",\"parking\",\"directions\",\"Route\",\"Road\",\n \"Interstate\",\"Exit\",\n \"mile\",\"km\",\"ferry\",\"access\", \"Street\",\"Blvd\", \"Hwy\"\n ]\n\n Directions_MajorWords = [\n \"GPS Coordinates\", \"GPS coordinates\", \"GPS device address\", \"GPS address\",\n \"Latitude\",\"Longitude\",\"Street\",\n \"Blvd\", \"Boulevard\", \"Ln.\",\"Rd.\",\"Pl.\",\n \"Hwy\",\"Exit\",\"Interstate\",\"US Highway\", \"U.S. 
Highway\", \"Indian Head Highway\", \"Turnpike\",\"beltway\",\"Causeway\"\n \"Secondary Route\", \"State Route\", \"I-\",\"State Highway\"\n ]\n\n\n Public_Transit_Words = [\n \"Public Transportation\", \"public transportation\",\"Public transportation\",\n \"bus schedule\", \"Bus schedule\", \"shuttle\", \"shuttles\",\"Shuttle\",\n \"bus stops\", \"buses stop\", \"ferry\",\"transit\",\"Transit\"\n ]\n\n Congestion_Words = [\n \"congestion\",\"Congestion\", \"congested\"\n ]\n\n BicyclePed_Words = [\n \"Bicyclists\",\"bicyclists\",\"cyclists\",\"pedestrians\",\"biking\"\n #,\"biking\",\"Biking\"\n ]\n\n Travel_dist_Words = [\n 'miles'\n ]\n\n Travel_dist_other_Words = [\n 'Places To Go',\"Popular Destinations\"\n ]\n\n Accessibility_Words = [\n \"wheelchair\", \"accessibility\", \"disability\", \"impaired\", \"disabilities\", \"handicap\",\n \"accessible\",\"Wheelchair\"\n ]\n\n Parking_Words = [\n \"parking\", \"Parking\", \"pullout\"\n ]\n\n\n #this will get the number of sites that have keywords\n count=0\n for x in park[\"content\"]:\n try:\n y=0\n if \"Department of Energy\" in x and \"Alternative Fueling Station\" in x:\n y=1\n AFS_list.append(y)\n else:\n y=0\n AFS_list.append(y)\n if any(substring in x for substring in Public_Transit_Words):\n y=1\n Pub_Transit_count.append(y)\n else:\n y=0\n Pub_Transit_count.append(y)\n if any(substring in x for substring in Directions_MajorWords):\n y=1\n Direction_majorcount.append(y)\n else:\n y=0\n Direction_majorcount.append(y)\n if any(substring in x for substring in BicyclePed_Words):\n y=1\n Bike_Ped_count.append(y)\n else:\n y=0\n Bike_Ped_count.append(y)\n if any(substring in x for substring in Congestion_Words):\n y=1\n Congestion_count.append(y)\n else:\n y=0\n Congestion_count.append(y)\n if any(substring in x for substring in Travel_dist_Words):\n y=1\n Travel_dist_count.append(y)\n else:\n y=0\n Travel_dist_count.append(y)\n if any(substring in x for substring in Travel_dist_other_Words):\n y=1\n Travel_dist_other_count.append(y)\n else:\n y=0\n Travel_dist_other_count.append(y)\n if any(substring in x for substring in Parking_Words):\n y=1\n Parking_count.append(y)\n else:\n y=0\n Parking_count.append(y)\n if any(substring in x for substring in Directions_Words):\n y=1\n Directions_page_count.append(y)\n else:\n y=0\n Directions_page_count.append(y)\n except:\n y=0\n AFS_list.append(y)\n Pub_Transit_count.append(y)\n Direction_majorcount.append(y)\n Bike_Ped_count.append(y)\n Congestion_count.append(y)\n Travel_dist_count.append(y)\n Travel_dist_other_count.append(y)\n Parking_count.append(y)\n Directions_page_count.append(y)\n\n\n\n\n#this section will get the total number of times that keywords show up on all sites for a park\n ps = PorterStemmer()\n lem = WordNetLemmatizer()\n\n stemmed_words=[]\n\n for x in park['content']:\n z=0\n z2=0\n z3=0\n\n major = 0\n congestion = 0\n pubtrans=0\n bikeped=0\n try:\n tokenized_word=word_tokenize(x)\n filtered_sent=[]\n stemmed_words=[]\n direction_words_temp = []\n for w in tokenized_word:\n if w not in stopwords:\n filtered_sent.append(w)\n for w in filtered_sent:\n if w in Directions_Words:\n z += 1\n if w in Parking_Words:\n z2 +=1\n if w in Accessibility_Words:\n z3 += 1\n Direction_count.append(z)\n Parking_plan_count.append(z2)\n Accessibility_count.append(z3)\n Accessibility_info = np.where(np.logical_or(z2>2,z3>2),1,0)\n Accessibility_information_count.append(Accessibility_info)\n except:\n Direction_count.append(0)\n Parking_plan_count.append(0)\n 
Accessibility_count.append(0)\n Accessibility_information_count.append(0)\n\n park[\"Alternative_Fueling_Stations\"]=AFS_list\n park[\"MajorDirections_count\"]=Direction_majorcount\n park[\"Directions_count\"]=Direction_count\n park[\"Directions_page_count\"]=Directions_page_count\n park[\"Public_transportation_information\"]=Pub_Transit_count\n park[\"Congestion_information\"]=Congestion_count\n park[\"Bike_Pedestrian_Information\"]=Bike_Ped_count\n park[\"Travel_dist_information\"]=Travel_dist_count\n park[\"Travel_other_dist_information\"]=Travel_dist_other_count\n park['Accessibility_intro_information']=Accessibility_count\n park['Parking_information']=Parking_count\n park['Parking_experience_information']=Parking_plan_count\n park['Parking_max_on_one_site']=park['Parking_experience_information']\n park['Accessibility_information']=Accessibility_information_count\n\n\n# park['Accessibility_information']=np.where(\n# np.logical_or(park['Accessibility_intro_information']>2,\n## park['Parking_experience_information']>2),1,0)\n\n park_final = park.groupby('park', as_index=False).agg({\n \"MajorDirections_count\": \"sum\",\n \"Directions_count\": \"sum\",\n \"Directions_page_count\":\"sum\",\n \"Public_transportation_information\": \"sum\",\n \"Alternative_Fueling_Stations\":\"sum\",\n \"Bike_Pedestrian_Information\":\"sum\",\n 'Congestion_information':'sum',\n 'Travel_dist_information':'sum',\n 'Travel_other_dist_information':'sum',\n 'Accessibility_information':'sum',\n 'Parking_information':'sum',\n 'Parking_experience_information':'sum',\n 'Parking_max_on_one_site':'max',\n \"website page\":\"count\",\n })\n\n # park_final2 = park.groupby('park')['Directions_word_list'].apply(lambda x: ','.join(x))\n # park_final = park_final.merge(park_final2, on=\"park\")\n\n return park_final\n","repo_name":"VolpeUSDOT/NPS-Emerging-Mobility","sub_path":"NPS digital product content/VE_scraper_functions.py","file_name":"VE_scraper_functions.py","file_ext":"py","file_size_in_byte":10256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
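One caveat in `Traveler_Info_Finder` above: the word-counting loop tests `if w not in stopwords`, but `stopwords` is never defined or imported in this file, so the `try` block raises `NameError` and every page falls into the `except` branch with zero counts. Presumably the intended definition is the NLTK stop word list, along these lines:

```python
from nltk.corpus import stopwords as nltk_stopwords

# Assumed definition; VE_scraper_functions.py itself never binds `stopwords`.
stopwords = set(nltk_stopwords.words('english'))
```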
+{"seq_id":"9232078839","text":"import pandas as pd\r\n\r\n# Load the dataset into a pandas DataFrame\r\ndata = pd.read_csv('true.csv')\r\ndata = pd.read_csv('fake.csv')\r\n\r\n# Check the structure of the dataset\r\nprint(data.head())\r\nimport re\r\n\r\ndef clean_text(text):\r\n # Remove non-alphanumeric characters and extra whitespaces\r\n text = re.sub(r'[^a-zA-Z\\s]', '', text)\r\n text = ' '.join(text.split())\r\n return text\r\n\r\ndata['text'] = data['text'].apply(clean_text)\r\ndata['text'] = data['text'].str.lower()\r\nfrom nltk.tokenize import word_tokenize\r\n\r\ndata['text'] = data['text'].apply(word_tokenize)\r\nfrom nltk.corpus import stopwords\r\n\r\nstop_words = set(stopwords.words('english'))\r\n\r\ndef remove_stopwords(tokens):\r\n return [word for word in tokens if word not in stop_words]\r\n\r\ndata['text'] = data['text'].apply(remove_stopwords)\r\ndata['label'] = data['label'].map({'fake': 0, 'real': 1})\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX = data['text']\r\ny = data['label']\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\ntfidf_vectorizer = TfidfVectorizer(max_features=5000)\r\nX_train_tfidf = tfidf_vectorizer.fit_transform(X_train)\r\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\r\n\r\n\r\n","repo_name":"amaankhan09/fake_news_phase3","sub_path":"phase3.py","file_name":"phase3.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70451756356","text":"\"\"\"\n322. Coin Change\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n dp = [-1]*(amount+1)\n #Initial dp[0] to 0\n dp[0] = 0\n # try each coins.\n for coin in coins:\n for i in range(1, amount+1):\n # May not reachable\n if i - coin >= 0 and dp[i-coin] != -1:\n if dp[i] == -1:\n dp[i] = dp[i-coin] + 1\n else:\n dp[i] = min([dp[i], dp[i-coin] + 1])\n return dp[-1]\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0322_coin_change.py","file_name":"0322_coin_change.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37251447014","text":"'''\nCreated on 31 oct. 2019\n\n@author: jean\n'''\n\nfinbucle=True\nsalida=0\n\n\nwhile finbucle:\n print( \" 1.- Naranja \\n\" ,\"2.- Manzana \\n\", \"3.- Pera\")\n opcion=input(\"Introduzca una Opción:\")\n \n \n if opcion == \"1\":\n print (\"has escogido Naranja\")\n print(\"\\n\")\n salida=input(\"Desea Salir? \"+ \"\\n Si = 1 \\n No = 2\")\n \n if salida == \"1\":\n finbucle=False\n else:\n finbucle=True\n \n elif opcion == \"2\":\n print(\"has escogio Manzana\")\n print(\"\\n\")\n salida=input(\"Desea Salir? \"+ \"\\n Si = 1 \\n No = 2\")\n \n if salida == \"1\":\n finbucle=False\n else:\n finbucle=True\n \n elif opcion == \"3\":\n print(\"has escogido Pera\")\n print(\"\\n\")\n salida=input(\"Desea Salir? \"+ \"\\n Si = 1 \\n No = 2\")\n \n if salida == \"1\":\n finbucle=False\n else:\n finbucle=True\n \n else:\n print (\"No has colocado ninguna opcion\")\n print(\"\\n\") \n \n ","repo_name":"jcmloiacono/Python3-Personal","sub_path":"Practicas/whileTrue.py","file_name":"whileTrue.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13356534913","text":"import os\nimport sys\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.externals import joblib\n\nif (len(sys.argv) != 4) :\n\tprint('\\nUsage: python3 sklearn-train.py datasets_path models_path svm_kernel\\n\\nsvm_kernel: one of linear, poly, rbf, sigmoid, or linearsvc \"which trains an OVA linear model\".');\n\tsys.exit()\n\ndataset_path = sys.argv[1]\nmodels_path = sys.argv[2]\nsvm_kernel = sys.argv[3].casefold()\n\nfiles = {}\n# r=root, d=directories, f=files\nfor r, d, f in os.walk(dataset_path):\n for file in f:\n files[file]=os.path.join(r, file)\n\nif not os.path.exists(models_path):\n os.makedirs(models_path)\n\n\nfor file in files:\n file_no_ext = file\n if (file_no_ext.find('.') != -1) :\n file_no_ext = file_no_ext[:file_no_ext.find('.')]\n\n # These are the classifiers that permit training data with sample weights!\n models_names = [svm_kernel]\n \n if svm_kernel == 'linearsvc' :\n classifiers = [LinearSVC()]\n else :\n classifiers = [SVC(kernel=svm_kernel)]\n\n print(\"file name :\", file)\n data = pd.read_csv(files[file], delimiter=r\"\\s+\").dropna().iloc[:200000]\n \n # if records equals to classes number, duplicates the data\n if data.shape[0] == data.iloc[:,0].nunique():\n data = data.append(data)\n\n # words (features) encoding\n from sklearn.preprocessing import OrdinalEncoder\n enc = OrdinalEncoder(dtype=np.int32)\n features = enc.fit_transform(data.iloc[:,2:])\n\n # save the encoder \n enc_name = os.path.join(models_path, 'encoder'+'-'+file_no_ext)[:256]\n if os.path.exists(enc_name):\n continue \n joblib.dump(enc, enc_name)\n\n # target and weights\n target = data.iloc[:,0]\n weights = data.iloc[:,1].values\n \n print(\"Rules(classes) number :\",target.nunique())\n print(\"Words(features) number :\",features.shape[1])\n print(\"Records number :\",features.shape[0])\n print(data.iloc[:target.nunique(),:] , '\\n')\n\n # split to train and test\n X_train, X_test, y_train, y_test, w_train, w_test = \\\n train_test_split(features, target, weights, test_size=.5, random_state=0, stratify=target)\n\n # train models and print their scores\n for name, model in zip(models_names, classifiers):\n print(\"model :\", name, \",\", end = '')\n model.fit(X=X_train, y=y_train, sample_weight=w_train)\n score = model.score(X=X_test, y=y_test, sample_weight=w_test)\n print(\" score =\", score)\n \n # save models\n model_name = os.path.join(models_path, name+'-'+file_no_ext)[:256]\n joblib.dump(model, model_name)\n print(\"----------------------------------------------\\n\")\n","repo_name":"aboelhamd/Weighted-transfer-rules-module","sub_path":"sklearn-train.py","file_name":"sklearn-train.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10468252265","text":"##############################################################\n# Imports\n##############################################################\nimport sys \n\n##############################################################\n# Constants\n##############################################################\nprimitiveInfo = UDPPrimitiveTypeInfo(\n\tname = \"Rectangular Spiral\",\n\tpurpose = \"Create a Rectangular Spiral in XY plane\",\n\tcompany = \"Ansys\",\n\tdate = \"12-5-2012\",\n version = \"2.0\")\n\ndefaultPrimitiveParams = [\n\t \"0.0\", \n \"0.0\",\n \"5.0\",\n \"2\",\n \"2.0\",\n \"1.0\" \n]\nprimitiveParamDefs = [ UDPPrimitiveParameterDefinition2(\n \"Xpos\", \n \"X Position of start point\", \n UnitType.LengthUnit, \n ParamPropType.Value, \n ParamPropFlag.MustBeReal, \n UDPParam(ParamDataType.Double,defaultPrimitiveParams[0])), #1 parameter\n\t \n UDPPrimitiveParameterDefinition2(\n \"Ypos\", \n \"Y Position of start point\", \n UnitType.LengthUnit, \n ParamPropType.Value, \n ParamPropFlag.MustBeReal, \n UDPParam(ParamDataType.Double, defaultPrimitiveParams[1])), #2 parameter\n \n UDPPrimitiveParameterDefinition2(\n \"Dist\", \n \"Distance between turns\", \n UnitType.LengthUnit, \n ParamPropType.Value, \n ParamPropFlag.MustBeReal, \n UDPParam(ParamDataType.Double,defaultPrimitiveParams[2])), #3 parameter\n \n UDPPrimitiveParameterDefinition2(\n \"Turns\", \n \"Number of turns\", \n UnitType.NoUnit, \n ParamPropType.Number, \n ParamPropFlag.MustBeInt, \n UDPParam(ParamDataType.Int, defaultPrimitiveParams[3])), #4 parameter\n \n UDPPrimitiveParameterDefinition2(\n \"Width\", \n \"Width of the spiral\", \n UnitType.LengthUnit, \n ParamPropType.Value, \n ParamPropFlag.MustBeReal, \n UDPParam(ParamDataType.Double, defaultPrimitiveParams[4])), #5 parameter\n \n UDPPrimitiveParameterDefinition2(\n \"Thickness\", \n \"Thickness/height of the spiral\", \n UnitType.LengthUnit, \n ParamPropType.Value, \n ParamPropFlag.MustBeReal, \n UDPParam(ParamDataType.Double, defaultPrimitiveParams[5]))] #6 parameter\n\n\nnumParams = 6\nlengthUnits = \"mm\"\n\nregisteredFaceNames = [ \"InnerEndFace\", \"OuterEndFace\"]\nregisteredEdgeNames = [ \"Inner-B\", \"Inner-L\", \"Inner-T\", \"Inner-R\",\n \"Outer-B\", \"Outer-L\", \"Outer-T\", \"Outer-R\"]\nregisteredVertexNames = [\"Inner-B-L\", \"Inner-L-T\", \"Inner-T-R\", \"Inner-R-B\",\n \"Outer-B-L\", \"Outer-L-T\", \"Outer-T-R\", \"Outer-R-B\"]\n\n\n##############################################################\n# Class Implementation\n##############################################################\nclass UDPExtension(IUDPExtension):\n\n def __init__(self):\n m_StartPt = UDPPosition(0,0,0)\n m_EndPt = UDPPosition(0,0,0)\n\n#----------------------------------------------\n# Interface implementations\n#-----------------------------------------------\n\n def CreatePrimitive2(self, funcLib, paramValues):\n path = self._CreatePath(funcLib, paramValues) \n if (path < 0):\n funcLib.AddMessage(MessageSeverity.ErrorMessage, \"Could not create path\")\n profile = self._CreateProfile(funcLib, paramValues)\n if (profile < 0):\n funcLib.AddMessage(MessageSeverity.ErrorMessage, \"Could not create profile\")\n\n theUDPSweepOptions = UDPSweepOptions(SweepDraftType.RoundDraft, 0.0, 0.0)\n bRet = funcLib.SweepAlongPath(profile, path, theUDPSweepOptions)\n if (bRet == False):\n funcLib.AddMessage(MessageSeverity.ErrorMessage, \"Could not sweep profile along path\") \n self._NameEntities(funcLib, paramValues)\n return bRet\n\n\n def 
GetPrimitiveTypeInfo(self):\n return primitiveInfo\n\n def GetLengthParameterUnits(self):\n return lengthUnits\n\n def GetPrimitiveParametersDefinition2(self):\n return primitiveParamDefs\n \n def GetRegisteredFaceNames(self):\n return registeredFaceNames\n\n def GetRegisteredEdgeNames(self):\n return registeredEdgeNames\n\n def GetRegisteredVertexNames(self):\n return registeredVertexNames\n \n def AreParameterValuesValid2(self, error, udpParams):\n numTurns = udpParams[3].Data\n if (numTurns < 1):\n error.Add(\"Number of turns cannot be less than 1.\")\n return False\n dist = udpParams[2].Data\n width = udpParams[4].Data\n height = udpParams[5].Data\n \n if (dist <= 0):\n error.Add(\"Distance should be more than 0.\")\n return False\n\n if (width <= 0):\n error.Add(\"Width should be more than 0.\")\n return False\n\n if (height <= 0):\n error.Add(\"Height should be more than 0.\")\n return False\n\n if (dist <= width):\n error.Add(\"Distance between turns should be more than the width.\")\n return False\n return True\n#----------------------------------------------\n# Private functions creating geometry of spiral\n#-----------------------------------------------\n def _CreatePath(self, funcLib, paramValues):\n xStart = paramValues[0].Data\n yStart = paramValues[1].Data\n zStart = 0.0\n\n dist = paramValues[2].Data\n numTurns = paramValues[3].Data\n\n numPoints = 2 + 4*numTurns\n numSegments = numPoints - 1\n\n thePointArrayX = []\n thePointArrayY = []\n thePointArray = []\n for indexPt in xrange(0, numPoints):\n thePointArrayX.append(xStart)\n thePointArrayY.append(yStart)\n\n thePointArrayY[1] = yStart\n thePointArrayX[numPoints-1] = xStart\n\n for indexPt in xrange(0, numTurns):\n xIndex = indexPt*4 + 1\n yIndex = xIndex + 1\n coord = dist*(indexPt + 1)\n\n thePointArrayX[xIndex] = xStart - coord\n thePointArrayX[xIndex + 1] = xStart - coord\n thePointArrayX[xIndex + 2] = xStart + coord\n thePointArrayX[xIndex + 3] = xStart + coord\n\n thePointArrayY[yIndex] = yStart + coord\n thePointArrayY[yIndex + 1] = yStart + coord\n thePointArrayY[yIndex + 2] = yStart - coord\n thePointArrayY[yIndex + 3] = yStart - coord\n\n for indexPt in xrange(0, numPoints):\n thePointArray.append(UDPPosition(thePointArrayX[indexPt], thePointArrayY[indexPt], zStart))\n\n self._m_StartPt = thePointArray[0]\n self._m_EndPt = thePointArray[numPoints - 1]\n\n theSegArray = []\n for indexSeg in xrange(0, numSegments):\n theSegDefinition = UDPPolylineSegmentDefinition(PolylineSegmentType.LineSegment,\n indexSeg,\n 0, 0.0, UDPPosition(0,0,0), CoordinateSystemPlane.XYPlane)\n theSegArray.append(theSegDefinition)\n\n thePolylineDefinition = UDPPolylineDefinition(thePointArray, theSegArray, 0, 0)\n return funcLib.CreatePolyline(thePolylineDefinition)\n\n def _CreateProfile(self, funcLib, paramValues):\n xStart = paramValues[0].Data\n yStart = paramValues[1].Data\n \n width = paramValues[4].Data\n height = paramValues[5].Data\n\n numPoints = 5\n numSegments = numPoints - 1\n \n thePointArray = []\n thePointArray.append(UDPPosition(xStart, yStart - (width/2.0), -height/2.0))\n thePointArray.append(UDPPosition(xStart, yStart + (width/2.0), -height/2.0))\n thePointArray.append(UDPPosition(xStart, yStart + (width/2.0), height/2.0))\n thePointArray.append(UDPPosition(xStart, yStart - (width/2.0), height/2.0))\n thePointArray.append(UDPPosition(xStart, yStart - (width/2.0), -height/2.0))\n\n theSegArray = []\n for indexSeg in xrange(0, numSegments):\n theSegDefinition = 
UDPPolylineSegmentDefinition(PolylineSegmentType.LineSegment,\n indexSeg,\n 0, 0.0, UDPPosition(0,0,0), CoordinateSystemPlane.XYPlane)\n theSegArray.append(theSegDefinition)\n\n thePolylineDefinition = UDPPolylineDefinition(thePointArray, theSegArray, 1, 1)\n return funcLib.CreatePolyline(thePolylineDefinition)\n\n def _NameEntities(self, funcLib, paramValues):\n # Name faces\n funcLib.NameAFace(self._m_StartPt, registeredFaceNames[0])\n funcLib.NameAFace(self._m_EndPt, registeredFaceNames[1])\n\n width = paramValues[4].Data\n height = paramValues[5].Data\n\n # Inner face edges\n # Inner face edge - Bottom\n posOnEdge = []\n posOnEdge.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y, self._m_StartPt.Z - height/2.0))\n # Inner face edge - Left\n posOnEdge.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y - width/2.0, self._m_StartPt.Z))\n # Inner face edge - Top\n posOnEdge.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y, self._m_StartPt.Z + height/2.0))\n # Inner face edge - Right\n posOnEdge.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y + width/2.0, self._m_StartPt.Z))\n # Outer face edges\n # Outer face edge - Bottom\n posOnEdge.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y, self._m_EndPt.Z - height/2.0))\n # Outer face edge - Left\n posOnEdge.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y - width/2.0, self._m_EndPt.Z))\n # Outer face edge - Top\n posOnEdge.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y, self._m_EndPt.Z + height/2.0))\n # Outer face edge - Right\n posOnEdge.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y+ width/2.0, self._m_EndPt.Z))\n \n # Inner face vertexs\n # Inner face vertex - (common to Bottom & Left edge)\n posOnVertex = []\n posOnVertex.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y - width/2.0, self._m_StartPt.Z - height/2.0))\n # Inner face vertex - (common to Left & Top edge)\n posOnVertex.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y - width/2.0, self._m_StartPt.Z + height/2.0))\n # Inner face vertex - (common to Top & Right edge)\n posOnVertex.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y + width/2.0, self._m_StartPt.Z + height/2.0))\n # Inner face vertex - (common to Right & Bottom edge)\n posOnVertex.append(UDPPosition(self._m_StartPt.X, self._m_StartPt.Y + width/2.0, self._m_StartPt.Z - height/2.0))\n\n # Outer face vertexs\n # Outer face vertex - (common to Bottom & Left edge)\n posOnVertex.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y - width/2.0, self._m_EndPt.Z - height/2.0))\n # Outer face vertex - (common to Left & Top edge)\n posOnVertex.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y - width/2.0, self._m_EndPt.Z + height/2.0))\n # Outer face vertex - (common to Top & Right edge)\n posOnVertex.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y + width/2.0, self._m_EndPt.Z + height/2.0))\n # Outer face vertex - (common to Right & Bottom edge)\n posOnVertex.append(UDPPosition(self._m_EndPt.X, self._m_EndPt.Y + width/2.0, self._m_EndPt.Z - height/2.0))\n \n for i in xrange(0, 8):\n funcLib.NameAEdge(posOnEdge[i], registeredEdgeNames[i])\n funcLib.NameAVertex(posOnVertex[i], registeredVertexNames[i])\n","repo_name":"juliusgh/Ansys-Maxwell-Scripting","sub_path":"UDP/RectangularSpiral.py","file_name":"RectangularSpiral.py","file_ext":"py","file_size_in_byte":11832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"498079367","text":"class NodoFama:\n\n def __init__(self, usuario, padre=None):\n # No modificar\n self.usuario = usuario\n self.padre = padre\n self.hijo_izquierdo = None\n self.hijo_derecho = None\n\n\nclass ArbolBinario:\n\n def __init__(self):\n # No modificar\n self.raiz = None\n\n def crear_arbol(self, nodos_fama):\n # No modificar\n for nodo in nodos_fama:\n self.insertar_nodo(nodo, self.raiz)\n\n def insertar_nodo(self, nuevo_nodo, padre=None):\n # Completar\n nodo_actual = None\n\n while padre is not None:\n nodo_actual = padre\n if nuevo_nodo.usuario.fama < padre.usuario.fama:\n padre = padre.hijo_izquierdo\n else:\n padre = padre.hijo_derecho\n \n if nodo_actual is None:\n self.raiz = nuevo_nodo\n \n elif nuevo_nodo.usuario.fama < nodo_actual.usuario.fama:\n nodo_actual.hijo_izquierdo = nuevo_nodo\n \n else:\n nodo_actual.hijo_derecho = nuevo_nodo\n \n def buscar_nodo(self, fama, padre=None):\n # Completar\n nodo_actual = self.raiz\n #Caso en que se busque la raiz\n if nodo_actual.usuario.fama == fama:\n return nodo_actual\n\n #Caso en que no se busque la raiz\n while nodo_actual.hijo_izquierdo or nodo_actual.hijo_derecho:\n\n hijo_izquierdo = nodo_actual.hijo_izquierdo\n hijo_derecho = nodo_actual.hijo_derecho\n\n if hijo_izquierdo is not None:\n if hijo_izquierdo.usuario.fama == fama:\n return hijo_izquierdo\n elif nodo_actual.usuario.fama < fama:\n nodo_actual = hijo_izquierdo\n\n elif hijo_derecho is not None:\n if hijo_derecho.usuario.fama == fama:\n return hijo_derecho\n elif nodo_actual.usuario.fama > fama:\n nodo_actual = hijo_derecho\n return None\n\n def print_arbol(self, nodo=None, nivel_indentacion=0):\n # No modificar\n indentacion = \"| \" * nivel_indentacion\n if nodo is None:\n print(\"** DCCelebrity Arbol Binario**\")\n self.print_arbol(self.raiz)\n else:\n print(f\"{indentacion}{nodo.usuario.nombre}: \"\n f\"{nodo.usuario.correo}\")\n if nodo.hijo_izquierdo:\n self.print_arbol(nodo.hijo_izquierdo,\n nivel_indentacion + 1)\n if nodo.hijo_derecho:\n self.print_arbol(nodo.hijo_derecho,\n nivel_indentacion + 1)\n","repo_name":"nicoabarca/progra_avanzada","sub_path":"Actividades/AS4/arbol.py","file_name":"arbol.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17923649067","text":"import contextlib\nimport logging\nimport os\nimport plistlib\nimport shutil\nimport tarfile\nimport tempfile\nimport time\nfrom enum import Enum\nfrom io import BytesIO\nfrom pathlib import Path\nfrom ssl import SSLEOFError\nfrom typing import Optional, Generator\nfrom zipfile import ZipFile\n\nimport requests\nfrom keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN\nfrom paramiko.config import SSH_PORT\nfrom paramiko.ssh_exception import SSHException\nfrom plumbum import local\nfrom pyimg4 import IM4P, Compression, IMG4\nfrom pyipsw.pyipsw import get_devices\nfrom pymobiledevice3 import usbmux\nfrom pymobiledevice3.exceptions import NoDeviceConnectedError, IRecvNoDeviceConnectedError, ConnectionFailedError, \\\n MuxException\nfrom pymobiledevice3.irecv import IRecv, Mode\nfrom pymobiledevice3.lockdown import LockdownClient\nfrom pymobiledevice3.restore.ipsw.ipsw import IPSW\nfrom remotezip import RemoteZip\nfrom tqdm import trange\nfrom usb import USBError\n\nfrom pylera1n.common import DEFAULT_STORAGE, PALERA1N_PATH, BOOTLOGO_PATH, wait, OS_VARIANT, \\\n blacktop_ipsw\nfrom pylera1n.exceptions import MissingProductVersionError\nfrom pylera1n.sshclient import SSHClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass KernelcachdStrategy(Enum):\n PongoKpf = 'pongo'\n Normal = 'normal'\n\n\ndef download_gaster(output: Path, os_version: str = os.uname().sysname):\n logger.info('downloading gaster')\n gaster_zip = requests.get(\n f'https://nightly.link/palera1n/gaster/workflows/makefile/main/gaster-{os_version}.zip').content\n gaster_zip = ZipFile(BytesIO(gaster_zip))\n with gaster_zip.open('gaster') as f:\n output.write_bytes(f.read())\n output.chmod(0o755)\n\n\ndef download_pogo(output: Path) -> None:\n logger.info('downloading pogo')\n pogo = requests.get('https://nightly.link/doronz88/Pogo/workflows/build/master/Pogo.zip').content\n pogo = ZipFile(BytesIO(pogo))\n with pogo.open('Pogo.ipa') as f:\n output.write_bytes(f.read())\n\n\n@contextlib.contextmanager\ndef wait_device_ssh() -> Generator[SSHClient, None, None]:\n device = None\n\n logger.info('waiting for device to be recognized via usb')\n\n while device is None:\n # wait for device to boot\n device = usbmux.select_device()\n\n logger.info('waiting for ssh server to start')\n\n sock = None\n while sock is None:\n # wait for ssh server to start\n try:\n sock = device.connect(SSH_PORT)\n except MuxException:\n pass\n\n client = None\n\n while client is None:\n try:\n client = SSHClient(sock)\n except SSHException:\n pass\n\n try:\n yield client\n finally:\n client.close()\n\n\ndef rm_tree(pth: Path) -> None:\n if not pth.exists():\n return\n\n for child in pth.glob('*'):\n if child.is_file():\n child.unlink()\n else:\n rm_tree(child)\n pth.rmdir()\n\n\nRESTORE_COMPONENTS = ('iBSS', 'iBEC', 'RestoreDeviceTree', 'RestoreRamDisk', 'RestoreTrustCache',\n 'RestoreKernelCache', 'RestoreLogo')\n\nBOOT_COMPONENTS = ('iBSS', 'iBEC', 'DeviceTree', 'StaticTrustCache', 'KernelCache', 'RestoreLogo')\n\n\nclass Pylera1n:\n def __init__(self, product_version: str = None, ramdisk_ipsw: str = None, ipsw: str = None,\n devel=False, storage: Path = DEFAULT_STORAGE):\n storage.mkdir(parents=True, exist_ok=True)\n self._storage = storage\n\n gaster_path = storage / 'gaster'\n if not gaster_path.exists():\n download_gaster(gaster_path)\n\n pogo_path = storage / 'Pogo.ipa'\n if not pogo_path.exists():\n download_pogo(pogo_path)\n\n self._board_id = None\n self._chip_id = None\n self._hardware_model: Optional[str] = 
None\n self._product_type = None\n self._product_version = product_version\n self._ecid: Optional[str] = None\n self._gaster = local[str(gaster_path)]\n self._hdiutil = None if os.uname().sysname != 'Darwin' else local['hdiutil']\n self._ramdisk_ipsw_path = ramdisk_ipsw\n self._ramdisk_ipsw: Optional[IPSW] = None\n self._ipsw_path = ipsw\n self._boot_ipsw: Optional[IPSW] = None\n self._devel = devel\n self._tips = ZipFile(pogo_path)\n self._init_device_info()\n\n if self._product_version is None:\n raise MissingProductVersionError()\n\n self._kernel_patch_file = Path(\n __file__).parent / 'kernel_patches' / f'{self._product_type}-{self._product_version}.patch'\n\n shsh_blob_dir = self._storage / 'shsh'\n shsh_blob_dir.mkdir(exist_ok=True, parents=True)\n self._storage_shsh_blob = shsh_blob_dir / f'{self._ecid}-{self._hardware_model}-{self._product_version}.der'\n\n self._storage_ramdisk_dir = self._storage / 'ramdisk' / self._hardware_model\n\n self._storage_boot_dir = self._storage / 'boot' / self._hardware_model / self._product_version\n self._storage_boot_dir.mkdir(exist_ok=True, parents=True)\n\n @property\n def in_dfu(self) -> bool:\n try:\n with LockdownClient():\n return False\n except ConnectionFailedError:\n # the device is in the midst of a reboot\n return False\n except NoDeviceConnectedError:\n with IRecv(timeout=1) as irecv:\n return irecv.mode == Mode.DFU_MODE\n\n def jailbreak(self, recreate_ramdisk=False, recreate_boot=False, install_pogo=False, fsboot=False,\n fakefs=False) -> None:\n logger.info('jailbreaking')\n\n if recreate_boot:\n rm_tree(self._storage_boot_dir)\n\n kernelcachd = None\n if fsboot:\n kernelcachd = KernelcachdStrategy.PongoKpf\n\n if not self._storage_shsh_blob.exists() or recreate_ramdisk:\n logger.info('creating ramdisk')\n self.boot_ramdisk(recreate_ramdisk)\n self.perform_ramdisk_ssh_operations(dump_blobs=True, install_pogo=install_pogo,\n enable_development_options=self._devel is True, reboot=True,\n kernelcachd=kernelcachd, fakefs=fakefs)\n\n self.enter_dfu()\n\n if fsboot:\n self._boot_boot_using_fsboot(fakefs=fakefs)\n else:\n self._boot_boot_using_bootx(fakefs=fakefs)\n\n def boot_ramdisk(self, recreate_ramdisk=False) -> None:\n \"\"\" boot into ramdisk \"\"\"\n logger.info('waiting for device to enter DFU')\n\n if recreate_ramdisk:\n rm_tree(self._storage_ramdisk_dir)\n\n self.enter_dfu()\n self._boot_ramdisk()\n\n @property\n def has_prepared_ramdisk(self) -> bool:\n for component in RESTORE_COMPONENTS:\n if not ((self._storage_ramdisk_dir / component).with_suffix('.img4').exists()):\n return False\n return True\n\n @property\n def has_prepared_boot(self) -> bool:\n for component in BOOT_COMPONENTS:\n if not ((self._storage_boot_dir / component).with_suffix('.img4').exists()):\n return False\n return True\n\n def perform_ramdisk_ssh_operations(self, dump_blobs=False, install_pogo=False,\n enable_development_options=False, kernelcachd: KernelcachdStrategy = None,\n auto_boot=False, reboot=False, fakefs=False) -> None:\n \"\"\" create blobs, install pogo and patch nvram if on non-rootless \"\"\"\n with wait_device_ssh() as ssh:\n ssh.mount_filesystems()\n\n if dump_blobs:\n logger.info(f'saving apticket to: {self._storage_shsh_blob}')\n self._storage_shsh_blob.write_bytes(ssh.apticket)\n\n if install_pogo:\n ssh.install_pogo()\n\n if enable_development_options:\n ssh.enable_development_options()\n\n if fakefs:\n ssh.create_fakefs()\n\n if kernelcachd == KernelcachdStrategy.Normal:\n logger.info('placing kernelcachd')\n remote_kernelcachd = 
ssh.active_preboot / 'System' / 'Library' / 'Caches' / 'com.apple.kernelcaches' / 'kernelcachd'\n ssh.put_file(\n self._get_boot_component('KernelCache', basename='krnl', is_restore=False, cache=False),\n remote_kernelcachd)\n ssh.chmod(remote_kernelcachd, 0o644)\n\n if kernelcachd == KernelcachdStrategy.PongoKpf:\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n\n local_kernelcache = temp_dir / 'KernelCache.im4p'\n build_identity = self.boot_ipsw.build_manifest.get_build_identity(self._hardware_model)\n component_path = build_identity.get_component_path('KernelCache')\n local_kernelcache.write_bytes(self.boot_ipsw.read(component_path))\n ssh.place_kernelcachd_using_pongo_kpf(local_kernelcache)\n\n ssh.auto_boot = auto_boot\n\n if reboot:\n ssh.reboot()\n\n @property\n def ramdisk_ipsw(self) -> IPSW:\n if self._ramdisk_ipsw is None:\n self._init_ramdisk_ipsw()\n return self._ramdisk_ipsw\n\n @property\n def _ramdisk_im4m(self) -> bytes:\n with open(PALERA1N_PATH / 'ramdisk' / 'shsh' / f'0x{self._chip_id:x}.shsh', 'rb') as costume_ramdisk:\n return plistlib.load(costume_ramdisk)['ApImg4Ticket']\n\n @property\n def _ramdisk_restore_logo(self) -> Path:\n img4_file = self._storage_ramdisk_dir / 'RestoreLogo.img4'\n if img4_file.exists():\n return img4_file\n\n logger.info('creating restore logo (ramdisk)')\n\n im4p_file = IM4P(fourcc='logo', payload=BOOTLOGO_PATH.read_bytes(), description='EmbeddedImages-121.100.10')\n img4_file.write_bytes(IMG4(im4p=im4p_file, im4m=self._ramdisk_im4m).output())\n\n return img4_file\n\n def _get_ramdisk_component(self, component: str) -> Path:\n img4_file = (self._storage_ramdisk_dir / component).with_suffix('.img4')\n if img4_file.exists():\n return img4_file\n\n logger.info(f'creating {component} (ramdisk)')\n\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n\n build_identity = self.ramdisk_ipsw.build_manifest.get_build_identity(self._hardware_model)\n\n im4p_file = temp_dir / component\n im4p_file.write_bytes(self.ramdisk_ipsw.read(build_identity.get_component_path(component)))\n self._patch_ramdisk_component(component, im4p_file, img4_file)\n return img4_file\n\n def _patch_ramdisk_component(self, component: str, im4p_file: Path, img4_file: Path) -> None:\n {\n 'iBSS': self._patch_ramdisk_ibss,\n 'iBEC': self._patch_ramdisk_ibec,\n 'RestoreKernelCache': self._patch_ramdisk_restore_kernel_cache,\n 'RestoreRamDisk': self._patch_ramdisk_restore_ramdisk,\n 'RestoreDeviceTree': self._patch_ramdisk_device_tree,\n 'RestoreTrustCache': self._patch_ramdisk_restore_trust_cache,\n }[component](im4p_file, img4_file)\n\n def _patch_ramdisk_ibss(self, im4p_file: Path, img4_file: Path) -> None:\n decrypted_iboot = im4p_file.with_suffix('.dec')\n self.decrypt(im4p_file, decrypted_iboot)\n patched_iboot_file = im4p_file.with_suffix('.patched')\n self.patch_iboot_component(decrypted_iboot, patched_iboot_file)\n\n im4p = IM4P(payload=patched_iboot_file.read_bytes(), fourcc='ibss')\n img4 = IMG4(im4p=im4p, im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_ramdisk_ibec(self, im4p_file: Path, img4_file: Path) -> None:\n decrypted_iboot = im4p_file.with_suffix('.dec')\n self.decrypt(im4p_file, decrypted_iboot)\n patched_iboot_file = im4p_file.with_suffix('.patched')\n\n boot_args = 'rd=md0 debug=0x2014e -v wdt=-1 '\n if self._chip_id in (0x8960, 0x7000, 0x7001):\n # TODO: macos variant?\n boot_args += '-restore'\n self.patch_iboot_component(decrypted_iboot, patched_iboot_file, boot_args)\n\n 
im4p = IM4P(payload=patched_iboot_file.read_bytes(), fourcc='ibec')\n img4 = IMG4(im4p=im4p, im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_ramdisk_restore_kernel_cache(self, im4p_file: Path, img4_file: Path) -> None:\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n kcache_raw = temp_dir / 'kcache.raw'\n kcache_patched = temp_dir / 'kcache.patched'\n im4p_payload = IM4P(im4p_file.read_bytes()).payload\n im4p_payload.decompress()\n kcache_raw.write_bytes(im4p_payload.output().data)\n self.patch_kernelcache(kcache_raw, kcache_patched)\n\n im4p = IM4P(fourcc='rkrn', payload=kcache_patched.read_bytes())\n im4p.payload.compress(Compression.LZSS)\n img4 = IMG4(im4p=im4p, im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_ramdisk_restore_ramdisk(self, im4p_file: Path, img4_file: Path) -> None:\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n dmg = temp_dir / 'ramdisk.dmg'\n im4p_file = IM4P(im4p_file.read_bytes())\n dmg.write_bytes(im4p_file.payload.output().data)\n\n if self._hdiutil is None:\n raise NotImplementedError('missing hdiutil')\n self._hdiutil('resize', '-size', '256MB', dmg)\n\n mountpoint = temp_dir / 'sshrd'\n mountpoint.mkdir(exist_ok=True, parents=True)\n self._hdiutil('attach', '-mountpoint', mountpoint, dmg)\n\n with tarfile.open(PALERA1N_PATH / 'ramdisk' / 'other' / 'ramdisk.tar.gz') as costum_ramdisk:\n costum_ramdisk.extractall(mountpoint)\n\n logger.info('extracting Pogo.app/* contents into /usr/local/bin/loader.app/*')\n local_app = temp_dir / 'Pogo'\n self._tips.extractall(local_app)\n loader_app = mountpoint / 'usr' / 'local' / 'bin' / 'loader.app'\n try:\n shutil.rmtree(mountpoint / loader_app)\n except FileNotFoundError:\n pass\n shutil.copytree(local_app / 'Payload' / 'Pogo.app', loader_app)\n\n logger.info('renaming /usr/local/bin/loader.app/Pogo -> /usr/local/bin/loader.app/Tips')\n shutil.move(loader_app / 'Pogo', loader_app / 'Tips')\n\n self._hdiutil('detach', '-force', mountpoint)\n self._hdiutil('resize', '-sectors', 'min', dmg)\n\n im4p = IM4P(payload=dmg.read_bytes(), fourcc='rdsk')\n img4 = IMG4(im4p=im4p, im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_ramdisk_device_tree(self, im4p_file: Path, img4_file: Path) -> None:\n im4p = IM4P(im4p_file.read_bytes())\n im4p.fourcc = 'rdtr'\n img4 = IMG4(im4p=im4p, im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_ramdisk_restore_trust_cache(self, im4p_file: Path, img4_file: Path) -> None:\n img4 = IMG4(im4p=im4p_file.read_bytes(), im4m=self._ramdisk_im4m)\n img4_file.write_bytes(img4.output())\n\n @property\n def boot_ipsw(self) -> IPSW:\n if self._boot_ipsw is None:\n self._init_boot_ipsw()\n return self._boot_ipsw\n\n @property\n def _boot_im4m(self) -> bytes:\n return self._storage_shsh_blob.read_bytes()\n\n @property\n def _boot_restore_logo(self) -> Path:\n img4_file = self._storage_boot_dir / 'RestoreLogo.img4'\n if img4_file.exists():\n return img4_file\n\n logger.info('creating restore logo (boot)')\n\n im4p_file = IM4P(fourcc='logo', payload=BOOTLOGO_PATH.read_bytes(), description='EmbeddedImages-121.100.10')\n img4_file.write_bytes(IMG4(im4p=im4p_file, im4m=self._boot_im4m).output())\n\n return img4_file\n\n def _get_boot_component(self, component: str, basename: str = None, cache=True, **kwargs) -> Path:\n if basename is None:\n img4_file = (self._storage_boot_dir / component).with_suffix('.img4')\n else:\n img4_file = 
(self._storage_boot_dir / basename).with_suffix('.img4')\n\n if cache and img4_file.exists():\n return img4_file\n\n logger.info(f'creating {component} (boot)')\n\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n\n build_identity = self.boot_ipsw.build_manifest.get_build_identity(self._hardware_model)\n component_path = build_identity.get_component_path(component)\n\n if self._devel:\n if component_path in ('iBSS', 'iBEC'):\n component_path = component_path.replace('RELEASE', 'DEVELOPMENT')\n elif component_path == 'KernelCache':\n component_path = component_path.replace('release', 'development')\n\n im4p_file = temp_dir / component\n im4p_file.write_bytes(self.boot_ipsw.read(component_path))\n self._patch_boot_component(component, im4p_file, img4_file, **kwargs)\n return img4_file\n\n def _patch_boot_component(self, component: str, im4p_file: Path, img4_file: Path, **kwargs) -> None:\n {\n 'iBSS': self._patch_boot_ibss,\n 'iBEC': self._patch_boot_ibec,\n 'iBoot': self._patch_boot_iboot,\n 'KernelCache': self._patch_boot_kernel_cache,\n 'DeviceTree': self._patch_boot_device_tree,\n 'StaticTrustCache': self._patch_boot_static_trust_cache,\n }[component](im4p_file, img4_file, **kwargs)\n\n def _patch_boot_ibss(self, im4p_file: Path, img4_file: Path) -> None:\n iboot_dec_file = im4p_file.with_suffix('.dec')\n patched_iboot_file = im4p_file.with_suffix('.patched')\n boot_args = None\n self.decrypt(im4p_file, iboot_dec_file)\n self.patch_iboot_component(iboot_dec_file, patched_iboot_file, boot_args)\n\n im4p = IM4P(payload=patched_iboot_file.read_bytes(), fourcc='ibss')\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_boot_ibec(self, im4p_file: Path, img4_file: Path) -> None:\n iboot_dec_file = im4p_file.with_suffix('.dec')\n patched_iboot_file = im4p_file.with_suffix('.patched')\n self.decrypt(im4p_file, iboot_dec_file)\n self.patch_iboot_component(iboot_dec_file, patched_iboot_file,\n '-v keepsyms=1 debug=0x2014e panic-wait-forever=1')\n im4p = IM4P(payload=patched_iboot_file.read_bytes(), fourcc='ibec')\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_boot_iboot(self, im4p_file: Path, img4_file: Path, fakefs=False) -> None:\n iboot_dec_file = im4p_file.with_suffix('.dec')\n patched_iboot_file = im4p_file.with_suffix('.patched')\n self.decrypt(im4p_file, iboot_dec_file)\n boot_args = '-v keepsyms=1 debug=0x2014e'\n if fakefs:\n boot_args += ' rd=disk0s1s8'\n self.patch_iboot_component(iboot_dec_file, patched_iboot_file, boot_args, fsboot=True)\n patched_iboot_file.write_bytes(patched_iboot_file.read_bytes().replace(b'/kernelcache', b'/kernelcachd'))\n if 0x8010 <= self._chip_id <= 0x801f:\n fourcc = 'ibss'\n else:\n fourcc = 'ibec'\n im4p = IM4P(payload=patched_iboot_file.read_bytes(), fourcc=fourcc, description='Unknown')\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_boot_kernel_cache(self, im4p_file: Path, img4_file: Path, is_restore=True) -> None:\n with tempfile.TemporaryDirectory() as temp_dir:\n temp_dir = Path(temp_dir)\n kcache_raw_file = temp_dir / 'kcache.raw'\n kernelcache_buf = im4p_file.read_bytes()\n kcache_patched_file = temp_dir / 'kcache.patched'\n fourcc = 'rkrn' if is_restore else 'krnl'\n\n im4p = IM4P(kernelcache_buf)\n kpp = im4p.payload.extra\n im4p.payload.decompress()\n kcache_raw = im4p.payload.output().data\n\n kcache_raw_file.write_bytes(kcache_raw)\n\n if self._devel:\n im4p = 
IM4P(kernelcache_buf)\n im4p.fourcc = fourcc\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n else:\n if self._kernel_patch_file.exists():\n if kcache_raw.startswith(b'\\xca\\xfe\\xba\\xbe'):\n # trim FAT image header\n kcache_raw = kcache_raw[0x1c:]\n\n logger.debug(f'using kernel patch file: {self._kernel_patch_file}')\n kcache_patched = self.patch(kcache_raw, self._kernel_patch_file.read_text())\n else:\n self.patch_kernelcache(kcache_raw_file, kcache_patched_file, flag_o=True)\n kcache_patched = kcache_patched_file.read_bytes()\n\n im4p = IM4P(fourcc=fourcc, payload=kcache_patched)\n im4p.payload.compress(Compression.LZSS)\n im4p.payload.extra = kpp\n\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_boot_device_tree(self, im4p_file: Path, img4_file: Path) -> None:\n im4p = IM4P(im4p_file.read_bytes())\n im4p.fourcc = 'rdtr'\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _patch_boot_static_trust_cache(self, im4p_file: Path, img4_file: Path) -> None:\n im4p = IM4P(im4p_file.read_bytes())\n im4p.fourcc = 'rtsc'\n img4 = IMG4(im4p=im4p, im4m=self._boot_im4m)\n img4_file.write_bytes(img4.output())\n\n def _boot_boot_using_bootx(self, fakefs=False) -> None:\n self._storage_boot_dir.mkdir(exist_ok=True, parents=True)\n\n logger.info('booting patched boot image (bootx)')\n\n self._gaster_pwn()\n\n ibss = self._get_boot_component('iBSS')\n\n basename = 'iBEC'\n if fakefs:\n basename += '-fakefs'\n basename += '.img4'\n ibec = self._get_boot_component('iBEC', basename=basename)\n\n self._gaster_reset()\n\n restore_logo = self._boot_restore_logo\n device_tree = self._get_boot_component('DeviceTree')\n trust_cache = self._get_boot_component('StaticTrustCache')\n kernel_cache = self._get_boot_component('KernelCache')\n\n with IRecv() as irecv:\n assert irecv.mode == Mode.DFU_MODE\n logger.info('sending iBSS')\n irecv.send_buffer(ibss.read_bytes())\n\n try:\n with IRecv() as irecv:\n assert irecv.mode == Mode.RECOVERY_MODE_2\n\n logger.info('sending iBEC')\n irecv.send_buffer(ibec.read_bytes())\n\n if self._chip_id in (0x8010, 0x8015, 0x8011, 0x8012):\n irecv.send_command('go', b_request=1)\n irecv.ctrl_transfer(0x21, 1)\n except USBError:\n # device will reboot and cause a broken pipe\n pass\n\n time.sleep(1)\n\n with IRecv() as irecv:\n logger.info('sending RestoreLogo')\n irecv.send_buffer(restore_logo.read_bytes())\n irecv.send_command('setpicture 0x1')\n\n logger.info('sending DeviceTree')\n irecv.send_buffer(device_tree.read_bytes())\n irecv.send_command('devicetree')\n\n logger.info('sending StaticTrustCache')\n irecv.send_buffer(trust_cache.read_bytes())\n irecv.send_command('firmware')\n\n logger.info('sending KernelCache')\n irecv.send_buffer(kernel_cache.read_bytes())\n try:\n logger.info('booting into ramdisk (boot image)')\n irecv.send_command('bootx', b_request=1)\n except USBError:\n pass\n\n def _boot_boot_using_fsboot(self, fakefs=False) -> None:\n self._storage_boot_dir.mkdir(exist_ok=True, parents=True)\n\n logger.info(f'booting patched boot image (fsboot) (fakefs: {fakefs})')\n\n self._gaster_pwn()\n\n ibss = None\n\n if not (0x8010 <= self._chip_id <= 0x801f):\n ibss = self._get_boot_component('iBSS')\n\n basename = 'iBoot'\n if fakefs:\n basename += '-fakefs'\n basename += '.img4'\n iboot = self._get_boot_component('iBoot', basename=basename, fakefs=fakefs)\n\n self._gaster_reset()\n\n if ibss is not None:\n with IRecv() as irecv:\n 
logger.info('sending iBSS')\n irecv.send_buffer(ibss.read_bytes())\n time.sleep(1)\n\n with IRecv() as irecv:\n assert irecv.mode == Mode.RECOVERY_MODE_2\n\n with IRecv() as irecv:\n logger.info('sending iBoot')\n irecv.send_buffer(iboot.read_bytes())\n time.sleep(1)\n\n with IRecv() as irecv:\n try:\n logger.info('booting into fs')\n irecv.send_command('fsboot')\n except USBError:\n pass\n\n def _boot_ramdisk(self) -> None:\n self._storage_ramdisk_dir.mkdir(exist_ok=True, parents=True)\n\n logger.info('booting ramdisk')\n\n self._gaster_pwn()\n\n ibss = self._get_ramdisk_component('iBSS')\n ibec = self._get_ramdisk_component('iBEC')\n\n self._gaster_reset()\n\n restore_logo = self._ramdisk_restore_logo\n ramdisk = self._get_ramdisk_component('RestoreRamDisk')\n device_tree = self._get_ramdisk_component('RestoreDeviceTree')\n trust_cache = self._get_ramdisk_component('RestoreTrustCache')\n kernel_cache = self._get_ramdisk_component('RestoreKernelCache')\n\n with IRecv() as irecv:\n assert irecv.mode == Mode.DFU_MODE\n logger.info('sending iBSS')\n irecv.send_buffer(ibss.read_bytes())\n time.sleep(1)\n\n try:\n with IRecv() as irecv:\n assert irecv.mode == Mode.RECOVERY_MODE_2\n logger.info('sending iBEC')\n irecv.send_buffer(ibec.read_bytes())\n\n if self._chip_id in (0x8010, 0x8015, 0x8011, 0x8012):\n irecv.send_command('go', b_request=1)\n except USBError:\n # device will reboot and cause a broken pipe\n pass\n\n logger.info('Waiting for iBEC to load')\n wait(3)\n\n with IRecv() as irecv:\n logger.info('sending RestoreLogo')\n irecv.send_buffer(restore_logo.read_bytes())\n irecv.send_command('setpicture 0x1')\n\n logger.info('sending RestoreRamDisk')\n irecv.send_buffer(ramdisk.read_bytes())\n irecv.send_command('ramdisk')\n\n time.sleep(2)\n\n logger.info('sending RestoreDeviceTree')\n irecv.send_buffer(device_tree.read_bytes())\n irecv.send_command('devicetree')\n\n logger.info('sending RestoreTrustCache')\n irecv.send_buffer(trust_cache.read_bytes())\n irecv.send_command('firmware')\n\n logger.info('sending RestoreKernelCache')\n irecv.send_buffer(kernel_cache.read_bytes())\n try:\n logger.info('booting into ramdisk (ramdisk image)')\n irecv.send_command('bootx', b_request=1)\n except USBError:\n pass\n\n @staticmethod\n def reboot() -> None:\n try:\n with LockdownClient() as lockdown:\n lockdown.enter_recovery()\n except (NoDeviceConnectedError, SSLEOFError):\n with IRecv(timeout=3) as irecv:\n irecv.reboot()\n\n def _gaster_pwn(self) -> None:\n logger.info('gaster pwn')\n self._gaster('pwn')\n time.sleep(1)\n\n def _gaster_reset(self) -> None:\n logger.info('gaster reset')\n self._gaster('reset')\n time.sleep(1)\n\n def decrypt(self, payload: Path, output: Path) -> None:\n self._gaster('decrypt', payload, output)\n\n @staticmethod\n def patch_iboot_component(iboot: Path, output: Path, boot_args: str = None, fsboot=False) -> None:\n executable = str(PALERA1N_PATH / 'binaries' / OS_VARIANT / 'iBoot64Patcher')\n args = [iboot, output]\n\n if boot_args is not None:\n args += ['-b', boot_args]\n\n if fsboot:\n args += ['-f']\n\n local[executable](args)\n\n @staticmethod\n def patch_kernelcache(kernelcache: Path, output: Path, flag_o=False) -> None:\n args = [kernelcache, output, '-a']\n if flag_o:\n args.append('-o')\n local[str(PALERA1N_PATH / 'binaries' / OS_VARIANT / 'Kernel64Patcher')](args)\n\n @staticmethod\n def create_kernelcache_patch_file(original: bytes, patched: bytes, output: Path) -> None:\n result = '#AMFI\\n\\n'\n for i, _ in enumerate(original):\n if original[i] != 
patched[i]:\n result += f'{hex(i)} {hex(original[i])} {hex(patched[i])}\\n'\n output.write_text(result)\n\n @staticmethod\n def patch(buf: bytes, patches: str) -> bytes:\n patched = buf\n\n with tempfile.NamedTemporaryFile('wb+', delete=False) as f:\n f.write(buf)\n file = Path(f.name)\n\n for line in patches.splitlines():\n if ':' not in line:\n continue\n\n if line.startswith(';') or line.startswith('#'):\n continue\n\n line = line.strip()\n\n address, patch = line.split(':', 1)\n retcode, stdout, stderr = blacktop_ipsw['macho', 'a2o', str(file), address].run()\n offset = int(stderr.split('hex=', 1)[1].split(' ', 1)[0], 16)\n\n ks = Ks(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN)\n encoding, count = ks.asm(patch)\n encoding = bytes(encoding)\n patched = patched[:offset] + encoding + patched[offset + len(encoding):]\n\n file.unlink()\n\n return patched\n\n def enter_dfu(self) -> None:\n while not self.in_dfu:\n print('Prepare to do the following to start enter DFU mode:')\n print(' - Hold VolDown+Power for 4 seconds (Start only when prompted to!)')\n print(' - Keep holding VolDown for up to 10 seconds')\n input('HIT RETURN TO START> ')\n self.reboot()\n\n print('[1] Hold VolDown+Power for 4 seconds')\n wait(4)\n print('[2] Keep holding VolDown for up to 10 seconds')\n for _ in trange(10):\n try:\n with IRecv(timeout=1):\n pass\n except IRecvNoDeviceConnectedError:\n continue\n if self.in_dfu:\n logger.info('device entered DFU')\n return\n logger.error('Failed to enter DFU')\n\n def __repr__(self) -> str:\n return f'<{self.__class__.__name__} PRODUCT-TYPE:{self._product_type} BOARD-ID:0x{self._board_id:x} ' \\\n f'CHIP-ID:0x{self._chip_id:x} MODEL:{self._hardware_model} VERSION:{self._product_version}>'\n\n def _init_device_info(self) -> None:\n try:\n with LockdownClient() as lockdown:\n self._product_version = lockdown.product_version\n self._board_id = lockdown.board_id\n self._chip_id = lockdown.chip_id\n self._hardware_model = lockdown.hardware_model\n self._product_type = lockdown.product_type\n self._ecid = lockdown.ecid\n\n logger.info('entering recovery')\n lockdown.enter_recovery()\n wait(3)\n except (NoDeviceConnectedError, ConnectionFailedError):\n with IRecv(timeout=1) as irecv:\n self._board_id = irecv.board_id\n self._chip_id = irecv.chip_id\n self._hardware_model = irecv.hardware_model\n self._product_type = irecv.product_type\n self._ecid = irecv.ecid\n logger.info(f'init with device: {self}')\n\n def _init_ramdisk_ipsw(self) -> None:\n if self._ramdisk_ipsw_path is None:\n devices = list(get_devices(f\"'{self._product_type}' == device and '15.6.1' == version\"))\n assert len(devices) == 1\n url = devices[0]['url']\n\n logger.info(f'using remote ipsw: {url}')\n self._ramdisk_ipsw = IPSW(RemoteZip(url))\n else:\n self._ramdisk_ipsw = IPSW(ZipFile(self._ramdisk_ipsw_path))\n\n def _init_boot_ipsw(self) -> None:\n if self._ipsw_path is None:\n devices = list(get_devices(f\"'{self._product_type}' == device and '{self._product_version}' == version\"))\n assert len(devices) == 1\n self._boot_ipsw = IPSW(RemoteZip(devices[0]['url']))\n else:\n self._boot_ipsw = IPSW(ZipFile(self._ipsw_path))\n","repo_name":"doronz88/pylera1n","sub_path":"pylera1n/pylera1n.py","file_name":"pylera1n.py","file_ext":"py","file_size_in_byte":33236,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"}
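The patch() method in the pylera1n record above works in three steps: it resolves each patch address to a file offset with the blacktop ipsw CLI ('macho a2o'), assembles the replacement AArch64 instruction with Keystone, and splices the encoded bytes into the kernel buffer. Below is a minimal sketch of just the assemble-and-splice step; the buffer, offset, and function name are illustrative, not taken from the source.

from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN

def splice_instruction(buf: bytes, offset: int, asm: str) -> bytes:
    # assemble one AArch64 instruction; Ks.asm returns (byte list, count)
    ks = Ks(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN)
    encoding, _count = ks.asm(asm)
    patch = bytes(encoding)
    # overwrite len(patch) bytes at the given file offset
    return buf[:offset] + patch + buf[offset + len(patch):]

# hypothetical buffer and offset; AArch64 NOP encodes to 1f 20 03 d5
patched = splice_instruction(b'\x00' * 16, 4, 'nop')
assert patched[4:8] == bytes([0x1f, 0x20, 0x03, 0xd5])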
+{"seq_id":"17792794018","text":"from django import forms\nfrom .models import Ad, Choice\n\n\nclass AdForm(forms.ModelForm):\n\tchoice = forms.ChoiceField(choices=Choice, widget=forms.RadioSelect())\n\temail = forms.EmailField(\n\t\tlabel='Email',\n\t\trequired=True, \n\t\twidget=forms.EmailInput(\n\t\t\tattrs={\n\t\t\t\t'class': 'validate',\n\t\t\t\t'required': 'True',\n\t\t\t}\n\t\t)\n\t)\n\n\tclass Meta:\n\t\tmodel = Ad\n\t\tfields = ('start_date', 'end_date', 'choice', 'name', 'email', 'image', 'title', 'web')","repo_name":"helpbearpark/baumtory","sub_path":"ads/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23458996691","text":"import csv\nfrom collections import Counter\nfrom math import ceil\n\nfin = open('../Downloads/B-large.in','r');\n#fin = open('input.in','r');\nfout = open('output.out','w');\n\ndata = csv.reader(fin, delimiter=' ')\nT = int(next(data)[0]);\n\nfor k in range(T):\n print('{} of {}'.format(k, T));\n D = int(next(data)[0]);\n P = next(data);\n P = [int(p) for p in P];\n N = Counter(P)\n #Create structure with all (P_j, N_j);\n Pmax = max(N.keys());\n tmin = Pmax;\n n = Pmax;\n #print('tmin = {}'.format(tmin));\n for n in range(Pmax-1,0,-1):\n t = sum([Nj*(ceil(Pj/n)-1) for Pj, Nj in zip(N.keys(), N.values())])+n;\n #print('sum + n = {}+{} = {}'.format(sum([Nj*(ceil(Pj/n)-1) for Pj, Nj in zip(N.keys(), N.values())]), n, t));\n if tloaded %d examples for class: %s' % (len(faces), subdir))\n X.extend(faces)\n y.extend(labels)\n \n #Kaka: 回傳 臉(array), 標籤人名(aray), 所有資料夾(list), 有問題的資料夾(list)\n return asarray(X), asarray(y), list_file, list_except_file\n\n# get_embedding(model: 模型, face_pixels: 人臉影像)\n# 功能: 人臉嵌入\ndef get_embedding(model, face_pixels):\n # scale pixel values\n face_pixels = face_pixels.astype('float32')\n # standardize pixel values across channels (global)\n mean, std = face_pixels.mean(), face_pixels.std()\n face_pixels = (face_pixels - mean) / std\n # transform face into one sample\n samples = expand_dims(face_pixels, axis=0)\n # make prediction to get embedding\n yhat = model.predict(samples)\n\n return yhat[0]\n\n# recognize(img_path: 影像路徑)\n# 功能: 人臉辨認\ndef recognize(img_path):\n\n #初始化\n classification_res_arr = [] \n class_index_arr = []\n class_probability_arr = []\n is_LOW_CONFIDENCE = True\n\n #Kaka: 載入圖檔\n marked_image = array(Image.open(img_path))\n\n #Kaka: 擷取人臉\n face, pos_arr = extract_face(img_path)\n if len(face) == 0:\n return face_recognition_err.NO_FACE_INFO, [], marked_image\n\n for i in range(len(face)):\n\n #Kaka: 人臉嵌入\n embedding = get_embedding(facenet_model, face[i])\n\n samples = expand_dims(embedding, axis = 0)\n\n #Kaka: 模型預測\n yhat_class = my_model.predict(samples)\n yhat_prob = my_model.predict_proba(samples)\n\n #Kaka: 取得預測結果\n class_index = labels_dict[str(yhat_class[0])] #人名\n class_probability = yhat_prob[0, yhat_class[0]] * 100 #預測值\n\n if class_probability <= 0.03:\n is_LOW_CONFIDENCE &= False\n classification_res_arr.append([face_recognition_err.SUCCESS, class_index, class_probability])\n cv2.rectangle(marked_image, pos_arr[i][0:2], pos_arr[i][2:], [255, 0, 0], 3) \n else:\n classification_res_arr.append([face_recognition_err.LOW_CONFIDENCE, class_index, class_probability])\n cv2.rectangle(marked_image, pos_arr[i][0:2], pos_arr[i][2:], [0, 255, 0], 3)\n \n if is_LOW_CONFIDENCE:\n return face_recognition_err.LOW_CONFIDENCE, classification_res_arr, Image.fromarray(marked_image)\n \n return face_recognition_err.SUCCESS, classification_res_arr, Image.fromarray(marked_image)","repo_name":"KakaCheng/Online_Report_System_Line_bot","sub_path":"models/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"69929596356","text":"#!/usr/bin/python -i\nimport Block\nimport rlcompleter, readline\nreadline.parse_and_bind(\"tab: complete\")\n\ndevice = Block.Block(\"86:00.0\",2,\"libcomanche-blknvme.so\")\n\nbuffer = device.allocate_io_buffer(4096,32,-1)\n\ninfo = device.get_volume_info()\n\ninfo\n","repo_name":"IBM/comanche","sub_path":"src/components/api/wrappers/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"}
+{"seq_id":"23586419242","text":"import argparse, random\n\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, AlphaDropout, Dropout, Flatten\nfrom keras.optimizers import RMSprop, Adam\n\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import BoltzmannQPolicy\nfrom rl.memory import SequentialMemory\n\nfrom model import Game, Board, Plebeian\nimport model\n\nparser = argparse.ArgumentParser(description='Train a learning agent to play Automatafl.')\nparser.add_argument('save', help='Save weights to this file')\nparser.add_argument('-L', '--load', dest='load', help='Load these weights before training')\nparser.add_argument('-s', '--steps', dest='steps', type=int, default=100000, help='Perform this many training steps')\nparser.add_argument('--dropout', dest='dropout', type=float, default=0.02, help='Drop this fraction of values betwen the internal layers to prevent overfit')\nparser.add_argument('--memory', dest='memory', type=int, default=10000, help='Remember this many past moves for the learner')\nparser.add_argument('--against', dest='against', help='Load this file as the adversary (instead of a random agent)')\nparser.add_argument('--rand-rate', dest='rand_rate', type=float, default=0.02, help='Have the adversary move randomly at this rate')\nparser.add_argument('--learn-rate', dest='learn_rate', type=float, default=0.1, help='Initial learning rate')\nparser.add_argument('--layers', dest='layers', type=int, default=8, help='Use this many hidden layers')\nparser.add_argument('--width', dest='width', type=int, default=128, help='Each hidden layer has this many neurons')\nparser.add_argument('--update', dest='update', type=int, default=32, help='Update the target model with learned data after this many steps')\nargs = parser.parse_args()\n\nplebs = [Plebeian(i) for i in range(1, 3)]\ndef setup_game():\n return Game(*plebs, setup=[\n# [2, 0, 0, 2, 0, 0, 2],\n# [0, 0, 1, 2, 1, 0, 0],\n# [1, 0, 0, 0, 0, 0, 1],\n# [2, 0, 0, 3, 0, 0, 2],\n# [1, 0, 0, 0, 0, 0, 1],\n# [0, 0, 1, 2, 1, 0, 0],\n# [2, 0, 0, 2, 0, 0, 2],\n# ], goals=[[(0, 0), (0, 6)], [(6, 0), (6, 6)]])\n [2, 0, 1, 0, 2],\n [0, 0, 0, 0, 0],\n [2, 0, 3, 0, 2],\n [0, 0, 0, 0, 0],\n [2, 0, 1, 0, 2],\n ], goals=[[(0, 0), (4, 0)], [(0, 4), (4, 4)]])\n\ngame = setup_game()\n\nNUM_ACTIONS = game.NumActions()\nNUM_STATES = len(game.StateVector(plebs[0]))\n\n#print(NUM_ACTIONS)\n#print(NUM_STATES)\n#exit()\n\ndef make_net(primary):\n mdl = Sequential()\n mdl.add(Flatten(input_shape=(args.memory, NUM_STATES)))\n mdl.add(Dropout(args.dropout))\n mdl.add(Dense(args.width, input_shape=(NUM_STATES,), activation='relu'))\n mdl.add(Dropout(args.dropout))\n if primary:\n for i in range(args.layers - 1):\n mdl.add(Dense(args.width, activation='relu', kernel_initializer='lecun_uniform'))\n mdl.add(Dropout(args.dropout))\n mdl.add(Dense(NUM_ACTIONS))\n return mdl\n\ndef make_agent(prim, load):\n nn = make_net(True)\n mem = SequentialMemory(limit=args.memory, window_length=args.memory)\n pol = BoltzmannQPolicy()\n dqn = DQNAgent(model=nn, nb_actions=NUM_ACTIONS, memory=mem, policy=pol, target_model_update=args.update)\n dqn.compile(Adam(lr=args.learn_rate), metrics=['mae'])\n if load:\n dqn.load_weights(load)\n return dqn\n\ncur = make_agent(True, args.load)\nif args.against:\n adv = make_agent(True, args.against)\n\nsteps = 0\nclass GameEnv(object):\n def reset(self):\n global game, steps\n game = setup_game()\n steps = 0\n print('Game reset')\n return game.StateVector(plebs[0])\n\n def render(self, mode='human', 
close=False):\n pass\n\n def close(self):\n pass\n\n def step(self, act):\n global steps\n steps += 1\n\n game.PoseAgentMove(plebs[0], act)\n if args.against and random.random() > args.rand_rate:\n game.PoseAgentMove(plebs[1], adv.forward(game.StateVector(plebs[1])))\n else:\n game.PoseAgentMove(plebs[1], random.randrange(0, NUM_ACTIONS))\n\n winner = None\n for ev in game.GlobalEvents():\n if ev.__class__ is model.TurnOver and ev.winner is not None:\n winner = ev.winner\n print(f'Game won on step {steps} by {winner}')\n if ev.__class__ is model.Conflict:\n print(f'Conflict on step {steps}')\n\n for pleb in plebs:\n pleb.Events()\n\n retval = (\n game.StateVector(plebs[0]),\n game.RewardScalar(plebs[0]),\n winner is not None,\n {},\n )\n\n return retval\n\ncur.fit(GameEnv(), nb_steps=args.steps, log_interval=args.update)\ncur.save_weights(args.save, overwrite=True)\n","repo_name":"emberian/automatafl","sub_path":"old_python_prototype/rl_learn.py","file_name":"rl_learn.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"72847604033","text":"class Room(object):\n \"\"\"Create a new room object\"\"\"\n\n def __init__(self, name):\n \"\"\"Set default properties and check for name validity\"\"\"\n if type(name) == str and len(name) <= 30:\n self.name = name\n self.members = {}\n self.no_of_occupants = len(self.members)\n else:\n raise Exception('Invalid name. Room name must be string and \\\n not more than 30 characters.')\n","repo_name":"hassan02/amity_room_allocator","sub_path":"model/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10469207147","text":"\"\"\"Advance IMC particles over a time-step.\"\"\"\n\nimport math\nimport random\n\nimport imc_global_mat_data as mat\nimport imc_global_mesh_data as mesh\nimport imc_global_part_data as ptcl\nimport imc_global_phys_data as phys\nimport imc_global_time_data as time\n\n\ndef run():\n \"\"\"Advance IMC particles over a time-step.\"\"\"\n print(\"\\n\" + \"-\" * 79)\n print(\"Tracking step ({:4d})\".format(time.step))\n print(\"-\" * 79)\n\n # Create local storage for the energy deposited this timestep\n mesh.nrgdep[:] = 0.0\n\n ptcl.n_census = 0\n\n endsteptime = time.time + time.dt\n\n # Optimisations (avoid dot operator, saves 20% off cProfiled calc1\n exp = math.exp\n log = math.log\n ran = random.random\n nrgdep = [0.0] * mesh.ncells\n aaa = mat.gamma * phys.invh3 / mesh.temp[:] ** mat.tpower\n phys_h = -phys.h\n mesh_temp = mesh.temp\n mesh_fleck = -mesh.fleck\n mesh_nodepos = mesh.nodepos\n phys_c = phys.c\n top_cell = mesh.ncells - 1\n phys_invc = phys.invc\n bbb = phys.invh * mesh_temp\n\n print(\"\\nParticle loop...\")\n\n # Loop over all particles\n for iptcl in range(len(ptcl.prop)):\n\n # Get particle's initial properties at start of timestep\n (ttt, icell, xpos, muu, frq, nrg, startnrg) = ptcl.prop[iptcl][1:8]\n startnrg = 0.01 * startnrg\n\n # Loop over segments in the history (between boundary-crossings and collisions)\n while True:\n\n # Calculate the total macroscopic cross-section (cm^-1)\n sigma = (\n aaa[icell]\n * (1.0 - exp(phys_h * frq / mesh_temp[icell]))\n / (frq * frq * frq)\n )\n\n flecksig = mesh_fleck[icell] * sigma\n\n # Distance to boundary\n if muu > 0.0:\n dist_b = (mesh_nodepos[icell + 1] - xpos) / muu\n else:\n dist_b = (mesh_nodepos[icell] - xpos) / muu\n\n # Distance to collision\n dist_col = abs(log(ran())) / (sigma + flecksig)\n\n # Distance to census\n dist_cen = phys_c * (endsteptime - ttt)\n\n # Actual distance - whichever happens first\n dist = min(dist_b, dist_col, dist_cen)\n\n # Calculate the new energy and the energy deposited (temp storage)\n newnrg = nrg * exp(flecksig * dist)\n if newnrg <= startnrg:\n newnrg = 0.0\n\n # Deposit the particle's energy\n nrgdep[icell] += nrg - newnrg\n\n if newnrg == 0.0:\n # Flag particle for later destruction\n ptcl.prop[iptcl][6] = -1.0\n break\n\n # If the event was a boundary-crossing, and the boundary is the\n # domain boundary, then kill the particle\n if dist == dist_b:\n if muu > 0:\n if icell == top_cell:\n # Flag particle for later destruction\n ptcl.prop[iptcl][6] = -1.0\n break\n icell += 1\n if muu < 0:\n if icell == 0:\n # Flag particle for later destruction\n ptcl.prop[iptcl][6] = -1.0\n break\n icell -= 1\n\n # Otherwise, advance the position, time and energy\n xpos += muu * dist\n ttt += dist * phys_invc\n nrg = newnrg\n\n # If the event was census, finish this history\n if dist == dist_cen:\n # Finished with this particle\n # Update the particle's properties in the list\n # Starting energy comes in here but doesn't change\n ptcl.prop[iptcl][1:7] = (ttt, icell, xpos, muu, frq, nrg)\n ptcl.n_census += 1\n break\n\n # If event was collision, also update frequency and direction\n if dist == dist_col:\n # Collision (i.e. 
absorption, but treated as pseudo-scattering)\n frq = bbb[icell] * abs(log(ran()))\n muu = 1.0 - 2.0 * ran()\n\n # End loop over history segments\n\n # End loop over particles\n\n mesh.nrgdep[:] = nrgdep[:]\n\n\ndef clean():\n \"\"\"Tidy up the particle list by removing leaked and absorbed particles.\"\"\"\n # These had their energy set to -1 to flag them\n for iptcl in range(len(ptcl.prop) - 1, -1, -1):\n if ptcl.prop[iptcl][6] < 0.0:\n del ptcl.prop[iptcl]\n\n print(\"\\nNumber of particles in the system = {:16d}\".format(len(ptcl.prop)))\n","repo_name":"msleigh/fcimc","sub_path":"python/src/imc_track.py","file_name":"imc_track.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
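In the tracking loop above, the distance to the next collision is drawn from an exponential distribution whose mean is the mean free path 1/(sigma + flecksig), using the abs(log(ran())) idiom. A standalone sketch of that sampling rule (names are illustrative):

import math
import random

def distance_to_collision(sigma_total: float) -> float:
    # exponential free-flight sampling: -ln(xi)/sigma with xi uniform on (0, 1)
    return abs(math.log(random.random())) / sigma_total

# the sample mean approaches the mean free path 1/sigma
samples = [distance_to_collision(2.0) for _ in range(100_000)]
print(sum(samples) / len(samples))  # close to 0.5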
+{"seq_id":"771342892","text":"import lib.datasets as ds\nimport lib.controls as ctl\nimport lib.wifi as wifi\nimport machine , time\nfrom lib.web import Web , Req\n\n\ndoor = ctl.Door()\nlight = ctl.Light()\n\nwifi.connect()\n\n# ========== 外部中断\nbtn = machine.Pin(27, machine.Pin.IN, machine.Pin.PULL_UP)\ndef handle_interrupt(pin):\n time.sleep_ms(500)\n if door.info == 'stop':\n print('自动化关闭')\n door.approach(0)\n light.off()\n else:\n print('紧急制动')\n door.stop()\nbtn.irq(trigger=machine.Pin.IRQ_FALLING, handler=handle_interrupt)\n\n# ========== 网络 API\ntry:\n web = Web()\nexcept:\n machine.reset()\n\ndef app(req:Req):\n req.response('gcon_v2')\nweb.route('/app', app)\n\n# 运行状态\ndef status(req:Req):\n if 'light' in req.url:\n req.response(ds.get('light'))\n if 'door' in req.url:\n req.response(ds.get('door'))\n if 'height' in req.url:\n req.response(str(door.height))\n \nweb.listrout(status, [\n '/info_light', '/info_door', '/info_height'\n])\n\n# 状态控制\ndef control(req:Req):\n if 'on' in req.url: light.on()\n if 'off' in req.url: light.off()\n if 'up' in req.url: door.up()\n if 'down' in req.url: door.down()\n if 'stop' in req.url: door.stop()\n req.response('OK')\n\nweb.listrout(control, [\n '/api/light_on', '/api/light_off', '/api/door_up', '/api/door_down', '/api/door_stop'\n])\n\n# 自动模式\ndef approach(req:Req):\n if 'open' in req.url:\n req.response('OK')\n light.on()\n door.approach(ds.get('Auto_height', 32))\n return\n if 'close' in req.url:\n req.response('OK')\n door.approach(0)\n light.off()\n return\n\nweb.listrout(approach, [\n '/auto_close', '/auto_open'\n])\n\nweb.active()","repo_name":"A03HCY/GarageControler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3238533006","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def binaryTreePaths(self, root: TreeNode) -> List[str]:\n \n def traverse(cur: TreeNode, acc: str, res: list): \n if not cur.left and not cur.right:\n res.append(acc + str(cur.val))\n return\n if cur.left:\n traverse(cur.left, acc + str(cur.val) + \"->\", res)\n if cur.right:\n traverse(cur.right, acc + str(cur.val) + \"->\", res)\n \n res = [] # res = [\"1 -> 2 ...\", ...]\n if root:\n traverse(root, \"\", res)\n return res","repo_name":"chien-wei/LeetCode","sub_path":"0257_Binary_Tree_Paths.py","file_name":"0257_Binary_Tree_Paths.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33066043737","text":"lista_resul = []\r\ndef processa(caminho):\r\n f = open(caminho, 'r')\r\n csv = f.readlines()\r\n f.close()\r\n linha=[]\r\n for i in range(1,len(csv),1):\r\n linha = csv[i].split(',')\r\n \r\n criaArquivo('resul.txt', lista_resul, '', '', '\\n')\r\n\r\ndef criaArquivo(nome,lista,header,separador,quebra):\r\n f = open(nome, 'w')\r\n if(header != ''):\r\n f.write(header+quebra)\r\n k=0\r\n for item in lista:\r\n i = 0\r\n for ele in item:\r\n f.write(ele)\r\n if(i 9: L[i] -= 9 # poids corrigés\n s += L[i] # calcul de la somme de contrôle\n\n b = True if (s % 10) == 0 else False # validation du code\n return (b, s)\n\ndef calcul_cle(N):\n b, s = check(N)\n cle = 10 - (s % 10)\n return cle\n\n\nC = 20088\nprint(check(C))\nprint(calcul_cle(C))\n","repo_name":"ATHRIBIS/checksum","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6203572053","text":"import torch\nimport torch.nn as nn\nfrom gym import spaces\nfrom torch.distributions import Normal, Beta\nimport numpy as np\n\n\nclass Agent(nn.Module):\n \"\"\"\n :param observation_space: (gym.Space)\n :param features_dim: (int) Number of features extracted.\n This corresponds to the number of unit for the last layer.\n \"\"\"\n\n def __init__(self, observation_space: spaces.Box, dims):\n super().__init__()\n # We assume CxHxW images (channels first)\n # Re-ordering will be done by pre-preprocessing or wrapper\n if dims == 3:\n conv = nn.Conv3d\n elif dims == 2:\n conv = nn.Conv2d\n else: \n raise ValueError(\"Only dims = 2 or dims = 3 is currently supported\")\n \n self.actor_cnn = nn.Sequential(\n conv(3, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n nn.Flatten(),\n )\n\n # Compute shape by doing one forward pass\n with torch.no_grad():\n n_flatten = self.actor_cnn(\n torch.as_tensor(observation_space.sample()[0, None]).float()\n ).shape[1]+1\n\n self.action_mean = nn.Sequential((nn.Linear(n_flatten, 64),\n nn.ReLU(),\n nn.Linear(64, 32),\n nn.ReLU(),\n nn.Linear(32, dims)))\n \n self.action_logstd = nn.Sequential((nn.Linear(n_flatten, 64),\n nn.ReLU(),\n nn.Linear(64, 32),\n nn.ReLU(),\n nn.Linear(32, dims)))\n\n self.critic_cnn = nn.Sequential(\n conv(3, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n conv(32, 32, kernel_size=4, stride=1),\n nn.ReLU(),\n nn.Flatten(),\n )\n\n self.critic = nn.Sequential((nn.Linear(n_flatten, 64),\n nn.ReLU(),\n nn.Linear(64, 32),\n nn.ReLU(),\n nn.Linear(32, dims)))\n\n print(\"Running with\", self.count_parameters(), \"parameters\")\n\n\n def forward(self, observations: torch.Tensor, time: torch.Tensor) -> torch.Tensor:\n observations = observations.squeeze()\n x = self.actor_cnn(observations)\n x = torch.concatenate((x, time.unsqueeze(1)), dim = 1)\n action_mean = self.action_mean(x) #Batch dim, (Mean, Std), (x, y, z)\n action_std = torch.exp(self.action_logstd(x))\n\n x = self.critic_cnn(observations)\n x = torch.concatenate((x, time.unsqueeze(1)), dim = 1)\n critic_output = self.critic(x)\n return action_mean, action_std, critic_output\n\n\n def get_value(self, x, time):\n _, _, value = self(x, time)\n return value\n\n def get_action_and_value(self, img, time, action=None):\n \n action_mean, action_std, critic_output = self(img, time)\n probs = Normal(action_mean, action_std)\n if torch.any(torch.isnan(img)):\n print(\"Found\", torch.isnan(img).sum(), \"NaNs in input\")\n\n if action is None:\n action = probs.sample()\n return action, probs.log_prob(action).sum(1), probs.entropy().sum(1), critic_output, action_std\n \n\n def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n ","repo_name":"ALjone/Master-Thesis","sub_path":"agents/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"7991660659","text":"\n\ndef num_pairs(input):\n calc = 0\n max_cur = len(input)\n for i in range(0, max_cur-1):\n for j in range(i+1, max_cur):\n if abs(input[i] - input[j]) % 200 == 0:\n calc += 1\n return calc\n\ninput = list(map(int, input('Массив:\\n').split()))\n\nprint(num_pairs(input))","repo_name":"ivangotovets/algorithms","sub_path":"Яндекс пробные 20/2-все пары пары дел на 200.py","file_name":"2-все пары пары дел на 200.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27094760215","text":"import numpy as np\r\nimport scipy.ndimage\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nimport math\r\nimport code \r\n\r\ndef nothing(x):\r\n pass\r\n\r\ndef sortCorners(corners):\r\n arg = np.argsort(corners[:,0])\r\n corners[:,:] = corners[arg,:]\r\n\r\n tl = corners[1] if corners[0,1] > corners[1,1] else corners[0];\r\n tr = corners[0] if corners[0,1] > corners[1,1] else corners[1];\r\n bl = corners[3] if corners[2,1] > corners[3,1] else corners[2];\r\n br = corners[2] if corners[2,1] > corners[3,1] else corners[3];\r\n\r\n return np.array([tl, bl, br, tr],np.float32)\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nloop = True\r\n\r\nwhile(loop):\r\n # loop = False\r\n\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n # frame = cv2.blur(frame, (3,3))\r\n\r\n height, width, _ = frame.shape\r\n\r\n # Our operations on the frame come here\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\r\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(11,11))\r\n grayCLAHE = clahe.apply(gray)\r\n\r\n thresh,bw = cv2.threshold(grayCLAHE, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n\r\n kernel = np.ones((2,2), np.uint8)\r\n bw2 = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)\r\n bw2 = 1-bw2\r\n\r\n labeled_array, num_features = scipy.ndimage.label(bw2, np.ones((3,3),np.uint8))\r\n areas = scipy.ndimage.find_objects(labeled_array)\r\n\r\n ii = 0\r\n colourII = 0\r\n for area in areas:\r\n rows = area[0].stop - area[0].start\r\n cols = area[1].stop - area[1].start\r\n\r\n ii += 1\r\n\r\n if rows > 0.6*height:\r\n continue\r\n if cols > 0.6*width:\r\n continue\r\n\r\n if rows < 0.02*height:\r\n continue\r\n if cols < 0.02*width:\r\n continue\r\n\r\n image, contours, heirachy = cv2.findContours(np.copy(bw2[area[0], area[1]]),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n if contours[0].shape[0] < 4:\r\n continue\r\n\r\n epsilon = 0.03*cv2.arcLength(contours[0],True)\r\n contour = cv2.approxPolyDP(contours[0],epsilon,True)\r\n\r\n if contour.shape[0] != 4:\r\n continue\r\n\r\n cntArea = cv2.contourArea( contour)\r\n if cntArea < 64:\r\n continue\r\n\r\n transW = int(math.sqrt(cntArea)+0.5)\r\n\r\n reducedContour = np.array(contour[:,0,:], np.float32)\r\n reducedContour = sortCorners(reducedContour)\r\n\r\n cnr = [area[0].start, area[1].start]\r\n otherCnr = [area[0].start+transW, area[1].start+transW]\r\n imgSize = grayCLAHE.shape\r\n if otherCnr[0] >= imgSize[0] or otherCnr[1] >= imgSize[1]:\r\n continue\r\n\r\n transM = cv2.getPerspectiveTransform(reducedContour, np.array([[0,0], [transW,0], [transW, transW], [0, transW] ], np.float32))\r\n transGray = np.array(cv2.warpPerspective(grayCLAHE[area[0], area[1]], transM, (transW, transW) ), np.uint8)\r\n\r\n thresh,trans = cv2.threshold(transGray, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n # trans = np.array(cv2.warpPerspective(bw2[area[0], area[1]], transM, (transW, transW) ), np.uint8)\r\n # trans = 1-trans\r\n\r\n # kernel = np.ones((3,3), np.uint8)\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_OPEN, kernel, iterations=1)\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_CLOSE, kernel, iterations=1)\r\n # kernel[1,0] = 0\r\n # kernel[0,1] = 0\r\n # kernel[1,-1] = 0\r\n # kernel[-1,1] = 0\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_OPEN, kernel, iterations=1)\r\n # kernel = np.ones((3,3), np.uint8)\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_OPEN, kernel, 
iterations=1)\r\n # kernel = np.ones((3,3), np.uint8)\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_CLOSE, kernel, iterations=1)\r\n # kernel[0,0] = 0\r\n # kernel[0,-1] = 0\r\n # kernel[-1,-1] = 0\r\n # kernel[-1,0] = 0\r\n # trans = cv2.morphologyEx(trans, cv2.MORPH_CLOSE, kernel, iterations=1)\r\n\r\n # trans = 1-trans\r\n\r\n values = code.extractInner(trans)\r\n # if values[0] == -1 :\r\n # continue\r\n # data = code.decodeInner(values[1], values[2])\r\n\r\n colour = ((colourII%6)*255/6, 255, 255)\r\n colourDull = ((colourII%6)*255/6, 82, 255)\r\n colourII += 1\r\n\r\n hsv[labeled_array == ii ] = colourDull\r\n\r\n contour[:,0,:] = contour[:,0,:] + [area[1].start, area[0].start]\r\n cv2.drawContours(hsv, [contour], -1, colour, 2)\r\n\r\n\r\n grayCLAHE[cnr[0]:otherCnr[0],cnr[1]:otherCnr[1]] = trans*255\r\n # grayCLAHE[cnr[0]:otherCnr[0],cnr[1]:otherCnr[1]] = transGray\r\n\r\n if values[0] != -1:\r\n data = code.decodeInner(values[1], values[2])\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(grayCLAHE,str(data[0])+\"(\"+str(data[1])+\", \"+str(data[2])+\") \"+str(values[0])\r\n ,(cnr[1],cnr[0]), font, 0.4,(255),1,cv2.LINE_AA)\r\n\r\n\r\n res = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\r\n\r\n cv2.imshow('res', res)\r\n # cv2.imshow('frame', frame)\r\n cv2.imshow('grayCLAHE', grayCLAHE)\r\n cv2.imshow('bw2', bw2*255)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"dncnmcdougall/OpenAr","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27522626362","text":"class observable:\n\n def __init__(self):\n self.triggers = {}\n self.watchers = {}\n\n def addTrigger(self, forThisFunction, when, whatToDo):\n\n if forThisFunction not in self.triggers.keys():\n self.triggers[forThisFunction] = {\"BEFORE\": [], \"AFTER\": []}\n\n where = None\n\n if when in [\"BEFORE\", 0, False]:\n where = \"BEFORE\"\n else:\n if when in [\"AFTER\", 1, True]:\n where = \"AFTER\"\n\n if where is not None:\n self.triggers[forThisFunction][where].append(whatToDo)\n\n def addWatcher(self, forThisFunction, when, whatToDo):\n # la apelare e de preferat ca cbk sa fie definita cu cbk(*args,**kwargs)\n # sau simplu cbk(*args)\n # EX:\n # >>> def g(*args):\n # ... print(\"After watcher\")\n # ... import ujson\n # ... print(ujson.dumps(args))\n # ... \n # ... \n # ... \n # >>> vt.addWatcher(\"heartbeat\",\"AFTER\",g)\n\n if forThisFunction not in self.watchers.keys():\n self.watchers[forThisFunction] = {\"BEFORE\": [], \"AFTER\": []}\n\n where = None\n\n if when in [\"BEFORE\", 0, False]:\n where = \"BEFORE\"\n else:\n if when in [\"AFTER\", 1, True]:\n where = \"AFTER\"\n\n if where is not None:\n self.watchers[forThisFunction][where].append(whatToDo)\n\n def _trigger(func):\n def wrapper(self, *args, **kwargs):\n\n executeThese = {}\n if func.__name__ in self.triggers.keys():\n executeThese = self.triggers[func.__name__]\n\n if \"BEFORE\" in executeThese and len(executeThese[\"BEFORE\"]) > 0:\n for ebf in executeThese[\"BEFORE\"]:\n ebf(*args, **kwargs)\n\n out = func(self, *args, **kwargs)\n\n if \"AFTER\" in executeThese and len(executeThese[\"AFTER\"]) > 0:\n for ebf in executeThese[\"AFTER\"]:\n ebf(*args, **kwargs)\n\n return out\n return wrapper\n\n def _watch(func):\n def wrapper(self, *args, **kwargs):\n\n executeThese = {}\n\n if func.__name__ in self.watchers.keys():\n executeThese = self.watchers[func.__name__]\n\n if \"BEFORE\" in executeThese and len(executeThese[\"BEFORE\"]) > 0:\n for ebf in executeThese[\"BEFORE\"]:\n ebf(*args)\n\n out = func(self, *args, **kwargs)\n\n if \"AFTER\" in executeThese and len(executeThese[\"AFTER\"]) > 0:\n for ebf in executeThese[\"AFTER\"]:\n ebf(*args)\n\n return out\n return wrapper\n","repo_name":"mekanixms/mqHome","sub_path":"abstractObservable.py","file_name":"abstractObservable.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23547242141","text":"f = open(\"e.in\")\nfout = open(\"e.out\", \"w\")\n\ninput_str = f.readline()\nt = int(input_str)\n\nfor case in range(1, t + 1):\n input_str = f.readline()\n splitted = input_str.split()\n pline = splitted[0]\n arr = [False if ch == '-' else True for ch in pline]\n w = int(splitted[1])\n l = len(arr)\n\n out = 0\n it = 0\n while it < l - w:\n if not arr[it]:\n out += 1\n for jt in range(it, it + w):\n arr[jt] = not arr[jt]\n it += 1\n\n if not arr[it]:\n out += 1\n result = True\n for jt in range(it, l - 1):\n if arr[jt] != arr[jt + 1]:\n fout.write(\"Case #%d: IMPOSSIBLE\\n\" % case)\n result = False\n break\n\n if result:\n fout.write(\"Case #%d: %d\\n\" % (case, out))\n\nf.close()\nfout.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/371.py","file_name":"371.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32726743273","text":"answer = 0\nR = 0\nC = 0\nrow_stat_truth = []\n\ndef backtrack(A, depth, row_stat, column_stat):\n global R, C, answer, row_stat_truth\n for p in A:\n print('-----' * depth, depth, p)\n print()\n if depth == R * C:\n if any(map(lambda i:i%2, row_stat)) : return\n elif row_stat_truth != row_stat: return\n print('true!!!!!!!')\n answer += 1\n else:\n r = depth // R\n c = depth % C\n\n if column_stat[c] == 0: return\n\n for i in range(2):\n A[r][c] = i\n new_row_stat = [*row_stat]\n new_row_stat[r] += i\n\n new_column_stat = [*column_stat]\n new_column_stat[c] -= 1\n \n backtrack([[A[rr][cc] for cc in range(C)] for rr in range(R)], depth+1, new_row_stat, new_column_stat)\n\ndef solution(a):\n global R, C, answer, row_stat_truth\n BIG_NUMBER = 10**7 + 19\n answer = 0\n R = len(a)\n C = len(a[0])\n\n A = [[0 for _ in range(C)] for __ in range(R)]\n row_stat_truth = [sum([a[_][i] for i in range(C)]) for _ in range(R)]\n\n row_stat = [0 for _ in range(R)]\n column_stat = [sum([a[i][_] for i in range(R)]) for _ in range(C)]\n\n backtrack(A, 0, row_stat, column_stat)\n\n return answer\n\nprint(solution([[1,0],[0,1]]))\n\n","repo_name":"ghleokim/algorithm","sub_path":"200910/prgmrs-tryout-04.py","file_name":"prgmrs-tryout-04.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5632132667","text":"import os\n\n\ndef build(gen, env):\n env = env.clone()\n\n env['CPPPATH'] += ['src/libs/leveldb', 'src/libs/leveldb/include']\n # needed for strdup\n env['CPPFLAGS'] += ['-D_GNU_SOURCE']\n\n # shut off warnings\n env['CXXFLAGS'] += [\n '-Wno-sign-conversion',\n '-Wno-unused-parameter',\n '-Wno-implicit-fallthrough',\n '-Wno-unused-function',\n '-Wno-psabi',\n ]\n\n # build all files except tests and other envs\n files = env.glob(gen, 'table/*.cc') + env.glob(gen, 'db/*.cc') + env.glob(gen, 'util/*.cc')\n files = [f for f in files\n if not os.path.basename(f).endswith('_test.cc') and\n os.path.basename(f) != 'env_windows.cc' and\n os.path.basename(f) != 'testutil.cc']\n\n lib = env.static_lib(gen, out='leveldb', ins=files)\n env.install(gen, env['LIBDIR'], lib)\n","repo_name":"Barkhausen-Institut/M3-leveldb","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24951427521","text":"\"\"\"This script allows you to communicate with a Zebra printer over USB or TCP/IP\"\"\"\nimport socket\nimport binascii\nimport logging\nimport os\nimport subprocess\nimport sys\nimport threading\nimport time\nfrom datetime import datetime\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\nimport usb.core\nimport usb.util\nfrom pyfiglet import Figlet\n\n\nclass zebraPrinter:\n\n def __init__(self, dev): # define the constructor\n\n self.dev = dev\n self.intf = None\n\n def get_printer(self):\n\n try:\n # find all printers with the Zebra id (This supposes there is only\n # one)\n self.dev = usb.core.find(idVendor=0xa5f)\n self.dev.reset()\n # if self.dev is None:\n if self.dev.is_kernel_driver_active(0):\n print(\"Detaching kernel driver\")\n self.dev.detach_kernel_driver(0)\n except ValueError:\n print('Device not found')\n\n def set_configuration(self):\n\n self.dev.set_configuration()\n cfg = self.dev.get_active_configuration()\n self.intf = cfg[(0, 0)]\n return self.intf\n\n def get_out_endpoints(self):\n\n epo = usb.util.find_descriptor(\n self.intf,\n custom_match=lambda e:\n usb.util.endpoint_direction(e.bEndpointAddress) ==\n usb.util.ENDPOINT_OUT)\n return epo\n\n def get_in_endpoints(self):\n epi = usb.util.find_descriptor(\n self.intf,\n custom_match=lambda e:\n usb.util.endpoint_direction(e.bEndpointAddress) ==\n usb.util.ENDPOINT_IN)\n return epi\n\n def format_commands(self,cmd):\n\n print(\"\\ncmd_string:\", cmd)\n cmd_bytes = bytearray(cmd.encode('utf-8'))\n result = ''\n for cmd_byte in cmd_bytes:\n hex_byte = (\"{0:02x}\".format(cmd_byte))\n result += hex_byte\n return result\n\n def command_loop(self):\n \n cmds = []\n while True:\n try:\n cmd = input()\n cmds.append(cmd)\n print(cmds)\n except EOFError:\n return cmds\n \n def send_to_printer(self, result):\n epo = self.get_out_endpoints()\n print(\"Sending...\")\n self.dev.write(epo, bytearray.fromhex(result))\n t = datetime.utcnow()\n print(t)\n\n def read_response(self):\n \n try: \n epi = self. 
get_in_endpoints() \n for i in range (0, 7): #This is really for long responses...unfortunately it doesn't seem to work correctly.\n #print(\"response: \", binascii.hexlify(bytearray(ret)))\n #ret = bytearray(ret, 'utf-8')\n ret = self.dev.read(epi, epi.wMaxPacketSize)\n ret = bytearray(ret).decode()\n print(ret)\n except usb.core.USBError as e:\n print(e)\n\n\n def iter_cmds_loop(self, cmds):\n \n for cmd in cmds:\n r = self.format_commands(cmd)\n self.send_to_printer(r)\n self.read_response()\n self.dispose()\n\n def dispose(self):\n usb.util.dispose_resources(self.dev)\n self.dev = None\n\n\ndef file_reader():\n root = Tk()\n filename = askopenfilename(filetypes=[(\"Text files\",\"*.txt\")])\n root.destroy()\n with open(filename) as f: \n cmds = f.readlines()\n cmds = [x.strip() for x in cmds]\n return cmds\n\n\nclass mysocket:\n '''demonstration class only\n - coded for clarity, not efficiency\n '''\n\n def __init__(self, sock=None):\n self.send_chunks = []\n self.chunks = []\n self.chunk = None\n self.msgsize = 0\n self.args = 87\n self.totalsent = 0\n self.submesg = None\n self.sent = 0\n self.bytessent = 0\n self.f = None\n self.MSGLEN = None\n self.msglgth = 0\n self.f = None\n self.sock = None\n\n if sock is None:\n self.sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port, args=None):\n self.sock.connect((host, port))\n if args is not None :\n self.args = args\n\n def mysend(self, msg):\n\n self.submsg = (str(self.args) + ',').encode()\n self.sent = self.sock.send(self.submsg)\n self.totalsent = self.totalsent + len(self.submsg)\n self.send_chunks.append(self.submsg)\n\n #msg = \"\".join(i for i in msg if i not in \"\\/:*?<>|\")\n\n self.msgsize = 0\n try:\n #self.submsg = self.f.read(random.randint(1,32768))\n self.submsg = msg.encode()\n #print ('msg=%s' % self.submsg)\n #self.submsg = self.f.read(random.randint(1,2048))\n #self.submsg = self.f.read(32768)\n while len(self.submsg) != 0:\n self.bytessent = 0\n self.msglgth = len(self.submsg)\n #print ('len(self.submsg) = %d' % self.msglgth)\n while self.bytessent < self.msglgth :\n self.sent = self.sock.send(self.submsg[self.bytessent:])\n if self.sent == 0:\n #raise RuntimeError(\"socket connection broken\")\n self.msgsize = self.totalsent\n break\n self.bytessent = self.bytessent + self.sent\n self.totalsent = self.totalsent + self.sent\n if self.bytessent == self.msglgth :\n #time.sleep(random.randint(1,3));\n self.send_chunks.append(self.submsg)\n #self.submsg = self.f.read(2048)\n #self.submsg = self.f.read(random.randint(1,32768))\n self.submsg = self.f.read(32768)\n #self.submsg = self.f.read(random.randint(1,2048))\n else :\n self.send_chunks.append(self.submsg[(self.totalsent - self.bytessent):self.bytessent])\n break\n\n #if (totalsent > 50000) :\n # break # to cut short the reads above\n\n #break # to cut short the reads above\n\n self.sock.shutdown(self.sock.SHUT_WR)\n except Exception as e:\n print(e)\n finally:\n #print ('c :(%r) totalsent=%d\\n' % (self.args, self.totalsent))\n #sys.exit(0)\n self.msgsize = self.totalsent\n return str(b''.join(self.send_chunks))\n\n def myreceive(self):\n #print ('self.msgsize=%d\\n' % self.msgsize)\n #sys.exit(1)\n #s = []\n self.chunks = []\n try :\n self.totalbytes = 0\n #while self.msgsize > self.totalbytes :\n while True :\n self.chunk = self.sock.recv(32768)\n\n if not self.chunk :\n # resource temporarily unavailable\n #print ('returning resource unavailable')\n #return ''.join(self.chunks)\n return 
self.chunks\n elif len(self.chunk) == 0 :\n # EOF\n #print ('returning EOF')\n #return ''.join(self.chunks)\n return self.chunks\n\n self.chunks.append(self.chunk)\n self.totalbytes = self.totalbytes + len(self.chunk)\n\n #print ('len of self.chunk = %d' % len(self.chunk))\n if (not self.chunk.endswith(b'\"')):\n continue\n\n # for zebra test\n s = b''.join(self.chunks)\n #print (s)\n return s.decode()\n\n #print ('client self.msgsize=%d, totalbytes=%d\\n' % (self.msgsize, self.totalbytes))\n except Exception as e :\n print(e)\n finally :\n #print ('returning at finally')\n return (b''.join(self.chunks)).decode()\n\nclass menuHandler():\n\n def menu(self):\n f = Figlet(font='standard', width=440)\n print(f.renderText('Configurator'))\n\n for x in range(0, 3):\n print('\\n')\n print('In order to use USB, you must run this script as sudo')\n print('Enter an option: ')\n print('(c) - command list')\n print('(u) - send commands over USB')\n print('(i) - send commands over TCP/IP ------Not working -_-') \n print('(o) - open commands file')\n\n def getChoice(self):\n\n i = input()\n return i\n\n def help_page(self):\n\n subprocess.call([\"vi\", \"-R\", \"help.txt\"])\n\n while True:\n print(\"Press 'r' to return to the menu or 'q' to quit\")\n i = input()\n if i == 'r':\n main()\n elif i == 'q':\n sys.exit()\n else:\n print(\"I'm sorry that's not a valid option\")\n\n def command_menu(self):\n print(\"\\n\" * 50)\n print(\" Enter command(s) or press \\'q\\' to quit\" + '\\r\\n')\n print(\n \"Once all the desired commands are entered, press Ctrl + D to submit\" +\n '\\r\\n')\n print(\"Format: ! U1 setvar \\\"ip.addr\\\"\")\n \ndef main():\n \n m = menuHandler()\n #dev = '' \n #z = zebraPrinter(dev) # Instantiate the Printer Object\n #z.get_printer() # Claim the Printer -> Resolve dev to the printer\n #z.set_configuration() # Claim the intf\n #s = mysocket()\n print(\"\\n\" * 50)\n m.menu()\n choice = m.getChoice()\n if choice == 'c':\n m.help_page()\n if choice == 'u':\n dev = '' \n z = zebraPrinter(dev) \n z.get_printer() \n z.set_configuration()\n m.command_menu()\n cmds = z.command_loop()\n z.iter_cmds_loop(cmds)\n if choice == 'i':\n s = mysocket()\n print(\"Enter the printer IP & Port in the following format - 192.168.1.1.8080\")\n connection = input()\n connection.split(':')\n host = connection[0]\n port = connection[1]\n s.connect(host, port)\n msg = input()\n s.mysend(msg)\n m = s.myreceive()\n print(m)\n if choice == \"o\":\n dev = '' \n z = zebraPrinter(dev) \n z.get_printer() \n z.set_configuration()\n cmds = file_reader()\n print(cmds)\n z.iter_cmds_loop(cmds) \n print(\"Select (r) to return the command option or (m) for the main menu\")\n choice = input()\n if choice == 'm':\n print(\"\\n\" * 100)\n m.menu()\n m.getChoice()\n elif choice == 'r':\n m.command_menu()\n z.get_printer()\n z.set_configuration\n cmds = z.command_loop()\n z.iter_cmds_loop(cmds)\n \n else:\n print(\"Sorry....Nope\")\n m.getChoice()\n\nif __name__ == '__main__':\n while True:\n main()\n","repo_name":"lucas-hopkins/zebrapy","sub_path":"usb_0.4.py","file_name":"usb_0.4.py","file_ext":"py","file_size_in_byte":11031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20566076418","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport seaborn as sns\n\nfrom sklearn.preprocessing import LabelEncoder,LabelBinarizer,OrdinalEncoder,MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report, f1_score\n\n\ndf = pd.read_csv('./data/churndata_processed.csv')\n#df = churndata.drop(columns=['id','phone','total_revenue','cltv','churn_score'],axis=1)\nprint(round(df.describe(),2))\n\ndf_uniques = df.nunique()\nprint('\\n\\n')\nprint(df_uniques)\n\nbinary_variables = list(df_uniques[df_uniques == 2].index)\nprint('\\n\\n')\nprint(binary_variables)\n\ncategorical_variables = list(df_uniques[(df_uniques > 2) & (df_uniques <=6)].index)\nprint('\\n\\n')\nprint(categorical_variables)\n\nprint('\\n\\n')\nprint([[i, list(df[i].unique())] for i in categorical_variables])\n\nordinal_variables = ['contract','satisfaction']\nprint('\\n\\n')\nprint(df['months'].unique())\n\nordinal_variables.append('months')\n\nnumeric_variables = list(set(df.columns) - set(ordinal_variables) - set(categorical_variables) - set(binary_variables))\n\ndf['months'] = pd.cut(df['months'], bins=5)\n\nlb , le = LabelBinarizer(), LabelEncoder()\n\nfor column in ordinal_variables:\n df[column] = le.fit_transform(df[column])\n\nfor column in binary_variables:\n df[column] = lb.fit_transform(df[column])\n\ncategorical_variables = list(set(categorical_variables) - set(ordinal_variables) - set(binary_variables))\n\ndf = pd.get_dummies(df, columns= categorical_variables, drop_first=True)\n\nprint('\\n\\n')\nprint(df.describe().T)\n\nmm = MinMaxScaler()\n\nfor column in [ordinal_variables + numeric_variables]:\n df[column] = mm.fit_transform(df[column])\n\n\nprint('\\n\\n')\nprint(df.describe().T)\n\ny, x = df['churn_value'], df.drop(columns='churn_value')\n\nx_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.4, random_state=42)\n\nknn = KNeighborsClassifier(n_neighbors=3)\nknn = knn.fit(x_train,y_train)\n\ny_pred = knn.predict(x_test)\n\nprint('\\n\\nK=3')\nprint(classification_report(y_test,y_pred))\nprint('Accuracy Score',round(accuracy_score(y_test,y_pred),2))\nprint('F1 Score',round(f1_score(y_test,y_pred),2))\n\n\nknn = KNeighborsClassifier(n_neighbors=5, weights='distance')\nknn = knn.fit(x_train,y_train)\ny_pred = knn.predict(x_test)\n\nprint('\\n\\nK=5')\nprint(classification_report(y_test,y_pred))\nprint('Accuracy Score',round(accuracy_score(y_test,y_pred),2))\nprint('F1 Score',round(f1_score(y_test,y_pred),2))\n\nmax_k = 40\nf1_scores = list()\nerror_rates = list()\n\nfor k in range(1,max_k):\n knn = KNeighborsClassifier(n_neighbors=k, weights='distance')\n knn = knn.fit(x_train,y_train)\n y_pred = knn.predict(x_test)\n f1 = f1_score(y_test,y_pred)\n f1_scores.append((k,round(f1,4)))\n error = 1- round(accuracy_score(y_test,y_pred),4)\n error_rates.append((k,error))\n\nf1_results = pd.DataFrame(f1_scores,columns=['K','F1 Score'])\nerror_results = pd.DataFrame(error_rates,columns=['K','Error Rate'])\n\nprint('\\n\\n')\nprint(f1_results)\n\nprint('\\n\\n')\nprint(error_results)\n","repo_name":"DhruvBandaria/PracticeCourse3","sub_path":"Lab03_B.py","file_name":"Lab03_B.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17989225885","text":"# AoC 2018 - Day 2b\n\ndef load_data():\n with open('input.txt', 'r') as infile:\n d = infile.readlines()\n return [x.strip() for x in d]\n\ndef find_match(data):\n for x in data:\n for y in data:\n count = 0\n common = []\n for diff in zip(x, y):\n if diff[0] != diff[1]:\n count += 1\n else:\n common.append(diff[0])\n if count == 1:\n print(f'{x} - {y} - commom is {\"\".join(common)}')\n \ndef main():\n data = load_data()\n find_match(data)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Azcobu/advent-of-code","sub_path":"2018/day02/aoc18-2b.py","file_name":"aoc18-2b.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12215896904","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom libs import InPlaceABNSync\n\nclass _ASPPModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):\n super(_ASPPModule, self).__init__()\n self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n stride=1, padding=padding, dilation=dilation, bias=False)\n self.bn = BatchNorm(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass PAM_Module(nn.Module):\n \"\"\" Position attention module\"\"\"\n\n # Ref from SAGAN\n def __init__(self, in_dim):\n super(PAM_Module, self).__init__()\n self.chanel_in = in_dim\n\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X H X W)\n returns :\n out : attention value + input feature\n attention: B X (HxW) X (HxW)\n \"\"\"\n m_batchsize, C, height, width = x.size()\n proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1)\n proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)\n energy = torch.bmm(proj_query, proj_key)\n attention = self.softmax(energy)\n proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)\n\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(m_batchsize, C, height, width)\n\n out = self.gamma * out + x\n return out\n\nclass RCCAModule(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(RCCAModule, self).__init__()\n inter_channels = in_channels // 4\n self.channels = in_channels // 4\n\n self.conva = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n InPlaceABNSync(inter_channels))\n\n self.cca = PAM_Module(inter_channels)\n\n self.convb = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n InPlaceABNSync(inter_channels))\n\n self.bottleneck = nn.Sequential(\n nn.Conv2d(in_channels+inter_channels, out_channels, kernel_size=3, padding=1, dilation=1, bias=False),\n InPlaceABNSync(out_channels)\n )\n\n\n def forward(self, x, recurrence=1):\n output = self.conva(x)\n\n for i in range(recurrence):\n output = self.cca(output)\n output = self.convb(output)\n\n output = self.bottleneck(torch.cat([x, output], 1))\n\n return output\n\n\nclass ASPP(nn.Module):\n def __init__(self, backbone, output_stride, BatchNorm):\n super(ASPP, self).__init__()\n if backbone == 'drn':\n inplanes = 512\n elif backbone == 'mobilenet':\n inplanes = 320\n else:\n inplanes = 2048\n if output_stride == 16:\n dilations = [1, 6, 12, 18]\n elif output_stride == 8:\n dilations = [1, 12, 24, 36]\n else:\n raise NotImplementedError\n\n self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, 
dilation=dilations[0], BatchNorm=BatchNorm)\n self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)\n self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)\n self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)\n inter_channels = 256\n\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),\n BatchNorm(256),\n nn.ReLU())\n\n self.context = RCCAModule(inter_channels, inter_channels)\n\n self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)\n self.bn1 = BatchNorm(256)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n\n\n self._init_weight()\n\n def forward(self, x, recurrence=1):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n\n x5 = self.global_avg_pool(x)\n\n # pytorch 0.4.0\n x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n # 0.4.1\n # x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.dropout(x)\n\n x = self.context(x)\n\n return x\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\ndef build_aspp(backbone, output_stride, BatchNorm):\n return ASPP(backbone, output_stride, BatchNorm)\n","repo_name":"dawn5527/CS-Net","sub_path":"modeling/aspp.py","file_name":"aspp.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"23037632401","text":"import csv\nfrom modules.DataTraining import spacys_mom as spm\nimport wordninja as wj\nimport numpy as np\n\n### SPACY STUFF\n\nnlp = spm.SpacyWrapper()\n\ndef find_similar(properties, filepath):\n # Load spaCy's NLP dictionaries\n # Slice and Lemmatize properties\n properties_sl = []\n for prop in properties:\n prop_sl = split_and_lemmatize(prop)\n properties_sl.append(prop_sl)\n # Get ALL column headers from file\n headers = get_headers(filepath)\n # Slice and Lemmatize all headers\n headers_sl = []\n for header in headers:\n header_sl = split_and_lemmatize(header)\n headers_sl.append(header_sl)\n # Compare each property to each header\n classification = {}\n for index in range(len(properties)):\n classification[properties[index]] = cmp_prop_to_headers(properties[index], headers, properties_sl[index], headers_sl)\n # Populate dict and return\n results = (filepath, classification)\n return results\n\n### Comparison Functions\n\n# NOTE: Both prop_sl and headers_sl may be broken\n# into multiple English words, so they are\n# ARRAYS not STRINGS\ndef cmp_prop_to_headers(orig_prop, orig_headers, prop_sl, headers_sl):\n # Loop over each property's words\n related_headers = []\n for index in range(len(orig_headers)):\n header_sl = headers_sl[index]\n is_related = cmp_prop_to_header(prop_sl, header_sl)\n if is_related:\n related_headers.append(orig_headers[index])\n return related_headers \n\ndef cmp_prop_to_header(prop_sl, header_sl):\n means_all = []\n for word in prop_sl:\n word_spacy = nlp.process(word)\n cmp_values = []\n for header in header_sl:\n header_spacy = nlp.process(header)\n sim_value = nlp.compare(word_spacy, header_spacy)\n # If a lemmatized word from both header and property\n # are very closely related, return it regardless\n # of overall average. \n if sim_value > 0.95:\n return True\n else:\n cmp_values.append(sim_value)\n mean_hdr = np.mean(cmp_values)\n means_all.append(mean_hdr)\n mean_all = np.mean(means_all)\n # TODO: Strictness of \"relatedness\" needs tweaking. \n if mean_all > 0.8:\n return True\n else:\n return False\n\n### Util Functions\n\ndef split_and_lemmatize(input):\n split_words = slice_word(input)\n input_lemma = []\n for word in split_words:\n word_tok = nlp.process(word)\n word_lemma = lemmatize_word(word_tok)\n input_lemma.append(word_lemma)\n return input_lemma\n\n# Params: input = a spaCy token\n# Return: String\n# EX: \"reviews\" -> \"review\"\n# EX: \"thought\" -> \"think\"\ndef lemmatize_word(input):\n return input[0].lemma_\n\n# Params: input = given String\n# Return: [sub-word1, sub-word2, ...]\n# EX: \"review_date\" -> ['review', 'date']\n# EX: \"reviewernameslast\" -> ['reviewer', 'names', 'last']\ndef slice_word(input):\n return wj.split(input)\n\n# Params: file = path to given CSV file as a String\n# Return: [ header1, header2, ... ]\ndef get_headers(file):\n headers = []\n with open(file) as f:\n reader = csv.reader(f)\n headers = next(reader)\n return headers\n","repo_name":"DataPlumbers/WebAppBackEnd","sub_path":"python-flask/modules/DataTraining/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"7755753162","text":"class IndentPrinter():\n def __init__(self, indent=0):\n self.indent = indent\n\n def print(self, string):\n print(('| ' * self.indent) + string)\n\n\nprinter = IndentPrinter()\n\n\ndef expression(tokens, rbp=0):\n global curr\n printer.print('###################')\n printer.print(\"in expression()\")\n t = curr\n curr = tokens.pop(0)\n printer.print(\"t = {}\".format(t))\n printer.print(\"curr = {}\".format(curr))\n left = t.nud()\n\n printer.print(\"rbp = {}, curr.lbp = {}\".format(rbp, curr.lbp))\n while rbp < curr.lbp:\n t = curr\n curr = tokens.pop(0)\n left = t.led(left, tokens)\n printer.print(\"rbp = {}, curr.lbp = {}\".format(rbp, curr.lbp))\n\n printer.print('###################')\n printer.indent -= 1\n return left\n\nclass literal_token(object):\n def __init__(self, value):\n self.value = value\n def nud(self):\n return self.value\n\n def __str__(self):\n return self.value\n\nclass op_add_token(object):\n lbp = 10\n def led(self, left, tokens):\n printer.indent += 1\n right = expression(tokens, 10)\n return ['+', left, right]\n\n def __str__(self):\n return '+'\n\nclass op_mul_token(object):\n lbp = 20\n def led(self, left, tokens):\n printer.indent += 1\n return ['*', left, expression(tokens, 20)]\n\n def __str__(self):\n return '*'\n\nclass end_token(object):\n lbp = 0\n\ndef prep_tokens(tokens):\n new = []\n for t in tokens:\n if t == '+':\n new.append(op_add_token())\n elif t == '*':\n new.append(op_mul_token())\n else:\n new.append(literal_token(t))\n\n new.append(end_token())\n\n return new\n\n\ndef parse(tokens):\n global curr\n tokens = prep_tokens(tokens)\n curr = tokens.pop(0)\n return expression(tokens)\n\n\nprint(parse(['3', '+', '4', '*', '5']))\n","repo_name":"nham/vaughan","sub_path":"eli2.py","file_name":"eli2.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25552828008","text":"from marshmallow import fields, ValidationError\nfrom yarl import URL\n\n\nclass NoneString(fields.String):\n def _serialize(self, value, attr, obj, **kwargs):\n if value is None:\n return None\n if not isinstance(value, str):\n raise ValidationError(\"value must be a string or None\")\n if not value or not value.strip():\n return None\n return super(NoneString, self)._serialize(value, attr, obj, **kwargs)\n\n\nclass URLField(fields.String):\n def _serialize(self, value, attr, obj, **kwargs):\n if value is None:\n return None\n if not isinstance(value, (URL, str)):\n raise ValidationError(\"value must be a URL, string, or None\")\n value = str(value)\n if not value.strip():\n return None\n return super(URLField, self)._serialize(value, attr, obj, **kwargs)\n\n def _deserialize(self, value, attr, data, **kwargs):\n if not isinstance(value, (str, bytes)):\n raise self.make_error(\"invalid\")\n try:\n return URL(value)\n except Exception as error:\n raise self.make_error(\"invalid\") from error\n","repo_name":"DBeath/feedsearch-gateway","sub_path":"gateway/schema/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"1351449582","text":"from dataclasses import field\n\nimport rulez\nfrom morpcc.crud.view.edit import edit as default_edit\nfrom morpfw import request\nfrom morpfw.crud import permission as crudperm\n\nfrom ..app import App\nfrom .modelui import AttributeCollectionUI, AttributeModelUI\n\n\n@App.html(\n model=AttributeModelUI,\n name=\"edit\",\n template=\"morpcc_ttw/attribute/edit.pt\",\n permission=crudperm.Edit,\n)\ndef edit(context, request):\n return default_edit(context, request)\n\n\n@App.json(\n model=AttributeCollectionUI,\n name=\"reorder\",\n request_method=\"POST\",\n permission=crudperm.Edit,\n)\ndef reorder(context, request):\n mapping = request.json[\"mapping\"]\n collection = context.collection\n attrs = collection.search(\n rulez.field[\"entity_uuid\"] == request.GET.get(\"entity_uuid\"),\n order_by=(\"order\", \"asc\"),\n )\n attrs = list(sorted(attrs, key=lambda x: [x[\"order\"], x[\"created\"]]))\n count = 0\n field_orders = {}\n for attr in attrs:\n field_orders[attr[\"name\"]] = {\"order\": count, \"obj\": attr}\n count += 1\n new_mapping = []\n for m in mapping:\n # ord_old = field_orders[m[\"old\"]][\"order\"]\n ord_new = field_orders[m[\"new\"]][\"order\"]\n\n new_mapping.append((m[\"old\"], ord_new))\n\n for no in new_mapping:\n field_orders[no[0]][\"order\"] = no[1]\n\n for fo in field_orders.values():\n if fo[\"order\"] != fo[\"obj\"][\"order\"]:\n fo[\"obj\"].update({\"order\": fo[\"order\"]}, deserialize=False)\n\n return {}\n","repo_name":"morpframework/morpcc_ttw","sub_path":"morpcc_ttw/attribute/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27971503907","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('status', models.IntegerField(choices=[(0, 'Unpaid'), (1, 'Paid'), (2, 'Error')], default=0)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('redeemed', models.BooleanField(default=False)),\n ],\n options={\n 'verbose_name': 'Order',\n 'verbose_name_plural': 'Orders',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('subtitle', models.CharField(max_length=500, blank=True)),\n ('description', models.TextField(blank=True)),\n ('amount', models.DecimalField(decimal_places=2, max_digits=10)),\n ('currency', models.CharField(max_length=3, choices=[('CHF', 'CHF')], default='CHF')),\n ('instant_delivery', models.BooleanField(default=False)),\n ('delivery_command', models.TextField(blank=True)),\n ],\n options={\n 'verbose_name': 'Product',\n 'verbose_name_plural': 'Products',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='order',\n name='product',\n field=models.ForeignKey(to='shop.Product', related_name='orders'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='order',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='orders'),\n preserve_default=True,\n ),\n ]\n","repo_name":"julianwachholz/thefarland","sub_path":"apps/shop/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14160782982","text":"#Bernardo Flores\r\n#stock sale revised but not sure how this is suppose to work or\r\n#how stock gain or loss works\r\n\r\ndef main():\r\n print (\"This program computes the result of a stock transaction.\")\r\n start_balance = float(input(\"Enter your starting balance: \"))\r\n user_choice = \"Yes\"\r\n while (user_choice != \"No\"):\r\n stock_name = input(\"What is the name of the chosen stock? \")\r\n purchased_shares = float(input(\"Enter purchased shared: \"))\r\n stock_bought = float(input(\"Enter amount paid for stock per share: \"))\r\n commission = (purchased_shares*stock_bought)\r\n print(format(commission, ',f')); print(\"is deducted from your balance.\")\r\n start_balance-=commission\r\n print(\"Balance is \", (format(start_balance, ',f')))\r\n user_choice = input(\"\\nWould you like to try agan (Yes/No) ? \")\r\n print(\"Goodbye.\")\r\nmain()\r\n##needs better format specifier when as in print\r\n##ex: 20.00 <--- it print outs .00000\r\n","repo_name":"AbstractBern/SchoolPortfolio","sub_path":"Python/assignment-projects/TT16_L2_Flores_STOCKSALE.py","file_name":"TT16_L2_Flores_STOCKSALE.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8053127223","text":"r\"\"\"\n===================================================\nB108: Test for insecure usage of tmp file/directory\n===================================================\n\nSafely creating a temporary file or directory means following a number of rules\n(see the references for more details). This plugin test looks for strings\nstarting with (configurable) commonly used temporary paths, for example:\n\n - /tmp\n - /var/tmp\n - /dev/shm\n - etc\n\n**Config Options:**\n\nThis test plugin takes a similarly named config block,\n`hardcoded_tmp_directory`. The config block provides a Python list, `tmp_dirs`,\nthat lists string fragments indicating possible temporary file paths. Any\nstring starting with one of these fragments will report a MEDIUM confidence\nissue.\n\n.. code-block:: yaml\n\n hardcoded_tmp_directory:\n tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm']\n\n\n:Example:\n\n.. code-block: none\n\n >> Issue: Probable insecure usage of temp file/directory.\n Severity: Medium Confidence: Medium\n Location: ./examples/hardcoded-tmp.py:1\n 1 f = open('/tmp/abc', 'w')\n 2 f.write('def')\n\n.. seealso::\n\n - https://security.openstack.org/guidelines/dg_using-temporary-files-securely.html # noqa\n\n.. versionadded:: 0.9.0\n\n\"\"\"\n\nimport bandit\nfrom bandit.core import test_properties as test\n\n\ndef gen_config(name):\n if name == 'hardcoded_tmp_directory':\n return {'tmp_dirs': ['/tmp', '/var/tmp', '/dev/shm']}\n\n\n@test.takes_config\n@test.checks('Str')\n@test.test_id('B108')\ndef hardcoded_tmp_directory(context, config):\n if config is not None and 'tmp_dirs' in config:\n tmp_dirs = config['tmp_dirs']\n else:\n tmp_dirs = ['/tmp', '/var/tmp', '/dev/shm']\n\n if any(context.string_val.startswith(s) for s in tmp_dirs):\n return bandit.Issue(\n severity=bandit.MEDIUM,\n confidence=bandit.MEDIUM,\n text=\"Probable insecure usage of temp file/directory.\"\n )\n","repo_name":"zeroSteiner/bandit-ss","sub_path":"bandit/plugins/general_hardcoded_tmp.py","file_name":"general_hardcoded_tmp.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"38795020125","text":"from __future__ import absolute_import\r\n\r\nfrom django.shortcuts import render\r\nfrom utils.ajax import login_required_ajax\r\n\r\n\r\n@login_required_ajax\r\ndef openNewTab1CV(request):\r\n '''\r\n 개통업무 > 신규개통 : 단말기 컨텐츠\r\n '''\r\n return render(\r\n request,\r\n 'open/new/contents_tab1.html',\r\n {},\r\n )\r\n\r\n\r\n@login_required_ajax\r\ndef openNewTab2CV(request):\r\n '''\r\n 개통업무 > 신규개통 : 유심/중고 컨텐츠\r\n '''\r\n return render(\r\n request,\r\n 'open/new/contents_tab2.html',\r\n {},\r\n )\r\n\r\n\r\n@login_required_ajax\r\ndef openNewTab3CV(request):\r\n '''\r\n 개통업무 > 신규개통 : 홈상품 컨텐츠\r\n '''\r\n return render(\r\n request,\r\n 'open/new/contents_tab3.html',\r\n {},\r\n )\r\n","repo_name":"007babe/ntelRepo","sub_path":"ntelProject/src/open/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36527083813","text":"#!/usr/bin/python3\n\n'''\n[Jowi]\nGenera features indicando si el usuario vio/se postulo al aviso o no y cuantas veces lo vio.\n- num_vistas = Cantidad de veces que el usuario vio el aviso\n'''\n\nimport datasets.vistas as vistas\nimport datasets.postulaciones as postulaciones\n\nimport pandas as pd\nimport numpy as np\n \n\nclass VistasPorAviso:\n\n def get_name(self):\n return 'Vistas por aviso'\n\n def featurize(self, df):\n test1 = pd.merge(df[['idaviso', 'idpostulante']], vistas.df, on=['idaviso', 'idpostulante'], how='inner')\n test1['num_vistas'] = 1\n test1 = test1.groupby(['idaviso', 'idpostulante']).agg('count').reset_index()\n test1 = pd.merge(df[['idaviso', 'idpostulante']], test1, on=['idaviso', 'idpostulante'], how='left')\n test1['num_vistas'] = test1['num_vistas'].fillna(0).astype('int')\n\n return test1\n\n","repo_name":"sportelliluciano/tp2-orga-datos-1c2018","sub_path":"preprocesar_set/featurizers/vistas_por_aviso.py","file_name":"vistas_por_aviso.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30628295798","text":"\n\n##\n# Required Modules\n#------------------------------------------------------------------------------\nfrom signalhandler import Signal # base handler module\nfrom signal import SIGTERM, SIGINT # symbols for specific signals\n\n\n\n##\n# Create an object that handles terminating the application\n# upon receiving a SIGTERM or a SIGINT from an outside source\n#------------------------------------------------------------------------------\nclass TerminationSignalHandler( object ) :\n\n\n ##\n # Constructor - Defines the pointers to the Signal objects\n def __init__( self, term_func ) :\n super( TerminationSignalHandler, self ).__init__()\n self._handle = {\n SIGTERM: Signal( SIGTERM, term_func ),\n SIGINT: Signal( SIGINT, term_func )\n } # end self.signal\n","repo_name":"dmendyke/sandbox","sub_path":"stoppable_threads/termination.py","file_name":"termination.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43187994569","text":"from typing import Tuple, List, Optional\n\n# some helper\ngetter = lambda m,p : m[p[0]][p[1]]\n\nget_neighbours = lambda p: [ (p[0]-1, p[1]), (p[0]+1, p[1]), (p[0], p[1]-1), (p[0], p[1]+1) ]\n \ndef setter(m, p, v):\n m[p[0]][p[1]] = v\n\ndef calc_smallest_step_number(board :List[List[bool]], start :Tuple[int,int], end :Tuple[int,int]) -> Optional[int]:\n # Helper and Preconditions\n N,M = len(board), len(board[0])\n is_stepable= lambda p : 0 <= p[0] < N and 0 <= p[1] < M and not getter(board, p)\n assert is_stepable(start)\n assert is_stepable(end)\n\n distances = [ [None]*M for _ in range(N)]\n distances[end[0]][end[1]] = 0\n nexts = list()\n for neighbour in get_neighbours(end):\n if is_stepable(neighbour):\n nexts.append(neighbour)\n while len(nexts) > 0:\n point = nexts.pop()\n min_distance = N*M\n for neighbour in get_neighbours(point):\n if is_stepable(neighbour):\n distance = getter(distances, neighbour)\n if distance != None:\n min_distance = min(min_distance, distance + 1)\n else:\n nexts.append(neighbour)\n distances[point[0]][point[1]] = min_distance\n if point == start: break\n\n # for distance in distances: print(distance)\n return getter(distances, start)\n\n\nimport unittest\n\nclass TestCalcSmallestStepNumber(unittest.TestCase):\n\n def parse_board(self, board: str) -> List[List[int]]:\n board = board.strip()\n board = list(filter(lambda x : x in \"ft\\n\", board.strip()))\n matrix = list()\n matrix.append(list())\n row = 0\n for item in board:\n if item == \"\\n\":\n matrix.append(list())\n row += 1\n elif item == \"t\":\n matrix[row].append(True)\n elif item == \"f\":\n matrix[row].append(False)\n # check, if the board is a clumsy missformed matrix\n for row in matrix[1:]:\n assert len(matrix[0]) == len(row)\n return matrix\n\n def test_unreachable(self):\n board_str = \"\"\"\n fffff\n ftfft\n tfttf\n fffff\n \"\"\"\n \"\"\"\n 0 1 2 3 4\n 1 . 3 4 .\n . - . . -\n - - - - -\n \"\"\"\n\n board = self.parse_board(board_str)\n self.assertEqual(calc_smallest_step_number(board, (3,0), (0,0)), None)\n\n def test_example(self):\n board_str = \"\"\"\n [[f, f, f, f],\n [t, t, f, t],\n [f, f, f, f],\n [f, f, f, f]]\n \"\"\"\n \"\"\"\n 0 1 2 3\n . . 3 .\n 6 5 4 5\n 7 6 5 6\n \"\"\"\n board = self.parse_board(board_str)\n self.assertEqual(calc_smallest_step_number(board, (3,0), (0,0)), 7)\n self.assertEqual(calc_smallest_step_number(board, (0,0), (3,0)), 7)\n\n def test_long(self):\n board_str = \"\"\"\n ftffftfff\n ffftffftf\n ttttttttf\n ffftffftf\n ftffftfff\n ftttttttt\n ftffftfff\n ffftffftt\n \"\"\"\n \"\"\"\n 0 . 4 5 6 . 10 11 12\n 1 2 3 . 7 8 9 . 13\n . . . . . . . . 14\n 27 26 25 . 21 20 19 . 15\n 28 . 24 23 22 . 18 17 16\n 29 . . . . . . . .\n 30 . 34 35 36 . 40 41 42\n 31 32 33 . 37 38 39 . .\n ..........................\n \"\"\"\n board = self.parse_board(board_str)\n self.assertEqual(calc_smallest_step_number(board, (6,8), (0,0)), 42)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"kafawi/Daily-Coding-Problem","sub_path":"SOLUTIONS/p023/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43849890587","text":"\"\"\"\napp\n\"\"\"\n\nimport os\nfrom funcs import get_secret, refresh_token, set_secret\n\ndef handler(event,context):\n \"\"\"\n main function called by container\n \"\"\"\n\n token_data = get_secret(\n os.environ['AWS_SECRET_PATH_GOOGLE_TOKEN'],\n os.environ['AWS_DEFAULT_REGION']\n )\n\n creds_data = get_secret(\n os.environ['AWS_SECRET_PATH_GOOGLE_CRED'],\n os.environ['AWS_DEFAULT_REGION']\n )\n\n updated_token_data = refresh_token(\n creds_data,\n token_data\n )\n\n set_secret(\n os.environ['AWS_SECRET_PATH_GOOGLE_TOKEN'],\n updated_token_data,\n os.environ['AWS_DEFAULT_REGION']\n )\n\n return {\n 'statusCode': 201, #request succeeded and made changes\n 'body': 'success'\n }\n\n#for debugging in vscode and calling this file directly\nif __name__ == '__main__':\n handler(None,None)\n","repo_name":"ewhitesides/google_token_refresh","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3209069432","text":"class Solution:\n def minimumRecolors(self, blocks: str, k: int) -> int:\n length = len(blocks)\n ans = 999999\n if length <= k:\n return blocks.count('W')\n for i in range(length - k + 1):\n s = blocks[i: i + k]\n t = s.count(\"W\")\n if t < ans:\n ans = t\n return ans\n\nif __name__ == '__main__':\n solution = Solution()\n blocks = \"WWBBBWBBBBBWWBWWWB\"\n k=16\n res = solution.minimumRecolors(blocks, k)\n print(res)","repo_name":"foreverxujiahuan/algorithm","sub_path":"竞赛/A85/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"11829505178","text":"'''\n@Created by loka\n@Date : 2020/01/20\n'''\n\nimport unittest\nimport random\nfrom base.HTMLTestReportCN import HTMLTestRunner\nfrom base.httpRequest import HttpRequest\nfrom data_config import common_config\nfrom master_api.system_management import PortalManagement\nfrom master_api.account_login import User\nfrom data_config.system_config import systemSetting\n\n\nclass PortalManagementBaseTest(unittest.TestCase):\n \"\"\" 網站版面 - 相關 API 調用狀態\"\"\"\n\n def setUp(self):\n self.config = systemSetting() # 系統參數\n self.__http = HttpRequest()\n self.user = User(self.__http)\n self.siteParameter = PortalManagement.PortalManagement(self.__http)\n self.PortalManagement = PortalManagement(self.__http)\n self.user.login()\n\n # 登出\n def tearDown(self):\n self.user.logout()\n\n # 取站台ID\n def getWebsiteId(self):\n response_data = self.PortalManagement.getWebsiteList({})\n for i in range(len(response_data[1]['ReturnObject'])):\n if self.config.siteName_config() == response_data[1]['ReturnObject'][i]['Name']:\n Id = response_data[1]['ReturnObject'][i]['Id']\n return Id\n\n def test_GetProductSetting_relatedApi_status_01(self):\n \"\"\" 網站版面 - 取得網站版面獲取產品設置 電腦&直向&橫向 狀態\"\"\"\n ID = self.getWebsiteId()\n deviceType = [1, 2, 3]\n for x in deviceType:\n data = {\"device\": x, \"websiteId\": ID}\n response_data = self.siteParameter.GetProductSetting(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n def test_SaveProductSetting_relatedApi_status_02(self):\n \"\"\" 網站版面 - 修改標題名稱&前台顯示站名 電腦&直向&橫向 狀態\"\"\"\n ID = self.getWebsiteId()\n menu = random.randint(1, 50)\n deviceType = [1, 2, 3]\n for i in deviceType:\n data = {\n \"websiteId\": ID,\n \"device\": i,\n \"titleName\": \"Stage 测试站\" + str(menu),\n \"portalSiteName\": \"Stage 测试站\" + str(menu)\n }\n response_data = self.siteParameter.SaveProductSetting(data)\n status_code = response_data[0]\n self.assertEqual(status_code, common_config.Status_Code)\n\n # def test_SaveProductSetting_relatedApi_status_03(self):\n # \"\"\" 網站版面 - 修改標題ico 電腦&直向&橫向 狀態\"\"\"\n # # 我卡點在處...\n # ID = self.getWebsiteId()\n # menu = random.randint(1, 50)\n # deviceType = [1, 2, 3]\n # for i in deviceType:\n # data = {\n # \"websiteId\": ID,\n # \"device\": i,\n # \"titleName\": \"Stage 测试站\" + str(menu),\n # \"portalSiteName\": \"Stage 测试站\" + str(menu)\n # }\n # response_data = self.siteParameter.SaveProductSetting(data)\n # status_code = response_data[0]\n # self.assertEqual(status_code, common_config.Status_Code)\n\n\nif __name__ == '__main__':\n unittest.main(testRunner = HTMLTestRunner())\n","repo_name":"eos1209/auto_test","sub_path":"case/test_api/test_BaseCase/test_Master_Base_API_PortalPortalManagement.py","file_name":"test_Master_Base_API_PortalPortalManagement.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42621276257","text":"from ROOT import *\nimport ROOT as root\nimport numpy as np\nfrom array import array\n\ngStyle.SetOptStat(0)\n\n\ndef is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\nfnameTxT = \"/Users/elenag/Desktop/LArIATTrueCrossSec/KaonPlusG4.txt\"\n\nkineticEnergy = []\ncrossSec = []\nzero = []\n\n\ntitle = \"\"\nwith open(fnameTxT) as f:\n for fLine in f.readlines():\n w = fLine.split()\n if is_number(w[0]):\n runIn = int(w[0])\n ke = float(w[1])\n xstot = float(w[4])\n kineticEnergy.append(ke)\n crossSec.append(xstot)\n zero.append(0.)\n else:\n if \"for\" not in fLine: \n continue\n title = fLine[9:]\n\ng4x = array('f', kineticEnergy )\ng4y = array('f', crossSec)\ng4exl = array('f', zero)\ng4exr = array('f', zero)\n\n\nnPoints=len(g4x)\ngr = TGraphErrors ( nPoints , g4x , g4y , g4exl, g4exr )\ngr.SetTitle(title+\"; Kinetic Energy [MeV]; Cross Section [barn]\")\ngr . GetXaxis().SetRangeUser(0,1000)\ngr . GetYaxis().SetRangeUser(0,2.)\ngr . SetLineWidth(2) ;\ngr . SetLineColor(kGreen-2) ;\ngr . SetFillColor(0)\n\n#gr . Draw ( \"APL\" ) ;\n\n\nlariatHead = TLatex();\nlariatHead.SetNDC();\nlariatHead.SetTextFont(62);\nlariatHead.SetTextSize(0.04);\nlariatHead.SetTextAlign(40);\n\nMCFile = root.TFile(\"Final.root\")\nstat = MCFile.Get(\"XS_StatOnly\")\nsys = MCFile.Get(\"grXS\")\n\nMCFileTrue = root.TFile(\"../EffCorrection/Eff_Correction.root\")\nXSTrue45 = MCFileTrue.Get(\"XS45Deg\")\nfor i in xrange(16,40):\n XSTrue45.SetBinContent(i,0)\n XSTrue45.SetBinError(i,0)\nXSTrue45.SetLineColor(kGreen-2)\n\nsys .SetLineWidth(2)\nstat .SetMarkerStyle(22)\nstat .SetMarkerSize(1.0)\nsys.SetTitle(\"; Kinetic Energy [MeV]; #sigma^{K}_{TOT} per 50 MeV [barn]\")\nsys.GetXaxis().SetRangeUser(0,800.)\nsys.GetYaxis().SetRangeUser(0,1.5)\nsys.SetLineColor(kBlack)\n\ncXS = TCanvas(\"cXS\",\"cXS\",600,600)\ncXS.SetGrid()\nsys.Draw(\"AP\")\n#gr . Draw ( \"PL\" ) ;\nstat.Draw(\"e1same\")\nXSTrue45.Draw(\"histosame][\")\nlariatHead.DrawLatex(0.6,0.90,\"LArIAT Preliminary\");\nlegendXS = TLegend(.30,.68,.86,.86);\nlegendXS.AddEntry(XSTrue45,\"FTFP_BERT Geant4 Prediction Angle > 5.0 Deg\")\nlegendXS.AddEntry(stat,\"Kaon Data (Stat. #oplus Syst Unc.)\");\n#legendXS.AddEntry(sys ,\"Kaon Data Stat and Sys\");\nlegendXS.Draw(\"same\")\ncXS.Update()\n#cXS.SaveAs(\"TheFinalMoneyPlotK.pdf\")\n\n\n\n############################################################\ncXS60A = TCanvas(\"cXS60A\",\"cXS60A\",600,800)\np1 = TPad(\"pad1\", \"The pad 80% of the height\",0.0,0.2,1.0,1.0,21) \np2 = TPad(\"pad2\", \"The pad 80% of the height\",0.0,0.0,1.0,0.2,22)\np1.Draw()\np1.SetFillColor(0)\np2.SetFillColor(0)\np1.SetLeftMargin(0.12)\n#p2.SetLeftMargin(0.12)\np2.Draw() \np1.cd()\np1.SetGrid()\np2.SetGrid()\nsys.Draw(\"AP\")\n#gr . 
Draw ( \"PL\" ) ;\nXSTrue45.Draw(\"][histosame\")\nstat.SetLineColor(1)\nstat.SetFillColor(0)\nstat.Draw(\"e1same\")\nlariatHead.DrawLatex(0.6,0.90,\"LArIAT Preliminary\");\nlegendXS.Draw(\"same\")\n\n\ny_XS , eyl_XS , eyh_XS = array( 'd' ), array( 'd' ), array( 'd' )\nx_XS , exl_XS , exh_XS = array( 'd' ), array( 'd' ), array( 'd' )\n\nresidual = stat.Clone(\"res\")\nresidualTGr = sys.Clone()\nresidual.SetTitle(\";Kinetic Energy [MeV]; #sigma_{MC} - #sigma^{#pi-}\")\nresidual.GetYaxis().SetRangeUser(-1,1)\nresidual.GetXaxis().SetRangeUser(0,800)\n\nresidual.GetXaxis().SetTitleOffset(.5);\nresidual.GetXaxis().SetTitleSize(.10);\nresidual.GetYaxis().SetTitleOffset(.5);\nresidual.GetYaxis().SetTitleSize(.10);\n\nxRes = sys .GetX()\nerrYHigh = sys .GetEYhigh()\nerrYLow = sys .GetEYlow()\n\nfor i in xrange(residual.GetSize()-2):\n if stat.GetBinContent(i):\n residual.SetBinContent(i, (stat.GetBinContent(i) - XSTrue45.GetBinContent(i))/stat.GetBinContent(i) )\n else:\n residual.SetBinContent(i, -100. )\n\n\n x_XS .append(xRes[i])\n exl_XS .append(25.)\n exh_XS .append(25.)\n \n if i < 5 or i > 15:\n y_XS .append(-100.)\n elif stat.GetBinContent(i):\n y_XS .append( (stat.GetBinContent(i) - XSTrue45.GetBinContent(i))/ stat.GetBinContent(i) )\n else:\n y_XS .append( -100.)\n eyh_XS .append(errYHigh[i])\n eyl_XS .append(errYLow[i])\n \n\ngrRes_Combo = TGraphAsymmErrors(residual.GetSize()-2,x_XS, y_XS, exl_XS, exh_XS, eyl_XS, eyh_XS)\n\ngrRes_Combo.SetTitle(\";Kinetic Energy [MeV]; #frac{#sigma^{K}_{TOT} - #sigma_{MC}}{#sigma^{K}_{TOT}}\")\ngrRes_Combo.GetYaxis().SetRangeUser(-2,2)\ngrRes_Combo.GetXaxis().SetRangeUser(0,800)\n\ngrRes_Combo.GetXaxis().SetTitleOffset(.5);\ngrRes_Combo.GetXaxis().SetTitleSize(.13);\ngrRes_Combo.GetYaxis().SetTitleOffset(.5);\ngrRes_Combo.GetYaxis().SetTitleSize(.13);\n\np2.cd()\n#residual.Draw()\ngrRes_Combo.Draw(\"AP\")\n\ncXS60A.Update()\ncXS60A.SaveAs(\"TheRealMoneyPlot.pdf\")\n\n\n\n\n\nraw_input()\n\n","repo_name":"ElenaGramellini/QuickDumpster2","sub_path":"ErrorPropagationInXS/Kaons/StatAndSys/FinalRead_Conclusions.py","file_name":"FinalRead_Conclusions.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12508813635","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport urllib.request\nimport os\nimport time\nimport datetime\n\nif not os.path.exists(\"html_files\"):\n\tos.mkdir(\"html_files\")\n\nfor i in range(24):\n\tcurrent_time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')\n\tprint(str(i) + \": \" + current_time_stamp)\n\tf = open(\"html_files/coinmarketcap\" + current_time_stamp + \".html\", \"wb\")\n\tresponse = urllib.request.urlopen('https://coinmarketcap.com/all/views/all/')\n\thtml = response.read()\n\tf.write(html)\n\tf.close()\n\ttime.sleep(7200)\n \n#possible to add a ton of different variations to make website think you are human (won't get banned)","repo_name":"jpoole5/ECON498_ps1","sub_path":"HW1_Request.py","file_name":"HW1_Request.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"69837126914","text":"\"\"\"\r\nA diode or capacitor structure.\r\n\r\nUsed for testing any number of electrical characteristics.\r\n\"\"\"\r\n\r\nimport pya\r\nimport math\r\n\r\nimport helpers\r\n\r\nclass diode(pya.PCellDeclarationHelper):\r\n\r\n def __init__(self):\r\n # Initialize the super class\r\n super(diode, self).__init__()\r\n # declare the parameters\r\n self.param(\"active\", self.TypeLayer, \"Active Region Layer\", default = pya.LayerInfo(1, 0))\r\n self.param(\"contact\", self.TypeLayer, \"Contact Etch Layer\", default = pya.LayerInfo(3, 0))\r\n self.param(\"metal\", self.TypeLayer, \"Metal Layer\", default = pya.LayerInfo(4, 0))\r\n self.param(\"p_metal\", self.TypeLayer, \"P Metal Layer\", default = pya.LayerInfo(5, 0))\r\n self.param(\"L\", self.TypeDouble, \"Length\", default = 50)\r\n self.param(\"alignment\", self.TypeDouble, \"Alignment Accuracy\", default = 1)\r\n self.param(\"contact_size\", self.TypeDouble, \"Contact Size\", default = 2)\r\n self.param(\"pad_w\", self.TypeDouble, \"Pad Width\", default = 150)\r\n self.param(\"pad_h\", self.TypeDouble, \"Pad Length\", default = 100)\r\n self.param(\"pad_dx\", self.TypeDouble, \"Pad X Spacing\", default = 150)\r\n self.param(\"diode\", self.TypeBoolean, \"Diode?\", default=True)\r\n\r\n self.param(\"disp_L\", self.TypeBoolean, \"Display Size?\", default=True)\r\n self.param(\"text_h\", self.TypeDouble, \"Text Height\", default = 20)\r\n\r\n\r\n def display_text_impl(self):\r\n part_str = 'Diode' if self.diode else 'Cap'\r\n return f'{part_str} L={self.L}'\r\n \r\n def coerce_parameters_impl(self):\r\n pass\r\n\r\n def produce_impl(self):\r\n dbu = self.layout.dbu\r\n L = self.L / dbu\r\n alignment = self.alignment / dbu\r\n contact_size = self.contact_size / dbu\r\n pad_w = self.pad_w / dbu\r\n pad_h = self.pad_h / dbu\r\n pad_dx = self.pad_dx / dbu\r\n offset = 4 * alignment + contact_size\r\n\r\n self.cell.shapes(self.active_layer).insert(pya.Box(\r\n *helpers.center_size_to_points(0, 0, L, L)))\r\n\r\n num_contacts = int((L - 3 * alignment) / (contact_size + 2 * alignment))\r\n p_contact_pos = L / 2 + 3 * offset / 2\r\n for ii in range(num_contacts):\r\n contact_x = - L / 2 + offset / 2 + ii * (contact_size + 2 * alignment)\r\n self.cell.shapes(self.contact_layer).insert(pya.Box(\r\n *helpers.center_size_to_points(contact_x, p_contact_pos, contact_size, contact_size)))\r\n self.cell.shapes(self.contact_layer).insert(pya.Box(\r\n *helpers.center_size_to_points(contact_x, -p_contact_pos, contact_size, contact_size)))\r\n self.cell.shapes(self.contact_layer).insert(pya.Box(\r\n *helpers.center_size_to_points(-p_contact_pos, contact_x, contact_size, contact_size)))\r\n if self.diode or self.metal_layer != self.active_layer:\r\n for jj in range(num_contacts):\r\n contact_y = L / 2 - offset / 2 - jj * (contact_size + 2 * alignment)\r\n self.cell.shapes(self.contact_layer).insert(pya.Box(\r\n *helpers.center_size_to_points(contact_x, contact_y, contact_size, contact_size)))\r\n\r\n pad_x = (pad_w + pad_dx) / 2\r\n self.cell.shapes(self.metal_layer).insert(pya.Box(*helpers.center_size_to_points(\r\n pad_x, 0, pad_w, pad_h))) \r\n self.cell.shapes(self.p_metal_layer).insert(pya.Box(*helpers.center_size_to_points(\r\n - pad_x, 0, pad_w, pad_h))) \r\n\r\n self.cell.shapes(self.metal_layer).insert(pya.Box(\r\n - L / 2, - L / 2, pad_dx / 2, L / 2))\r\n\r\n self.cell.shapes(self.p_metal_layer).insert(pya.Box(\r\n - p_contact_pos - offset / 2, p_contact_pos + offset / 2,\r\n - p_contact_pos + offset / 2, - 
p_contact_pos - offset / 2))\r\n self.cell.shapes(self.p_metal_layer).insert(pya.Box(\r\n - p_contact_pos + offset / 2, p_contact_pos - offset / 2,\r\n L / 2, p_contact_pos + offset / 2))\r\n self.cell.shapes(self.p_metal_layer).insert(pya.Box(\r\n - p_contact_pos + offset / 2, - p_contact_pos - offset / 2,\r\n L / 2, - p_contact_pos + offset / 2))\r\n\r\n self.cell.shapes(self.p_metal_layer).insert(pya.Box(\r\n - pad_dx / 2, - L / 2, - p_contact_pos - offset / 2, L / 2))\r\n\r\n # Display text with relevant parameters \r\n if self.disp_L:\r\n # Generate klayout region containing text\r\n # This can only generate with lower left at (0, 0)\r\n text_generator = pya.TextGenerator.default_generator()\r\n # default height is .7; third argument rescales to desired size\r\n text = text_generator.text(f'L={self.L:g}', self.layout.dbu, self.text_h / .7)\r\n\r\n # Adjust position of region\r\n bbox = text.bbox()\r\n text_len = (bbox.right - bbox.left)\r\n text_x = - text_len / 2\r\n text_y = max(p_contact_pos + offset / 2, pad_h / 2) + offset\r\n text.move(text_x, text_y)\r\n\r\n # Add region to metal layer\r\n self.cell.shapes(self.metal_layer).insert (text)","repo_name":"ConnorCremers/EE312_Klayout_Pcells","sub_path":"diode.py","file_name":"diode.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"32584286386","text":"class PowerCalculator:\n def power(self, x, n):\n # Base case: x^0 is always 1\n if n == 0:\n return 1\n \n # Recursive case for positive n\n if n > 0:\n return x * self.power(x, n - 1)\n # Recursive case for negative n\n elif n < 0:\n return 1 / (x * self.power(x, abs(n) - 1))\n\n# Example usage:\ntry:\n base = float(input(\"Enter the base (x): \"))\n exponent = int(input(\"Enter the exponent (n): \"))\n\n calculator = PowerCalculator()\n result = calculator.power(base, exponent)\n\n print(f\"{base}^{exponent} is: {result}\")\nexcept ValueError as e:\n print(f\"Error: {e}\")\n","repo_name":"harishccbp/Python_Assignment","sub_path":"pow(x,n)_class.py","file_name":"pow(x,n)_class.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32786569007","text":"# Cody Lynch\r\n# 1954220\r\n\r\n# Assigns first set of values values\r\na = int(input())\r\nb = int(input())\r\nc = int(input())\r\n\r\n\r\n# Assigns second set of values\r\nd = int(input())\r\ne = int(input())\r\nf = int(input())\r\n\r\n# Creates equations\r\ny = (c - (a * f / d)) / (b - (e * a / d))\r\nx = ((c / a) - (b * y / a))\r\n\r\n# Sets parameters and gives rounded output\r\nif -10 > round(x) or 10 < round(x):\r\n print(\"There is no solution\")\r\nelif -10 > round(y) or 10 < round(y):\r\n print(\"No solution\")\r\nelse:\r\n print(round(x), (round(y)))\r\n","repo_name":"ca-lynch/CIS2348","sub_path":"Homework2/ZyLab_6_22.py","file_name":"ZyLab_6_22.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40923891572","text":"from __future__ import annotations\n\nfrom transaction.Transaction import Transaction\nfrom transaction.Accusation import Accusation\nfrom identity import ID\nfrom config import verdict_config\nfrom utils import compare_signature, sign, random_word, import_key\nfrom Crypto.PublicKey.RSA import RsaKey\nfrom typing import Optional\n\nimport random\nimport datetime\n\n\nSENTECE_CHAR_LIMIT = verdict_config[\"sentence_char_limit\"]\nDESCRIPTION_CHAR_LIMIT = verdict_config[\"description_char_limit\"]\n\n\nclass Verdict(Transaction):\n\n def __init__(self, sender: ID, accusation: Accusation, sentence: str,\n description: str, signature: Optional[str] = None):\n\n if not isinstance(sender, ID):\n raise TypeError(\"\\\"sender\\\" must be of type ID\")\n elif not isinstance(accusation, Accusation):\n raise TypeError(\"\\\"accusation\\\" must be of type Accusation\")\n elif not isinstance(sentence, str):\n raise TypeError(\"\\\"sentence\\\" must be of type str\")\n elif not isinstance(description, str):\n raise TypeError(\"\\\"description\\\" must be of type str\")\n elif not (signature is None or isinstance(signature, str)):\n raise TypeError(\"\\\"signature\\\" must be of type str\")\n\n self.__sender = sender\n self.__accusation = accusation\n self.__sentence = sentence\n self.__description = description\n self.__signature = signature\n\n def is_valid(self):\n if self.__signature is None:\n print(\"Invalid Appeal: Unsigned transaction\")\n return False\n\n if self.__sender.is_valid() is False:\n print(\"Invalid Verdict: Sender's ID is not valid\")\n return False\n\n if self.__accusation.is_valid() is False:\n print(\"Invalid Verdict: Accusation is not valid\")\n return False\n\n if compare_signature(self.__sender.to_dict()[\"public_key\"], self.__signature,\n self.get_content()) is False:\n print(\"Invalid Verdict: Signature doesn't match public key\")\n return False\n\n if len(self.__sentence) == 0:\n print(\"Invalid Verdict: The sentence must have at least one character\")\n return False\n\n if len(self.__sentence) > SENTECE_CHAR_LIMIT:\n print(\"Invalid Verdict: The sentence surpassed the \" +\n f\"characters limit of {SENTECE_CHAR_LIMIT}\")\n return False\n\n if len(self.__description) > DESCRIPTION_CHAR_LIMIT:\n print(\"Invalid Verdict: The description surpassed the \" +\n f\"characters limit of {DESCRIPTION_CHAR_LIMIT}\")\n return False\n\n return True\n\n def to_dict(self) -> dict:\n \"\"\"Returns 'Transaction' content on a dictionary format\"\"\"\n\n return {\n \"sender\": self.__sender.to_dict(),\n \"accusation\": self.__accusation.to_dict(),\n \"sentence\": self.__sentence,\n \"description\": self.__description,\n \"signature\": self.__signature\n }\n\n def get_content(self) -> dict:\n \"\"\"Returns everything except the signature and the hash on a dictionary format\"\"\"\n\n return {\n \"sender\": self.__sender.to_dict(),\n \"accusation\": self.__accusation.to_dict(),\n \"sentence\": self.__sentence,\n \"description\": self.__description\n }\n\n def sign(self, privkey: RsaKey) -> None:\n \"\"\"Adds a signature to the transaction based on it's content\"\"\"\n\n self.__signature = sign(privkey, self.get_content())\n \n def __eq__(self, other):\n return self.to_dict() == other.to_dict()\n\n @staticmethod\n def import_dict(transaction: dict) -> Optional[Verdict]:\n keys = [\"sender\", \"accusation\", \"sentence\", \"description\", \"signature\"]\n if any([not key in keys for key in transaction.keys()]):\n print(\"Invalid transaction: Keys missing\")\n return 
None\n\n try:\n sender = ID(**transaction[\"sender\"])\n except TypeError:\n print(\"Invalid transaction: Invalid sender ID\")\n return None\n\n accusation = Accusation.import_dict(transaction[\"accusation\"])\n\n return Verdict(sender, accusation, transaction[\"sentence\"],\n transaction[\"description\"], transaction[\"signature\"])\n\n @staticmethod\n def get_random(valid: bool = True) -> dict:\n \"\"\"Returns a random Verdict with it's corresponding private key\"\"\"\n\n id_info = ID.get_random(valid=valid)\n key, userid = id_info[\"private_key\"], id_info[\"id\"]\n accusation = Accusation.get_random(valid=valid)[\"accusation\"]\n\n verdict = Verdict(\n userid,\n accusation,\n random_word(random.randint(1, SENTECE_CHAR_LIMIT)),\n random_word(random.randint(1, DESCRIPTION_CHAR_LIMIT))\n )\n\n verdict.sign(import_key(key))\n\n return {\n \"private_key\": key,\n \"verdict\": verdict\n }\n","repo_name":"mateusap1/athena-old","sub_path":"model/transaction/Verdict.py","file_name":"Verdict.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"7560394478","text":"BASE = 16\n\n\ndef converts_numbers_by_base(number: int, base: int) -> str:\n \"\"\"Преобразует число в нужную систему счисления\"\"\"\n number_str = '0123456789ABCDEF'\n if number < base:\n return number_str[number]\n \n return converts_numbers_by_base(number // base, base) + number_str[number % base]\n\n\ndef input_number() -> int:\n try:\n number = int(input('Введите число для перевода в систему счисления:'))\n except ValueError:\n return input_number()\n return number\n\n\nif __name__ == '__main__':\n \n number = input_number()\n print(f'Полученый результат в системе счисления {BASE} равен:', converts_numbers_by_base(number, BASE))\n","repo_name":"jshuckbot/python_basics","sub_path":"lesson_2/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29039670634","text":"from toque import utils\r\nfrom . import *\r\nimport time\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\ndef AST(filename, acc=True, tmin=12,method=2,tone='default',transposed=0):\r\n # display current file\r\n print ('transcribing guitar %s ...') % filename\r\n # time tic\r\n startTime = time.time()\r\n # load audio file\r\n audio = loadWaveAudio(filename)\r\n\r\n ############################# CHANNEL SELECTION #################################\r\n\r\n if audio['numChannels'] == 2:\r\n print('channel selection')\r\n channel = algoChannelSelection(audio['left'],audio['right'])\r\n if channel == 0:\r\n samples = audio['left']\r\n else:\r\n samples = audio['right']\r\n else:\r\n samples = audio['left']\r\n\r\n ############################ VOCAL MELODY EXTRACTION ##############################\r\n\r\n print('vocal melody extraction')\r\n if acc:\r\n # predominant melody extraction\r\n f0 = algoF0Acc(samples)\r\n\r\n else:\r\n # monophonic melody extraction\r\n f0 = algoF0Solo(samples)\r\n\r\n ########################## CONTOUR FILTERING ####################################\r\n if acc:\r\n print('contour filtering')\r\n f0g = algoContourFiltering(f0,samples)\r\n ########################### GUITAR EXTRACTION #######################################\r\n if acc:\r\n if audio['numChannels'] == 2:\r\n print('Falseta Extraction')\r\n audioguitL, audioguitR, timefalsetas = algoGuitarExtraction(f0g,tmin, audio['left'],audio['right'])\r\n\r\n #falsetas .wav's exportation on each channel independently\r\n\r\n namewav = filename[0:len(filename) - 4] + 'L_falsetas.wav'\r\n scipy.io.wavfile.write(namewav, 44100, audioguitL)\r\n namewav = filename[0:len(filename) - 4] + 'R_falsetas.wav'\r\n scipy.io.wavfile.write(namewav, 44100, audioguitR)\r\n\r\n #txt file with the time corresponding to each falseta (in sec) : [start1,end1,start2,end2,...]\r\n\r\n name= filename[0:len(filename) -4]+'_timefalsetas.txt'\r\n np.savetxt(name,timefalsetas,fmt='%.1d')\r\n\r\n # Stereo to mono (mean of the 2 channels to work better with the transcription)\r\n gLen = min([len(audioguitL), len(audioguitR)])\r\n audioguitL = audioguitL[0:gLen]\r\n audioguitR = audioguitR[0:gLen]\r\n\r\n monoguit = np.array(audioguitR) + np.array(audioguitL)\r\n monoguit=monoguit/2\r\n\r\n wavmonofile = 'monoguit.wav'\r\n scipy.io.wavfile.write(wavmonofile, 44100, monoguit)\r\n\r\n if audio['numChannels'] == 1:\r\n audioguit, timefalsetas = algoGuitarExtractionMono(f0g, samples, tmin)\r\n\r\n #falsetas .wav's exportation\r\n namewav = filename[0:len(filename) - 4] + 'M_falsetas.wav'\r\n scipy.io.wavfile.write(namewav, 44100, audioguit)\r\n\r\n wavmonofile = 'monoguit.wav'\r\n scipy.io.wavfile.write(wavmonofile, 44100, audioguit)\r\n monoguit=audioguit\r\n\r\n # txt file with the time corresponding to each falseta (in sec) : [start1,end1,start2,end2,...]\r\n name= filename[0:len(filename) -4]+'_timefalsetas.txt'\r\n np.savetxt(name,timefalsetas,fmt='%.1d')\r\n\r\n print('Falseta extraction done')\r\n\r\n\r\n ############################## TRANSCRIPTION #################################\r\n # #\r\n # monoguit= samples of falseta file #\r\n # wavmonofile= audio file 'monoguit.wav' #\r\n # #\r\n ###################################################################################\r\n\r\n ############################## GUITAR MELODY EXTRACTION ##############################\r\n\r\n print('Guitar melody extraction')\r\n if not acc:\r\n wavmonofile=filename\r\n monoguit=samples\r\n\r\n if acc:\r\n # Klapuri 
Multipitch tracker (ESSENTIA)\r\n guitF0 = klapuri(monoguit)\r\n\r\n else:\r\n # Klapuri Pitch tracker (ESSENTIA)\r\n guitF0 = klapuriMono(monoguit)\r\n\r\n ####################### ONSET AND OFFSET DETECTION // NOTE SEGMENTATION ################\r\n\r\n print('Onset and offset detection')\r\n\r\n _onsetsflux,_onsetscomplex = algoOnsetDetection(wavmonofile)\r\n\r\n onsets, offsets, onsetsf0, offsetsf0,segmentsMG = Segmentation(_onsetsflux,monoguit)\r\n\r\n ################################ PITCH ESTIMATION ####################################\r\n\r\n print('Pitch estimation')\r\n notes,MyMIDI =algoPitchSegments(guitF0, onsetsf0, offsetsf0, monoguit, segmentsMG,filename, acc,method)\r\n\r\n ################################ POST-PROCESSING ####################################\r\n\r\n print('note post-processing')\r\n algoPostProcessTranscription(notes,tone,transposed)\r\n\r\n ########################### WRITE CSV FILE AND MIDI FILE ############################\r\n\r\n writeToCsv(notes,filename[0:len(filename)-3]+'notes.csv')\r\n\r\n namemidi = filename[0:len(filename) - 4] + '_MIDI.mid'\r\n with open(namemidi, \"wb\") as output_file:\r\n MyMIDI.writeFile(output_file)\r\n\r\n #display success & elapsed time\r\n\r\n print ('Done!')\r\n print('Elapsed time: %f seconds') % (time.time()-startTime)\r\n\r\n return\r\n\r\n\r\n\r\ndef transcribeguit(filename, acc = True, recursive = False, tmin=6, method=2,tone='default',transposed=0):\r\n '''\r\n TOQUE: Automatic Note-Level Transcription of Flamenco Guitar.\r\n\r\n The algorithm creates a .csv file containing the estimated note events corresponding to the\r\n guitar falseta melody, where each row corresponds to a note event as follows:\r\n\r\n < note onset [seconds] >, < note duration [seconds] >, < MIDI pitch value >;\r\n\r\n Input is a .wav audio file with a sample rate of 44.1kHz and a bit depth of 16 Bits. Otherwise\r\n an error is raised.\r\n\r\n If an f0 file is provided, the filename should be identical to the audio file, i.e. for test.wav,\r\n a file named test.csv should be located in the same folder. The required format matches the output\r\n of sonic visualizer (www.sonicvisualizer.org) and sonic annotator (http://www.vamp-plugins.org/sonic-annotator/):\r\n The first column contains the time instants in seconds and the second column holds the corresponding\r\n pitch values in Hz. Zero or negative pitch values indicate unvoiced frames. Hop size is restricted to\r\n 128 samples for a sample rate of 44.1 kHz.\r\n\r\n In recursive mode, the algorithm transcribes all .wav files in the provided folder path.\r\n\r\n For accompanied recordings (i.e. vocals + guitar), an additional contour filtering stage is\r\n applied. 
In this case, set acc = True.\r\n\r\n If you use this code for research purposes, please cite [1].\r\n\r\n :param filename: path to the input file or folder.\r\n :param acc: True if accompaniment is expected, False for a cappella recordings.\r\n :param f0_file: True if a .csv file containing the fundamental frequency is provided.\r\n :param recursive: True for folder recursion.\r\n :return: NONE - .notes.csv written to the same folder\r\n '''\r\n\r\n # transcribe a single file\r\n if not recursive:\r\n # sanity check\r\n if not os.path.isfile(filename):\r\n print (\"ERROR: file not found!\")\r\n return\r\n # transcription\r\n AST(filename, acc, tmin,method,tone,transposed)\r\n\r\n # recursive mode\r\n else:\r\n # sanity check\r\n if not os.path.isdir(filename):\r\n print (\"ERROR: folder not found!\")\r\n return\r\n # get list of all wav files\r\n files = []\r\n for file in os.listdir(filename):\r\n if file.endswith(\".wav\"):\r\n if not file.endswith(\"falsetas.wav\"):\r\n files.append(file)\r\n if not filename.endswith('/'):\r\n filename = filename + '/'\r\n for file in files:\r\n AST(filename+file, acc, tmin,method,tone,transposed)\r\n return\r\n\r\n\r\n","repo_name":"SoniaLuque/PyToque","sub_path":"toque/transcribeguit.py","file_name":"transcribeguit.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26855073273","text":"# coding: UTF-8\n# Sample script for reading a single port using Receiver class.\n#Author - Yatharth Bhasin (github → yatharthb97)\n\nimport time\nimport os\nfrom receiver import Receiver\nparent_path = \"./Test Runs/\"\n\n\ndef try_directory(session_name):\n session_name = session_name.replace(' ', '_')\n path = os.path.join(parent_path, session_name)\n\n try: \n os.mkdir(path)\n return path\n except OSError as error: \n print(error)\n return \"\"\n\ndef Events_Acq(Unique_Session_Name, ports, receive_fn, Events):\n\n #Make a directory with the session name and use that to store\n directory = try_directory(Unique_Session_Name)\n if directory == \"\":\n return\n\n #Print Info\n print(f\" • Event Acquisition Session : {Unique_Session_Name} | Events to Acquire: {Events}\")\n\n #ports = PORTS #Make a copy\n reserve_ports = []\n\n #Set file and max counter values\n for port in ports:\n filename = os.path.join(directory, port.Name + \".dat\")\n port.to_file(filename)\n port.EventCounter.set_down_counter(Events);\n port.EventsList.append(port.EventCounter.val());\n\n #Open Ports\n for port in ports:\n port.open()\n\n #Check if the ports are open\n open_status = sum([port.is_open() for port in ports])\n\n #If ports are open → Read sequentially from each port\n if(open_status == len(ports)):\n while len(ports) > 0:\n for index, port in enumerate(ports):\n if port.EventCounter.val() > 0:\n receive_fn(port)\n else:\n _port_ = ports.pop(index)\n reserve_ports.append(_port_)\n else:\n print(\" • ERROR > Some ports are not open!\")\n\n \n #Check for sedidual ports\n if(len(ports) > 0):\n print(\" • ERROR > Port list is not empty before exit.\")\n\n #Close ports\n for port in reserve_ports:\n port.close()\n\n #Print status of ports\n for port in reserve_ports:\n port.status()\n\n return reserve_ports\n\n\ndef Time_Acq(Unique_Session_Name, ports, receive_fn, Time_Acquire_ms):\n\n #Make a directory with the session name and use that to store\n directory = try_directory(Unique_Session_Name)\n \n if directory == \"\":\n return\n\n print(f\" • Data Aquisition Session: {Unique_Session_Name} | Acquiring Data For : {Time_Acquire_ms} ms\")\n \n Time_Acquire_ns = Time_Acquire_ms * 1e-6\n Start_time = time.time_ns()\n print(f\"\\t• Started at Epoch Time:{Start_time * 1e-6} ms.\")\n\n #Set file and max counter values\n for port in ports:\n filename = os.path.join(directory, port.Name + \".dat\")\n port.to_file(filename)\n port.set_up_counter(0);\n\n #Open Ports\n for port in ports:\n port.open()\n\n #Check if the ports are open\n open_status = sum([port.is_open() for port in ports])\n\n #If ports are open → Read sequentially from each port\n if(open_status == len(ports)):\n while (time.time_ns() - Init_time >= Time_Acquire_ns) > 0:\n for index, port in enumerate(ports):\n receive_fn(port)\n else:\n print(\" • ERROR > Some ports are not open!\")\n\n #Close ports\n for port in ports:\n port.close()\n\n #Print status of ports\n for port in ports:\n port.status()\n \n #Reset Counters\n for port in ports:\n port.EventsList.append(port.EventCounter)\n port.EventCounter = 0\n\n return ports","repo_name":"yatharthb97/KGLabMiscCode","sub_path":"Serial Receiver/acquisition_fns.py","file_name":"acquisition_fns.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8081162333","text":"from survey import AnonymousSurvey\n\n# Defining question and making a survey.\nquestion = \"What is your native language?\"\nmy_survey = AnonymousSurvey(question)\n\n# Displaying question and storing ansvers.\nmy_survey.show_question()\nprint(\"Insert 'q' to quit the program.\\n\")\n\nwhile True:\n response = input(\"language: \")\n if response == 'q':\n break\n my_survey.store_response(response)\n\n# Displaying survey results.\nprint(\"\\nWe thank each respondent for participating in the survey.\")\nmy_survey.show_results()\n","repo_name":"mendyk-ja/code_testing","sub_path":"language_survey.py","file_name":"language_survey.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23276469303","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\nfrom tensorflow.keras import backend as K\n\nimport datetime\nfrom pathlib import Path\n\nnow = datetime.datetime.now().isoformat()\nmodel_path = Path(f'/model/pneumonia_model_{now}.h5')\n# If this fails we lack write permission on /model\nmodel_path.touch()\n\n# dimensions of our images.\nimg_width, img_height = 150, 150\n\ntrain_data_dir = './chest_xray/train'\nvalidation_data_dir = './chest_xray/val'\ntest_data_dir = './chest_xray/test'\n\nnb_train_samples = 5216\nnb_validation_samples = 16\nepochs = 100\nbatch_size = 16\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True\n)\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary'\n)\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary'\n)\n\ntest_generator = test_datagen.flow_from_directory(\n test_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='binary'\n)\n\nmodel.fit(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size\n)\n\nscores = model.evaluate(test_generator)\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\nmodel.save(model_path)\n","repo_name":"jharmison-redhat/xray-pneumonia-risk-assessment-v2","sub_path":"training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10774729406","text":"from setuptools import setup, find_packages # Always prefer setuptools over distutils\nfrom codecs import open # To use a consistent encoding\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the relevant file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n\n name='audiomodels',\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n # version='0.1.dev0',\n # according to https://semver.org/\n version='0.2.dev3',\n\n description=' audio models package with semantic features identification',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n # The project's main homepage.\n url='http://github.com/Uiuran/stattus4-audio-models',\n\n # Author details\n author='Daniel Penalva',\n author_email='dkajah@gmail.com',\n\n # Choose your license\n license='MIT',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 2 - Pre-Alpha\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Telecommunications Industry',\n 'Intended Audience :: Education',\n 'Intended Audience :: Religion',\n 'Intended Audience :: Other Audience',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis',\n 'Topic :: Multimedia :: Sound/Audio :: Editors',\n 'Topic :: Multimedia :: Sound/Audio :: Mixers',\n 'Topic :: Multimedia :: Sound/Audio :: Speech',\n 'Topic :: Artistic Software',\n 'Topic :: Other/Nonlisted Topic',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis',\n # Pick your license as you wish (should match \"license\" above)\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n ],\n\n # What does your project relate to?\n keywords=['physics', 'music','audio','neuralnetworks'],\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n # packages=[\"music\"],\n packages=find_packages(),\n #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n\n # List run-time dependencies here. These will be installed by pip when your\n # project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n # TODO: test with virtualenv to know the dependencies\n\n install_requires=['numpy', 'scipy',\n 'matplotlib','pandas','librosa','tensorflow','pillow'],\n\n # List additional groups of dependencies here (e.g. 
development dependencies).\n # You can install these using the following syntax, for example:\n # $ pip install -e .[dev,test]\n #extras_require = {\n # 'dev': ['check-manifest'],\n # 'test': ['coverage'],\n #},\n\n # If there are data files included in your packages that need to be\n # installed, specify them here. If using Python 2.6 or less, then these\n # have to be included in MANIFEST.in as well.\n #package_data={\n # 'sample': ['package_data.dat'],\n #},\n\n # Although 'package_data' is the preferred approach, in some case you may\n # need to place data files outside of your packages.\n # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files\n # In this case, 'data_file' will be installed into '/my_data'\n #data_files=[('my_data', ['data/data_file'])],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n #entry_points={\n # 'console_scripts': [\n # 'sample=sample:main',\n # ],\n #},\n project_urls={\n 'Documentation': 'https://packaging.python.org/tutorials/distributing-packages/',\n #'Funding': 'https://donate.pypi.org',\n #'Say Thanks!': 'http://saythanks.io/to/example',\n 'Source': 'https://github.com/Uiuran/stattus4-audio-models',\n 'Tracker': 'https://github.com/pypa/sampleproject/issues',}\n)\n","repo_name":"Uiuran/stattus4-audio-models","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19068147293","text":"\"\"\"DB_Project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom bookstore import views as bs_views\n\nurlpatterns = [\n url(r'^$',bs_views.login,name = 'login'),\n url(r'^login/$',bs_views.login,name = 'login'),\n url(r'^register/$',bs_views.register,name = 'register'),\n url(r'^user_info/$',bs_views.user_info_float,name = 'order'),\n url(r'^order/$',bs_views.order,name = 'order'),\n url(r'^my_order_history/$',bs_views.order_history,name = 'order_history'),\n url(r'^comment/$', bs_views.comment, name='book_comments'),\n url(r'^my_comment_history/$', bs_views.my_comment, name='my_comments'),\n url(r'^view_all_orders/$', bs_views.all_order_history, name='view_all_orders'),\n url(r'^create_book/$', bs_views.create_books, name='create_book'),\n url(r'^add_book/$', bs_views.add_book, name='add_book'),\n url(r'^overview/$', bs_views.admin_panel, name='admin_panel'),\n]\n","repo_name":"Joe627487136/DB_Project","sub_path":"DB_Project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21633072985","text":"import os\n\n\n# Falta bastante acabamento e melhora do código, porem funciona\n\n\ndef funcao_cls(*args):\n os.system(\"cls\")\n\ndef check(player1, player2):\n \n if player1 and player2 == \"pedra\" or \"tesoura\" or \"papel\":\n if player1 == \"pedra\":\n print(f'{Player1} jogou pedra')\n if player2 == \"tesoura\":\n print(f'{player1} ganhou!')\n else:\n print(f'{Player2} ganhou! ')\n elif player1 == 'tesoura':\n print(f'{Player1} jogou tesoura')\n if player2 == \"pedra\":\n print(f'{Player2} jogou pedra')\n print(f\"{Player2} venceu\")\n else:\n print(f'{Player2} jogou papel')\n print(f\"{Player1} venceu\")\n else:\n if player2 == \"tesoura\":\n print(f'{Player2} venceu')\n else:\n print(f'{Player1} venceu ')\n else:\n print(\"Valores errados!\")\n\n\nfuncao_cls()\n\nPlayer1 = input(\"Qual o nome do Player 1?\")\nPlayer2 = input(\"Qual o nome do Player 2?\")\n\nfuncao_cls()\n\nprint(f\"{Player1} contra {Player2}\")\nprint()\n\n\nprint(\"Escolham entre: Pedra, Papel, Tesoura \")\nprint()\ninput_player1 = input(f'{Player1} começa: \\n').lower()\nfuncao_cls()\ninput_player2 = input(f'{Player2} sua vez: \\n').lower()\nfuncao_cls()\n\ncheck(input_player1, input_player2)\n\n","repo_name":"joaocosta956/Estudos","sub_path":"pedra_papel_tesoura.py","file_name":"pedra_papel_tesoura.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16179361607","text":"from django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom .models import Pet, PetUser, Cart, Payment, OrderPlaced, WishList, PetSold\nfrom .forms import (\n UserRegistrationForm,\n UserProfileForm,\n UserPasswordResetForm,\n SellPetForm,\n)\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\n\n# Create your views here.\ndef homepage_view(request):\n \"\"\"\n Returns the homepage of the website.\n \"\"\"\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/home.html\", locals())\n\n\ndef aboutpage_view(request):\n \"\"\"\n Returns the about page of the website.\n \"\"\"\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/about.html\", locals())\n\n\ndef contactpage_view(request):\n \"\"\"\n Returns the about page of the website.\n \"\"\"\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/contact.html\", locals())\n\n\nclass CategoryView(View):\n \"\"\"\n Returns the requested categories\n \"\"\"\n\n def get(self, request, value):\n pets = Pet.objects.filter(category=value)\n names = Pet.objects.filter(category=value).values(\"name\")\n breeds = pets.values_list(\"breed\", flat=True).distinct()\n category = value\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/category.html\", locals())\n\n\nclass BreedView(View):\n def get(self, request, category, value):\n pets = Pet.objects.filter(breed=value, category=category)\n breeds = (\n Pet.objects.filter(category=category)\n .values_list(\"breed\", flat=True)\n .distinct()\n )\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/breed.html\", locals())\n\n\nclass CategoryName(View):\n def get(self, request):\n pets = Pet.objects.all()\n categories = pets.values_list(\"category\", flat=True).distinct()\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/pets.html\", locals())\n\n\nclass PetDetails(View):\n def get(self, request, num):\n pet = Pet.objects.get(id=num)\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n wishlist = WishList.objects.filter(Q(pet=pet) & Q(user=request.user))\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/details.html\", locals())\n\n\nclass UserRegistrationView(View):\n def get(self, request):\n form = 
UserRegistrationForm()\n return render(request, \"core/userregistration.html\", locals())\n\n def post(self, request):\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n # messages.success(request, \"Congratulations! Welcome aboard.\")\n else:\n messages.warning(request, \"Invalid input data\")\n return render(request, \"core/userregistration.html\", locals())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ProfileView(View):\n def get(self, request):\n form = UserProfileForm()\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/profile.html\", locals())\n\n def post(self, request):\n form = UserProfileForm(request.POST)\n if form.is_valid():\n user = request.user\n name = form.cleaned_data[\"name\"]\n locality = form.cleaned_data[\"locality\"]\n city = form.cleaned_data[\"city\"]\n mobile = form.cleaned_data[\"mobile\"]\n zipcode = form.cleaned_data[\"zipcode\"]\n state = form.cleaned_data[\"state\"]\n\n newUser = PetUser(\n user=user,\n name=name,\n locality=locality,\n city=city,\n mobile=mobile,\n zipcode=zipcode,\n state=state,\n )\n newUser.save()\n # messages.success(request, \"Congratulations! Profile updated.\")\n else:\n messages.warning(request, \"Invalid input data\")\n\n return redirect(\"address\")\n\n\n@login_required\ndef address_view(request):\n address = PetUser.objects.filter(user=request.user)\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/address.html\", locals())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UpdateAddress(View):\n def get(self, request, pk):\n add = PetUser.objects.get(pk=pk)\n form = UserProfileForm(instance=add)\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/updateaddress.html\", locals())\n\n def post(self, request, pk):\n form = UserProfileForm(request.POST)\n if form.is_valid():\n add = PetUser.objects.get(pk=pk)\n add.user = request.user\n add.name = form.cleaned_data[\"name\"]\n add.locality = form.cleaned_data[\"locality\"]\n add.city = form.cleaned_data[\"city\"]\n add.mobile = form.cleaned_data[\"mobile\"]\n add.zipcode = form.cleaned_data[\"zipcode\"]\n add.state = form.cleaned_data[\"state\"]\n add.save()\n # messages.success(request, \"Congratulations! 
Profile updated.\")\n else:\n messages.warning(request, \"Invalid input data\")\n\n return redirect(\"address\")\n\n\n@login_required\ndef deleteaddress_view(request):\n add_id = request.GET[\"add_id\"]\n add = PetUser.objects.get(Q(id=add_id) & Q(user=request.user))\n add.delete()\n data = {\n \"message\": \"Address deleted!\",\n }\n return JsonResponse(data)\n\n\n@login_required\ndef addtocart_view(request):\n user = request.user\n pet_id = request.GET.get(\"pet_id\").replace(\"/\", \"\")\n pet = Pet.objects.get(id=pet_id)\n cart = Cart.objects.filter(user=user, pet=pet)\n if not cart:\n Cart(user=user, pet=pet).save()\n return redirect(\"/cart\")\n\n\n@login_required\ndef showcart_view(request):\n user = request.user\n cart = Cart.objects.filter(user=user)\n amount = 0\n for pt in cart:\n amount = amount + pt.pet.price\n totalamount = amount + 2500\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/addtocart.html\", locals())\n\n\n@login_required\ndef wishlist(request):\n user = request.user\n pets = WishList.objects.filter(user=user)\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/wishlist.html\", locals())\n\n\n@login_required\ndef removecart_view(request):\n if request.method == \"GET\":\n pet_id = request.GET[\"pet_id\"]\n cd = Cart.objects.get(Q(pet=pet_id) & Q(user=request.user))\n cd.delete()\n user = request.user\n cart = Cart.objects.filter(user=user)\n amount = 0\n for p in cart:\n amount = amount + p.pet.price\n totalamount = amount + 2500\n data = {\"amount\": amount, \"totalamount\": totalamount}\n return JsonResponse(data)\n\n\n@login_required\ndef orders(request):\n order_placed = OrderPlaced.objects.filter(user=request.user)\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/orders.html\", locals())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass checkout_view(View):\n def get(self, request):\n user = request.user\n address = PetUser.objects.filter(user=user)\n uid = address\n cart_items = Cart.objects.filter(user=user)\n amount = 0\n for p in cart_items:\n amount = amount + p.pet.price\n totalamount = amount + 2500\n\n email = user.email\n paystack_pub_key = settings.PAYSTACK_PUBLIC_KEY\n\n new_pay = Payment.objects.create(amount=totalamount, email=email, user=user)\n new_pay.save()\n\n payment = model_to_dict(new_pay)\n amount_value = new_pay.amount_value()\n\n wishitem = 0\n if request.user.is_authenticated:\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/checkout.html\", locals())\n\n\n@login_required\ndef verify_payment(request, ref, uid):\n payment = Payment.objects.get(ref=ref)\n verified = payment.verify_payment()\n\n if verified:\n payment.verified = True\n\n petuser = PetUser.objects.get(id=uid)\n cart = Cart.objects.filter(user=request.user)\n for c in cart:\n OrderPlaced(\n user=request.user,\n petuser=petuser,\n pet=c.pet,\n payment=payment,\n ).save()\n c.delete()\n\n return redirect(\"orders\")\n\n\ndef plus_wishlist(request):\n if request.method == \"GET\":\n pet_id = 
request.GET[\"pet_id\"]\n pet = Pet.objects.get(id=pet_id)\n user = request.user\n WishList(user=user, pet=pet).save()\n data = {\n \"message\": \"Added to wishlist!\",\n }\n return JsonResponse(data)\n\n\ndef minus_wishlist(request):\n if request.method == \"GET\":\n pet_id = request.GET[\"pet_id\"]\n pet = Pet.objects.get(id=pet_id)\n user = request.user\n WishList.objects.filter(user=user, pet=pet).delete()\n data = {\n \"message\": \"Removed from wishlist!\",\n }\n return JsonResponse(data)\n\n\ndef search(request):\n query = request.GET[\"search\"]\n pets = Pet.objects.filter(\n Q(breed__icontains=query)\n | Q(category__icontains=query)\n | Q(name__icontains=query)\n )\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/search.html\", locals())\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass sell(View):\n def get(self, request):\n form = SellPetForm()\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n return render(request, \"core/sellpet.html\", locals())\n\n def post(self, request):\n form = SellPetForm(request.POST, request.FILES)\n if form.is_valid():\n pet = form.save(commit=False)\n pet.user = request.user\n pet.save()\n PetSold.objects.create(user=request.user, pet=pet)\n # messages.success(request, \"Congratulations! Pet added successfully.\")\n return redirect(\"sales\")\n else:\n messages.warning(request, \"Invalid input data\")\n\n return render(request, \"core/sellpet.html\", locals())\n\n\n@login_required\ndef petsonsale(request):\n listed_pets = PetSold.objects.filter(user=request.user)\n\n totalitem = 0\n wishitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n wishitem = len(WishList.objects.filter(user=request.user))\n\n return render(request, \"core/sales.html\", locals())\n","repo_name":"thegirlSynth/Pet_Frenzy","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35235781722","text":"import torch\n\n\ndef torch_unique(x, dim=None):\n \"\"\"Unique elements of x and indices of those unique elements\n https://github.com/pytorch/pytorch/issues/36748#issuecomment-619514810\n e.g.\n unique(tensor([\n [1, 2, 3],\n [1, 2, 4],\n [1, 2, 3],\n [1, 2, 5]\n ]), dim=0)\n => (tensor([[1, 2, 3],\n [1, 2, 4],\n [1, 2, 5]]),\n tensor([0, 1, 3]))\n \"\"\"\n unique, inverse = torch.unique(\n x, sorted=True, return_inverse=True, dim=dim)\n perm = torch.arange(inverse.size(0), dtype=inverse.dtype,\n device=inverse.device)\n inverse, perm = inverse.flip([0]), perm.flip([0])\n return unique, inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm)","repo_name":"IrinaArmstrong/GazeVerification","sub_path":"gaze_verification/metrics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74282346433","text":"\"\"\"\n\tSearch Engine\n\tby Vincent Jeanselme\n\tand Matthieu Clin\n\tvincent.jeanselme@gmail.com\n\"\"\"\n\nimport sys\nimport os\nimport unittest\n# Needs to be execute from the project source\nsys.path.append(os.getcwd())\nimport IndexationEngine as iE\n\n\nclass TestIndexationEngine(unittest.TestCase):\n\n\tdef test_force_argument(self):\n\t\tparser = iE.create_parser()\n\t\targ = parser.parse_args(['--force', './IndexationEngine.py'])\n\t\tself.assertEqual(arg.force_indexation, True)\n\n\t\targ = parser.parse_args(['./IndexationEngine.py'])\n\t\tself.assertEqual(arg.force_indexation, False)\n\n\tdef test_documents_argument(self):\n\t\tparser = iE.create_parser()\n\t\tdocs = ['./IndexationEngine.py']\n\t\targ = parser.parse_args(docs)\n\t\tself.assertEqual(arg.documents, docs)\n\n\t\tdocs2 = list(docs)\n\t\tdocs2.append('dummy')\n\n\t\targ = parser.parse_args(docs2)\n\t\tself.assertEqual(arg.documents, docs)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"Jeanselme/Search_Engine","sub_path":"Tests/Unit/TestIndexationEngine.py","file_name":"TestIndexationEngine.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26084014233","text":"\"\"\"empty message\n\nRevision ID: 3565b16d313e\nRevises: 559565b45d06\nCreate Date: 2016-01-09 00:12:41.375256\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3565b16d313e'\ndown_revision = '559565b45d06'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('comment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('comment', sa.Text(), nullable=True),\n sa.Column('title', sa.String(length=512), nullable=True),\n sa.Column('author', sa.Integer(), nullable=True),\n sa.Column('paragraph', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['author'], ['user.id'], ),\n sa.ForeignKeyConstraint(['paragraph'], ['paragraph.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('comment')\n ### end Alembic commands ###\n","repo_name":"warrensavich/FlaskCMS","sub_path":"migrations/versions/3565b16d313e_.py","file_name":"3565b16d313e_.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71766802435","text":"#!/usr/bin/python\n# coding=utf-8\n#\n# Simple demo of reading each analog input from the ADS1x15 and printing it to\n# the screen.\n# Author: Tony DiCola (Edited by Kyle Gabriel)\n# License: Public Domain\n\nimport os\nimport sys\nimport time\nimport Adafruit_ADS1x15\nimport RPi.GPIO as GPIO\n\nif not os.geteuid() == 0:\n print(\"Error: Script must be executed as root.\\n\")\n sys.exit(1)\n\n# Setup I2C bus\ntry:\n if GPIO.RPI_REVISION == 2 or GPIO.RPI_REVISION == 3:\n I2C_bus_number = 1\n else:\n I2C_bus_number = 0\nexcept Exception as except_msg:\n print(\"Could not identify I2C bus: {}\".format(\n except_msg))\n\n# Create an ADS1115 ADC (16-bit) instance.\n# adc = Adafruit_ADS1x15.ADS1115()\n\n# Or create an ADS1015 ADC (12-bit) instance.\n# adc = Adafruit_ADS1x15.ADS1015()\n\n# Note you can change the I2C address from its default (0x48), and/or the I2C\n# bus by passing in these optional parameters:\nadc = Adafruit_ADS1x15.ADS1115(address=0x48, busnum=I2C_bus_number)\n\n# Choose a gain of 1 for reading voltages from 0 to 4.09V.\n# Or pick a different gain to change the range of voltages that are read:\n# - 2/3 = +/-6.144V\n# - 1 = +/-4.096V\n# - 2 = +/-2.048V\n# - 4 = +/-1.024V\n# - 8 = +/-0.512V\n# - 16 = +/-0.256V\n# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.\nGAIN = 1\n\nprint('Reading ADS1x15 values, press Ctrl-C to quit...')\n# Print nice channel column headers.\nprint('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*range(4)))\nprint('-' * 37)\n# Main loop.\nwhile True:\n # Read all the ADC channel values in a list.\n values = [0] * 4\n for i in range(4):\n # Read the specified ADC channel using the previously set gain value.\n values[i] = adc.read_adc(i, gain=GAIN)\n # Note you can also pass in an optional data_rate parameter that controls\n # the ADC conversion time (in samples/second). Each chip has a different\n # set of allowed data rate values, see datasheet Table 9 config register\n # DR bit values.\n # values[i] = adc.read_adc(i, gain=GAIN, data_rate=128)\n # Each value will be a 12 or 16 bit signed integer value depending on the\n # ADC (ADS1015 = 12-bit, ADS1115 = 16-bit).\n # Print the ADC values.\n print('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*values))\n # Pause for half a second.\n time.sleep(0.5)\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/tests/manual_tests/test_i2c_ADS1x15.py","file_name":"test_i2c_ADS1x15.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"}
+{"seq_id":"16140582626","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sklearn.metrics\nimport pickle\nimport pdb\nimport os\n\n\ndef accuracy(output, target, is_train=True, state=[0, 0], save=None):\n \"\"\"\n train:\n output: [num_bag, r]\n target: [num_bag]\n test:\n output: [num_bag, r, r]\n target: [num_bag, r]\n \"\"\"\n with torch.no_grad():\n if state is None:\n state = [0, 0]\n if is_train:\n pred = torch.argmax(output, dim=1)\n assert pred.shape[0] == len(target)\n correct = torch.sum(pred == target).item()\n state[0] += correct\n state[1] += len(target)\n else:\n pred = torch.argmax(output, dim=2)\n pred = torch.eq(pred, torch.arange(target.shape[1]).unsqueeze(0))\n correct = torch.sum(pred.int() * target.int()).item()\n state[0] += correct\n state[1] += torch.sum(target).item()\n return state[0] / state[1] if state[1] != 0 else 0, state\n\n\ndef non_na_accuracy(output, target, is_train=True, state=[0, 0], save=None):\n with torch.no_grad():\n if state is None:\n state = [0, 0]\n if is_train:\n non_na_index = ~target.eq(0)\n output = torch.masked_select(output, non_na_index.unsqueeze(1)).reshape(\n -1, output.shape[1]\n )\n target = torch.masked_select(target, non_na_index)\n if len(target) != 0:\n pred = torch.argmax(output, dim=1)\n assert pred.shape[0] == len(target)\n state[0] += torch.sum(pred == target).item()\n state[1] += len(target)\n else:\n pred = torch.argmax(output, dim=2)\n pred = torch.eq(pred, torch.arange(target.shape[1]).unsqueeze(0))\n state[0] += torch.sum(pred.double()[:, 1:] * target[:, 1:]).item()\n state[1] += torch.sum(target[:, 1:]).item()\n return state[0] / state[1] if state[1] != 0 else 0, state\n\n\ndef auc(output, target, is_train=False, state=None, save=None):\n with torch.no_grad():\n output = torch.diagonal(F.softmax(output, 2), offset=0, dim1=1, dim2=2)\n result = {\n \"output\": output[:, 1:].cpu().numpy(),\n \"target\": target[:, 1:].cpu().numpy(),\n }\n output = torch.flatten(output[:, 1:])\n target = torch.flatten(target[:, 1:])\n precision, recall, _ = sklearn.metrics.precision_recall_curve(\n target.cpu(), output.cpu()\n )\n auc = sklearn.metrics.auc(x=recall, y=precision)\n if save is not None:\n with open(os.path.join(save, \"test_results.pickle\"), \"wb\") as f:\n result.update({\"recall\": recall, \"precision\": precision})\n pickle.dump(result, f)\n p_at_r = [0, 0, 0]\n for p, r in zip(precision[::-1], recall[::-1]):\n if r >= 0.1 and p_at_r[0] == 0:\n p_at_r[0] = p\n if r >= 0.2 and p_at_r[1] == 0:\n p_at_r[1] = p\n if r >= 0.3 and p_at_r[2] == 0:\n p_at_r[2] = p\n print(\n \"P@0.1: %.4f\\nP@0.2: %.4f\\nP@0.3:%.4f\"%(p_at_r[0], p_at_r[1], p_at_r[2])\n )\n\n return auc, None\n\n","repo_name":"sunlab-osu/REDS2","sub_path":"model/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"}
+{"seq_id":"37282261098","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"app_django_first\"\n\nurlpatterns = [\n path('index/', views.index, name='index'),\n path('', views.index, name='index'),\n path('index/create/', views.create, name='create'),\n path('index/sp/', views.sp, name='sp'),\n path('index/calc/', views.calc, name='calc'),\n path('template/', views.template, name='template'),\n path('index/form', views.form, name='form'),\n path('index/form2', views.form_name, name='form2'),\n path('index/get', views.form_name, name='form3'),\n]\n","repo_name":"silviocapelo/djangoFirst","sub_path":"app_django_first/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71836020034","text":"# def reverse(str, res = \"\"):\n# length = len(str)\n# if length == 1: return res + str\n# else: return reverse(str[:length - 1], res + str[length - 1])\n#\n# res = reverse(\"hello world!\")\n# print(res)\n\nreverse = lambda str: str[-1] + reverse(str[:-1]) if len(str) > 1 else str\n#print(reverse(2))\n\nstr = \"hello world\"\n#print(str[:-1])\nprint(reverse(str))","repo_name":"codeAligned/codingChallenges","sub_path":"Misc/recursive_reverse_string.py","file_name":"recursive_reverse_string.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39585342251","text":"from rejson.path import Path\nimport pytz\nfrom utils.client import get_redis_client\nfrom datetime import datetime\n\nfrom utils.mail import send_email\nfrom celery import Celery\nimport requests\nimport os\n\nAPP_NAME = 'tasks'\nREDIS_PASSWORD = os.environ['REDIS_PASSWORD']\nREDIS_HOST = os.environ['REDIS_HOST']\nREDIS_PORT = os.environ['REDIS_PORT']\nBROKER_NAME = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}'\n\napp = Celery(APP_NAME, broker=BROKER_NAME)\n\n@app.task\ndef send_verification_mail(email: str, code: str):\n email = email\n code = code\n subject = \"Verify your email!\"\n body = f\"\"\"\n Please use the below code to verify yourself and start receiving alerts.\n CODE: {code}\n \"\"\"\n\n send_email([email], subject, body)\n\n@app.task\ndef fetch_slots(district_id: int):\n url = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict\"\n IST = pytz.timezone('Asia/Kolkata')\n date = datetime.now(IST).strftime(\"%d-%m-%Y\")\n params = {\"district_id\": district_id, \"date\": date}\n headers = {\"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36\"}\n resp = requests.get(url, params=params, headers=headers)\n centers = resp.json()\n\n client = get_redis_client()\n # Get all emails for this district.\n emails = client.jsonget(\"districts\", Path(f'[\"{district_id}\"]'))\n paths = [Path(f'[\"{email}\"]') for email in emails]\n # Get all users with the emails.\n users = client.jsonget(\"users\", *paths)\n\n for center in centers['centers']:\n for session in center['sessions']:\n # Consider sessions which have a non zero capacity.\n if session['available_capacity'] > 0:\n tb_notified_users = []\n session_id = session['session_id']\n # If emails/paths has only one element, user is a dictionary representing the user\n # object.\n if len(emails) == 1:\n if session_id not in users['session_ids']:\n if users['verified']:\n email = users['email']\n tb_notified_users.append(email)\n # Record the session into the user to avoid notifiying again.\n client.jsonarrappend(\"users\", Path(f'[\"{email}\"][\"session_ids\"]'), session_id)\n # Else users is a dictionary, with the key as the email and value as the object\n else:\n for user in users.values():\n if user['verified']:\n if session_id not in user['session_ids']:\n email = user['email']\n tb_notified_users.append(email)\n client.jsonarrappend(\"users\", Path(f'[\"{email}\"][\"session_ids\"]'), session_id)\n subject = \"COWIN Vaccine Slots alert\"\n body = f'''\n Slots have opened up for your district, {center['district_name']}. Please check the given details below:\n Center Name: {center['name']}\n Date: {session['date']}\n From: {center['from']}\n To: {center['to']}\n\n Please book your vaccination slot now. Stay safe!\n '''\n send_email(tb_notified_users, subject, body)\n","repo_name":"aryan9600/redis-hack-cowin-emailer","sub_path":"backend/worker/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"71527551554","text":"# Ultrasonic Multi-threaded example\n# Reads sensor with some failsafes against faulty readings.\n# \n#\n# Author: Net-time, 2019\n#\n\"\"\"\n\n \n Ultrasonic \n Reads sensor with some failsafes against faulty readings. \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\"\"\"\nimport Domoticz\nimport sys,os\nimport threading\nimport RPi.GPIO as GPIO\nimport time\nfrom collections import deque\n\nsensorHistory = deque([0.0]*3)\nticks = 0\n#Variables for GPIO Pins\nGPIO_TRIGGER = 0\nGPIO_ECHO = 0\nSENSOR_MIN = 0\nSENSOR_MAX = 0\nSENSOR_SPAN = 0\nimage = 0\n\nclass BasePlugin:\n terminate = False\n\n def handleSensor(self):\n global sensorHistory,GPIO_TRIGGER, GPIO_ECHO\n try:\n Domoticz.Log(\"Entering sensor handler\")\n while True:\n GPIO.output(GPIO_TRIGGER, True)\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False) \n StartTime = time.time()\n StopTime = time.time() \n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0:\n StartTime = time.time() \n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time() \n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = round(((TimeElapsed * 34300) / 4),1)\n # store last 3 values for comparison against faulty readings-\n sensorHistory.rotate() ; sensorHistory[0]= distance\n #Domoticz.Log(str(sensorHistory))\n for x in range(10): # Delay to spare the sensor and cpu load.\n #Domoticz.Log(str(x))\n time.sleep(1)\n #Message = self.messageQueue.get()\n if self.terminate == True:\n GPIO.cleanup()\n Domoticz.Log(\"Exiting sensor handler\")\n break\n except Exception as err:\n Domoticz.Log(\"handleSensor: \"+str(err))\n \n def onStart(self):\n global GPIO_TRIGGER, GPIO_ECHO, SENSOR_MIN, SENSOR_MAX, SENSOR_SPAN, image\n GPIO_TRIGGER = int(Parameters[\"Mode1\"])\n GPIO_ECHO = int(Parameters[\"Mode2\"])\n SENSOR_MIN = int(Parameters[\"Mode3\"])\n SENSOR_MAX = int(Parameters[\"Mode4\"])\n SENSOR_SPAN = int(Parameters[\"Mode5\"])\n VerBose(\"GPIO_TRIGGER/ECHO: \"+ str(GPIO_TRIGGER)+\"/\"+str(GPIO_ECHO))\n try:\n if \"UltrasonicSG\" not in Images:\n Domoticz.Image(\"UltrasonicSG.zip\").Create()\n image = Images[\"UltrasonicSG\"].ID\n Domoticz.Log(\"Image created. 
ID: \"+str(image))\n else:\n image = Images[\"UltrasonicSG\"].ID\n except:\n image = 0\n if len(Devices)==0:\n Domoticz.Device(\"UltraSonic\", Unit=1, Type= 243, Subtype=31, Image=image, Options={\"Custom\": \"1;cm\"}).Create()\n Domoticz.Log(\"Created device: \")\n #GPIO Mode (BOARD / BCM)\n GPIO.setmode(GPIO.BCM)\n #set GPIO direction (IN / OUT)\n GPIO.setup(GPIO_TRIGGER, GPIO.OUT)\n GPIO.setup(int(GPIO_ECHO), GPIO.IN)\n self.messageThread = threading.Thread(name=\"QueueThread\", target=BasePlugin.handleSensor, args=(self,))\n self.messageThread.start()\n \n\n\n def onHeartbeat(self):\n global ticks, sensorHistory, SENSOR_MIN, SENSOR_MAX, SENSOR_SPAN, image\n if ticks>6:\n low = min(sensorHistory)\n high = max(sensorHistory)\n if low != 0.0: # After restart, wait for 3 measurements before before displaying data\n if low>SENSOR_MIN and high 1):\n for thread in threading.enumerate():\n if (thread.name != threading.current_thread().name):\n Domoticz.Log(\"'\"+thread.name+\"' is still running, waiting otherwise Domoticz will abort on plugin exit.\")\n time.sleep(1.0)\n\nglobal _plugin\n_plugin = BasePlugin()\n\ndef onStart():\n global _plugin\n _plugin.onStart()\n\ndef onStop():\n global _plugin\n _plugin.onStop()\n\ndef onHeartbeat():\n global _plugin\n _plugin.onHeartbeat()\n\n# Generic helper functions\n\ndef VerBose(text):\n if Parameters[\"Mode6\"] != \"Normal\":\n Domoticz.Log(text)\n return","repo_name":"Net-time/Domoticz_rpi_ultrasonic","sub_path":"Ultrasonic/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73221376195","text":"#-*-coding:utf8;-*-\n#qpy:3\n#qpy:console\n\nfrom __future__ import print_function\nimport threading\nimport time\n\nvalue = 1\n\ndef work(l):\n global value\n for i in range(5):\n l.acquire() # schaltet den Lock ein\n value += 1\n print(threading.currentThread().getName(), value)\n l.release() # schaltet den Lock frei\n time.sleep(3)\n\nl = threading.Lock()\n\nt1 = threading.Thread(target=work, args=(l,))\nt2 = threading.Thread(target=work, args=(l,))\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join()","repo_name":"jedamus/qpython-scripts","sub_path":"lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"33410054446","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status, generics, viewsets\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework import mixins\nfrom rest_framework_jwt.views import RefreshJSONWebToken\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom .models import (Skill, Team, TeamMembership, TeamMentorship,\n Mentor, Season, Competitor,\n BlackListToken, SeasonCompetitorInfo)\nfrom .serializers import (SkillSerializer, TeamSerializer, Invitation,\n InvitationSerializer, MentorSerializer,\n SeasonSerializer, PublicTeamSerializer,\n OnBoardingCompetitorSerializer,\n SeasonCompetitorInfoSerializer,\n TeamMembershipSerializer,\n TeamMentorshipSerializer,\n CompetitorListSerializer,\n CustomTeamSerializer,\n MentorForTeamSerializer)\n\nfrom .permissions import (CanAttachMoreMentorsToTeam,\n CanInviteMoreMembersInTeam,\n IsHackFMIUser, IsMemberOfTeam,\n TeamLeaderCantCreateOtherTeam,\n CantChangeOtherCompetitorsData,\n IsTeamMembershipInActiveSeason,\n IsInvitedMemberAlreadyInYourTeam,\n IsInvitedMemberAlreadyInOtherTeam,\n CanNotAcceptInvitationIfTeamLeader,\n IsInvitedUserInTeam, IsSeasonActive,\n CanNotAccessWronglyDedicatedIvitation,\n IsTeamLeader, IsSeasonDeadlineUpToDate,\n IsCompetitorMemberOfTeamForActiveSeason,\n CantCreateTeamWithTeamNameThatAlreadyExists,\n IsInvitedMemberCompetitor, IsTeamLeaderOrReadOnly,\n IsMentorDatePickUpToDate, IsTeamleaderOrCantCreateIvitation,\n MentorIsAlreadySelectedByThisTeamLeader,\n )\n\nfrom .helper import send_team_delete_email, send_invitation, get_object_variable_or_none\nfrom .mixins import MeSerializerMixin, JwtApiAuthenticationMixin\n\nfrom loki.base_app.helper import try_open\n\nimport json\n\n\nclass MeAPIView(JwtApiAuthenticationMixin,\n MeSerializerMixin,\n generics.GenericAPIView):\n\n def get(self, request, *args, **kwargs):\n data = super().get(request, *args, **kwargs)\n teams_data = None\n data[\"teams\"] = teams_data\n\n if not data['is_competitor']:\n return Response(data=data, status=status.HTTP_200_OK)\n\n competitor = self.request.user.get_competitor()\n teams = TeamMembership.objects.list_all_teams_for_competitor(competitor=competitor)\n\n if teams:\n teams_data = CustomTeamSerializer(teams, many=True).data\n\n data[\"teams\"] = teams_data\n\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass MeSeasonAPIView(JwtApiAuthenticationMixin,\n MeSerializerMixin,\n generics.GenericAPIView):\n\n def get(self, request, *args, **kwargs):\n data = super().get(request, *args, **kwargs)\n\n team_data = None\n data[\"team\"] = None\n data[\"mentors\"] = None\n\n if not data['is_competitor']:\n return Response(data=data, status=status.HTTP_200_OK)\n\n season_id = self.kwargs.get('season_pk')\n season = get_object_or_404(Season, pk=season_id)\n\n competitor = self.request.user.get_competitor()\n team = Team.objects.get_all_teams_for_current_season(season=season).\\\n get_all_teams_for_competitor(competitor=competitor).first()\n\n tm_id = None\n\n if team:\n team_data = CustomTeamSerializer(team).data\n tm_id = get_object_variable_or_none(\n queryset=TeamMembership.objects.filter(team=team, competitor=competitor),\n variable=\"id\")\n\n mentors = [tm.mentor.id for tm in TeamMentorship.objects.filter(team=team).all()]\n data[\"mentors\"] = mentors\n\n 
data[\"team\"] = team_data\n data[\"team_membership_id\"] = tm_id\n\n try:\n season_competitor_info = SeasonCompetitorInfo.objects.get(competitor=competitor)\n looking_for_team = season_competitor_info.looking_for_team\n season_competitor_info_id = season_competitor_info.id\n except ObjectDoesNotExist:\n looking_for_team = False\n season_competitor_info_id = None\n\n data[\"looking_for_team\"] = looking_for_team\n data[\"season_competitor_info_id\"] = season_competitor_info_id\n\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass SkillListAPIView(generics.ListAPIView):\n permission_classes = (AllowAny,)\n queryset = Skill.objects.all()\n serializer_class = SkillSerializer\n\n\nclass MentorListView(generics.ListAPIView):\n permission_classes = (AllowAny,)\n queryset = Mentor.objects.filter(seasons__is_active=True)\n serializer_class = MentorSerializer\n\n\nclass SeasonView(generics.RetrieveAPIView):\n permission_classes = (AllowAny,)\n serializer_class = SeasonSerializer\n\n def get_object(self):\n return Season.objects.filter(is_active=True).first()\n\n\nclass PublicTeamView(generics.ListAPIView):\n permission_classes = (AllowAny,)\n serializer_class = PublicTeamSerializer\n queryset = Team.objects.filter(season__is_active=True)\n\n\nclass TeamAPI(JwtApiAuthenticationMixin,\n mixins.CreateModelMixin,\n mixins.ListModelMixin,\n mixins.UpdateModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n serializer_class = TeamSerializer\n queryset = Team.objects.filter(season__is_active=True)\n\n \"\"\"\n Get away from overriding JwtApiAuthenticationMixin'permission classes\n \"\"\"\n\n def get_permissions(self):\n permission_classes = [IsHackFMIUser(), IsTeamLeaderOrReadOnly(),\n IsSeasonDeadlineUpToDate(),\n CantCreateTeamWithTeamNameThatAlreadyExists(),\n TeamLeaderCantCreateOtherTeam()]\n return super().get_permissions() + permission_classes\n\n def perform_create(self, serializer):\n season = Season.objects.get(is_active=True)\n team = serializer.save(season=season)\n team.add_member(self.request.user.get_competitor(), is_leader=True)\n team.save()\n\n\nclass TeamMembershipAPI(JwtApiAuthenticationMixin,\n generics.DestroyAPIView):\n serializer_class = TeamMembershipSerializer\n\n def get_permissions(self):\n return super().get_permissions() + [IsHackFMIUser(), IsMemberOfTeam(),\n IsTeamMembershipInActiveSeason()]\n\n def get_queryset(self):\n return TeamMembership.objects.all()\n\n def perform_destroy(self, instance):\n # Remove team if teamleader leaves\n if instance.is_leader is True:\n team = instance.team\n send_team_delete_email(team)\n team.delete()\n instance.delete()\n\n\nclass MentorsForTeamListAPI(JwtApiAuthenticationMixin, generics.ListAPIView):\n serializer_class = MentorForTeamSerializer\n\n def get_permissions(self):\n return super().get_permissions() + [IsHackFMIUser(), ]\n\n def get_queryset(self):\n competitor = self.request.user.get_competitor()\n team = TeamMembership.objects.get_team_memberships_for_active_season(\n competitor=competitor).first().team\n\n return Mentor.objects.filter(teammentorship__team=team)\n\n\nclass TeamMentorshipAPI(JwtApiAuthenticationMixin,\n mixins.DestroyModelMixin,\n generics.CreateAPIView):\n\n serializer_class = TeamMentorshipSerializer\n queryset = TeamMentorship.objects.all()\n\n def get_permissions(self):\n permission_classes = [IsHackFMIUser(), IsTeamLeader(),\n IsMentorDatePickUpToDate(),\n CanAttachMoreMentorsToTeam(),\n MentorIsAlreadySelectedByThisTeamLeader()]\n\n return super().get_permissions() + 
permission_classes\n\n def perform_create(self, serializer):\n team = TeamMembership.objects.get_team_memberships_for_active_season(\n competitor=self.request.user.get_competitor()).first().team\n\n serializer.save(team=team)\n\n def get_object(self):\n competitor = self.request.user.get_competitor()\n team = TeamMembership.objects.get_team_memberships_for_active_season(\n competitor=competitor).first().team\n mentor = get_object_or_404(Mentor, id=self.kwargs['mentor_pk'])\n teammentor_ship = TeamMentorship.objects.filter(team=team, mentor=mentor)\n\n if not teammentor_ship.exists():\n raise ParseError(detail=\"You can't delete non-selected mentor.\")\n\n return teammentor_ship.first()\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\n\nclass InvitationViewSet(JwtApiAuthenticationMixin, viewsets.ModelViewSet):\n serializer_class = InvitationSerializer\n\n list_permission_classes = (IsHackFMIUser,\n IsTeamleaderOrCantCreateIvitation,\n IsInvitedMemberCompetitor,\n IsInvitedMemberAlreadyInYourTeam,\n IsInvitedMemberAlreadyInOtherTeam,\n CanInviteMoreMembersInTeam)\n\n detail_permission_classes = (IsHackFMIUser,\n CanNotAccessWronglyDedicatedIvitation)\n\n accept_permission_classes = (IsHackFMIUser,\n CanNotAcceptInvitationIfTeamLeader,\n IsInvitedUserInTeam,\n CanNotAccessWronglyDedicatedIvitation)\n\n def get_queryset(self):\n return Invitation.objects.get_competitor_invitations_for_active_season(\n competitor=self.request.user.get_competitor())\n\n def get_object(self):\n obj = get_object_or_404(Invitation, id=self.kwargs['pk'])\n self.check_object_permissions(self.request, obj)\n return obj\n\n def perform_create(self, serializer):\n # Request user is the leader of the team and he has exactly one TeamMembership.\n team = TeamMembership.objects.get_team_memberships_for_active_season(\n competitor=self.request.user.get_competitor()).first().team\n\n competitor = serializer.validated_data.get('competitor')\n if Invitation.objects.filter(competitor=competitor, team=team).exists():\n raise ParseError(detail=\"You have already sent an invitation for that user.\")\n\n invitation = serializer.save(team=team)\n send_invitation(invitation)\n\n def accept(self, request, *args, **kwargs):\n invitation = self.get_object()\n TeamMembership.objects.create(team=invitation.team,\n competitor=invitation.competitor)\n invitation.delete()\n return Response(\"You have accepted this invitation!\", status=status.HTTP_200_OK)\n\n @classmethod\n def get_urls(cls):\n invitation_list = cls.as_view({\n 'get': 'list',\n 'post': 'create',\n },\n permission_classes=cls.permission_classes + cls.list_permission_classes\n )\n\n invitation_detail = cls.as_view({\n 'delete': 'destroy',\n },\n permission_classes=cls.permission_classes + cls.detail_permission_classes\n )\n\n invitation_accept = cls.as_view({\n 'post': 'accept',\n },\n permission_classes=cls.permission_classes + cls.accept_permission_classes\n )\n\n return locals()\n\n\n@api_view(['GET'])\ndef get_schedule(request):\n content = \"\"\n\n with open(\"media/mentors.html\", \"r\") as f:\n content = f.read()\n\n return Response(content, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n# @permission_classes((AllowAny, ))\ndef schedule_json(request):\n content = {\n \"placed\": {},\n \"leftovers\": []\n }\n\n with try_open(\"media/placing.json\", \"r\") as (f, error):\n if error is None:\n content = json.loads(f.read())\n\n return Response(content, status=status.HTTP_200_OK)\n\n\nclass 
OnBoardCompetitorAPI(JwtApiAuthenticationMixin,\n APIView):\n\n def post(self, request, format=None):\n if not request.user.get_competitor():\n serializer = OnBoardingCompetitorSerializer(data=request.data, baseuser=request.user)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n return Response({\"custom_errors\": [\"User is already competitor!\"]}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TestApi(JwtApiAuthenticationMixin,\n APIView):\n\n def get(self, request):\n return Response(\"Great, status 200\", status=status.HTTP_200_OK)\n\n\nclass SeasonInfoAPI(JwtApiAuthenticationMixin,\n generics.CreateAPIView,\n generics.UpdateAPIView):\n serializer_class = SeasonCompetitorInfoSerializer\n queryset = SeasonCompetitorInfo.objects.filter(season__is_active=True)\n\n def get_permissions(self):\n permission_classes = [IsSeasonActive(), IsCompetitorMemberOfTeamForActiveSeason(),\n CantChangeOtherCompetitorsData()]\n return super().get_permissions() + permission_classes\n\n\nclass CompetitorListAPIView(JwtApiAuthenticationMixin, generics.ListAPIView):\n serializer_class = CompetitorListSerializer\n\n def get_permissions(self):\n permission_classes = [IsTeamLeader(), IsTeamMembershipInActiveSeason(),\n CanInviteMoreMembersInTeam()]\n return super().get_permissions() + permission_classes\n\n def get_queryset(self):\n return Competitor.objects.filter(seasoncompetitorinfo__season__is_active=True,\n seasoncompetitorinfo__looking_for_team=True)\n\n\nclass JWTLogoutView(JwtApiAuthenticationMixin,\n APIView):\n\n def post(self, request, *args, **kwargs):\n token = request.META.get('HTTP_AUTHORIZATION')\n\n if BlackListToken.objects.filter(token=token).exists():\n return Response(\"Token is already blacklisted.\", status=status.HTTP_400_BAD_REQUEST)\n\n BlackListToken.objects.create(token=token)\n return Response(\"Token has been blacklisted.\", status=status.HTTP_202_ACCEPTED)\n\n\nclass CustomRefreshJSONWebTokenAPIView(JwtApiAuthenticationMixin, RefreshJSONWebToken):\n\n def post(self, request, *args, **kwargs):\n token = request.META.get('HTTP_AUTHORIZATION')\n BlackListToken.objects.create(token=token)\n\n return super().post(request, *args, **kwargs)\n","repo_name":"rizplate/Loki","sub_path":"loki/hack_fmi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
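An aside on the views.py entry above: InvitationViewSet.get_urls() returns its three composed views via locals(). A hypothetical sketch of how a urls.py could consume that dict; the URL patterns here are invented for illustration and are not taken from the project:

# hypothetical wiring for the dict returned by InvitationViewSet.get_urls()
from django.conf.urls import url

views = InvitationViewSet.get_urls()
urlpatterns = [
    url(r'^invitations/$', views['invitation_list']),
    url(r'^invitations/(?P<pk>\d+)/$', views['invitation_detail']),
    url(r'^invitations/(?P<pk>\d+)/accept/$', views['invitation_accept']),
]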
+{"seq_id":"15125825793","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[22]:\n\n\nimport wiringpi as pi\nimport time\nfrom datetime import datetime\n\n\n# In[34]:\n\n\n# メモ\nA_PINs = [5, 4, 3, 2, 1, 7, 10,11] # 7セグのpin番号\nA_GPIOs = [14,15,18,23,24,25,8, 7] # raspberrypiのGPIO番号\nDIGITS_PINs = [6, 8, 9, 12] # 7セグの桁指定用pin番号\nDIGITs = [17,27,22,10] # 桁指定用GPIO番号\n\n# 出力数字とGPIO番号の対応\nNUMBERs = {'0': [0,1,0,1,1,1,1,1],\n '1':[0,1,0,0,0,1,0,0],\n '2':[1,0,0,1,1,1,0,1],\n '3':[1,1,0,1,0,1,0,1],\n '4':[1,1,0,0,0,1,1,0],\n '5':[1,1,0,1,0,0,1,1],\n '6':[1,1,0,1,1,0,1,1],\n '7':[0,1,0,0,0,1,1,1],\n '8':[1,1,0,1,1,1,1,1],\n '9':[1,1,0,1,0,1,1,1],\n '.':[0,0,1,0,0,0,0,0]}\n\n#str数字入力から各桁の値と桁数のarrayを作る\ndef analyze(input_num):\n \n nums = list(input_num)\n nums = nums[-1::-1] #桁数<4のときのため、桁を反転しておく\n selected_digits = ['0','1','2','3']\n \n return nums, selected_digits\n\ndef display_bydigit(number, selected_digit):\n\n # 桁指定(対応TrをON)\n digit_gpio = DIGITs[int(selected_digit)]\n pi.digitalWrite(digit_gpio, pi.HIGH)\n\n # 数字描画\n for i, zero_one in enumerate(NUMBERs[number]):\n if zero_one == 1:\n on_gpio = A_GPIOs[i]\n pi.digitalWrite(on_gpio, pi.HIGH)\n\n # 保持(ms)\n # pi.delay(7)\n\n # 描画の終了\n for i, x in enumerate(NUMBERs[number]):\n if x == 1:\n on_GPIO = A_GPIOs[i] \n pi.digitalWrite(on_GPIO, pi.LOW)\n\n # 桁指定の終了\n pi.digitalWrite(digit_gpio, pi.LOW)\n\n\ndef display_main(time_checking_period=1):\n \"\"\"\n str4桁数字列(input_num)を7セグに表示\n nums:input_num数字列のリスト\n num:numsリストの各要素(str型1桁の数字)\n \"\"\"\n\n\n # 処理開始\n pi.wiringPiSetupGpio() \n \n # 出力をOUTPUT、0Vに初期化\n for A_GPIO in A_GPIOs: \n pi.pinMode(A_GPIO, pi.OUTPUT)\n pi.digitalWrite(A_GPIO, pi.LOW)\n\n for digit in DIGITs:\n pi.pinMode(digit, pi.OUTPUT)\n pi.digitalWrite(digit, pi.LOW)\n\n\n # diisplay_time 秒間表示\n TIME_CHECKING_PERIOD = time_checking_period\n \n while True:\n \n # 現在時刻を取得\n now_time = datetime.now().strftime('%H%M')\n \n #analyze関数\n nums, selected_digits = analyze(now_time)\n \n start = time.time()\n \n while True:\n\n for i in range(len(nums)):\n\n num = nums[i]\n selected_digit = selected_digits[i]\n\n # display_bydigit関数\n display_bydigit(num, selected_digit)\n\n if time.time()-start >= TIME_CHECKING_PERIOD:\n\n break\n\n # 出力をINPUTに初期化\n for A_GPIO in A_GPIOs:\n pi.pinMode(A_GPIO, pi.INPUT)\n\n for digit in DIGITs:\n pi.pinMode(digit, pi.INPUT)\n\n\n# In[35]:\n\n\nif __name__ == '__main__' :\n\n display_main()\n\n","repo_name":"Fuji-Matcha/RaspberryPi","sub_path":"display_time_7deg.py","file_name":"display_time_7deg.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9813971632","text":"from spe import *\nfrom coupons import *\ndef parseAlgoodMarket1(page):\n\timport datetime\n\timport sys\n\tsys.path.append('./BeautifulSoup-3.2.0')\n\tfrom BeautifulSoup import BeautifulSoup\n\tsoup = BeautifulSoup(\"\".join(page))\n\tfor i in soup('div',{ \"id\" : \"offer\" }):\n\n\t\tstore = \"Algood Market\"\n\t \n\t\tpricesValid=\"\"\n\t\tstartDate=\"\"\n\t\tendDate=\"\"\n\t\tpricesValidInput = i.find('p')\n\t\tif pricesValidInput:\n\t\t\tpricesValid = \" \".join(removeNonAscii(pricesValidInput.renderContents()).split())\n\t\t\tstartDate = datetime.datetime.now().strftime(\"%m/%d/%Y\").lstrip('0')\n\t\t\tendDate = pricesValid[17:99]\n\n\t\ttitle=\"\"\n\t\ttitleInput = i.find('h3')\n\t\tif titleInput:\n\t\t\ttitle = removeNonAscii(titleInput.renderContents())\n\n\t\tdescription = title\n\n\t\tprice = \"\"\n\t\tpriceInput = i.find('h1')\n\t\tif priceInput:\n\t\t\tprice = removeNonAscii(priceInput.renderContents())\n\n\t\tif title:\n\t\t\tprint (store+'|'+startDate+'|'+endDate+'|'+title+'|'+description+'|'+price).replace(\"\\r\\n\",\" \")\n\t\t\taddToSales(store,startDate,endDate,title,description,price)\n\treturn\ndef scrapeAlgoodMarket1():\n\n\timport urllib\n\tf = urllib.urlopen(\"http://algoodmarket.com/page/sales\")\n\taddPageToDb(\"AlgoodMarket1\",f.read())\n\treturn\ndef processAlgoodMarket1():\n\tfrom datetime import datetime\n\tfrom sqlite3 import connect\n\n\trows = getPagesFromDb(\"AlgoodMarket1\")\n\n\tfor row in rows:\n\t\tparseAlgoodMarket1(row[0])\n#\t\tsetPageAsProcessed(row[1])\n\treturn\n\n","repo_name":"twayneprice/twayneprice-spe","sub_path":"AlgoodMarket.py","file_name":"AlgoodMarket.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"34742723489","text":"import datetime\nimport mox\nfrom mox import IgnoreArg, IsA\nfrom common.helpers import email_provider\nfrom common.helpers.common_dependency_helper import register_common_mox_dependencies\nfrom common.utilities import sql\nfrom common.utilities.inversion_of_control import Dependency, dependencies\nfrom common.web_helpers import logging_helper\nfrom core.data_checks.config.config import email_settings\nfrom core.service.svc_workflow.implementation.task.implementation.custom_analytics.custom_analytics_cleanup_job import CustomAnalyticsCleanupJob\n\n__author__ = 'erezrubinstein'\n\nclass TestCustomAnalyticsCleanupJob(mox.MoxTestBase):\n\n def setUp(self):\n\n # call parent set up\n super(TestCustomAnalyticsCleanupJob, self).setUp()\n\n # register mock dependencies\n register_common_mox_dependencies(self.mox)\n\n # get various mox dependencies\n self.mock_retail_access = Dependency(\"RetailMongoAccess\").value\n self.mock_logger = Dependency(\"FlaskLogger\").value\n\n # various needed data\n self.context = { \"user\": \"chicken_woot\" }\n self.task_rec = { \"context\": self.context }\n\n # create the scheduler\n self.cleanup_job = CustomAnalyticsCleanupJob(self.task_rec)\n\n\n def doCleanups(self):\n\n # call parent clean up\n super(TestCustomAnalyticsCleanupJob, self).doCleanups()\n\n # clear dependencies\n dependencies.clear()\n\n\n def test_complete_run__success(self):\n\n # begin mocking stuff\n self.mox.StubOutWithMock(self.cleanup_job, \"_get_custom_analytics_jobs_to_clean_up\")\n self.mox.StubOutWithMock(self.cleanup_job, \"_back_up_target_db\")\n self.mox.StubOutWithMock(self.cleanup_job, \"_drop_target_and_logging_dbs\")\n self.mox.StubOutWithMock(self.cleanup_job, \"_mark_ca_run_as_cleaned_up\")\n\n # begin recording\n self.cleanup_job._get_custom_analytics_jobs_to_clean_up().AndReturn([\"chicken\", \"woot\"])\n self.cleanup_job._back_up_target_db(\"chicken\")\n self.cleanup_job._drop_target_and_logging_dbs(\"chicken\")\n self.cleanup_job._mark_ca_run_as_cleaned_up(\"chicken\")\n self.cleanup_job._back_up_target_db(\"woot\")\n self.cleanup_job._drop_target_and_logging_dbs(\"woot\")\n self.cleanup_job._mark_ca_run_as_cleaned_up(\"woot\")\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n self.cleanup_job.run()\n\n\n def test_complete_run__exception(self):\n\n # define exception side effect method\n exception = Exception(\"yo mama\")\n def raise_exception():\n raise exception\n\n # begin mocking stuff\n self.mox.StubOutWithMock(logging_helper, \"log_exception\")\n self.mox.StubOutWithMock(self.cleanup_job, \"_get_custom_analytics_jobs_to_clean_up\")\n self.mox.StubOutWithMock(self.cleanup_job, \"_send_error_email\")\n\n # begin recording\n self.cleanup_job._get_custom_analytics_jobs_to_clean_up().WithSideEffects(raise_exception)\n logging_helper.log_exception(self.mock_logger, \"Error running CustomAnalyticsCleanupJob\", exception, IgnoreArg())\n self.cleanup_job._send_error_email(\"yo mama\", IsA(basestring))\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n self.assertRaises(Exception, self.cleanup_job.run)\n\n\n def test_send_error_email(self):\n\n # create mock ids\n mock_error = \"I love goooooooold\"\n\n # create expected email stuff\n mock_subject = \"Error Running Custom Analytics Cleanup Job\"\n mock_body = \"Error: I love goooooooold\\nTrace Stack: trace_stack\"\n mock_from_email = \"support@signaldataco.com\"\n mock_to_email = [\"engineering@signaldataco.com\"]\n\n # replace email settings with mocks/stubs\n 
email_settings[\"smtp_server\"] = \"chicken\"\n email_settings[\"username\"] = \"woot\"\n email_settings[\"password\"] = \"Austin-Danger\"\n\n # begin stubbing\n self.mox.StubOutClassWithMocks(email_provider, \"EmailProvider\")\n\n # begin recording\n mock_email_provider = email_provider.EmailProvider(\"chicken\", \"woot\", \"Austin-Danger\")\n mock_email_provider.send_email(mock_from_email, mock_to_email, mock_subject, mock_body)\n\n # replay all\n self.mox.ReplayAll()\n\n # I love gooooold!\n self.cleanup_job._send_error_email(mock_error, \"trace_stack\")\n\n\n def test_get_custom_analytics_jobs_to_clean_up(self):\n\n # define some mocks and expected values\n expected_entity_fields = { \"_id\": 1, \"target_db_name\": 1, \"logging_db_name\": 1 }\n expected_query = {\n \"$or\": [\n { \"database_deleted\": { \"$exists\": False }},\n { \"database_deleted\": False }\n ],\n \"target_db_name\": { \"$exists\": True },\n \"logging_db_name\": { \"$exists\": True },\n \"heart_beat\" : { \"$lt\": datetime.datetime(2014, 2, 25) }\n }\n\n # begin stubbing\n self.mox.StubOutWithMock(self.cleanup_job, \"_get_utc_now\")\n\n # begin recording\n self.cleanup_job._get_utc_now().AndReturn(datetime.datetime(2014, 3, 25))\n self.mock_retail_access.find(\"custom_analytics_run\", expected_query, expected_entity_fields).AndReturn([\"woot\"])\n\n # replay all\n self.mox.ReplayAll()\n\n # go\n self.assertEqual(self.cleanup_job._get_custom_analytics_jobs_to_clean_up(), [\"woot\"])\n\n\n def test_back_up_target_db(self):\n\n # create mocks\n mock_ca_run = { \"target_db_name\": \"chicken_woot\" }\n mock_sql_statement = \"\"\"\n BACKUP DATABASE chicken_woot\n TO DISK = 'D:\\SQLData\\Backup\\Custom Analytics\\chicken_woot.bak'\n WITH FORMAT,\n COMPRESSION,\n MEDIANAME = 'D_SQLServerBackups',\n NAME = 'Full Backup of chicken_woot'\n \"\"\"\n\n # begin stubbing\n self.mox.StubOutWithMock(sql, \"sql_execute\")\n\n # begin recording\n sql.sql_execute(mock_sql_statement, database_name = \"chicken_woot\")\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n self.cleanup_job._back_up_target_db(mock_ca_run)\n\n\n def test_drop_target_and_logging_dbs(self):\n\n # create mocks\n mock_ca_run = {\n \"target_db_name\": \"chicken\",\n \"logging_db_name\": \"woot\"\n }\n\n # begin stubbing\n self.mox.StubOutWithMock(sql, \"sql_execute\")\n\n # begin recording\n sql.sql_execute(\"alter database chicken set single_user with rollback immediate\", database_name = \"master\")\n sql.sql_execute(\"alter database woot set single_user with rollback immediate\", database_name = \"master\")\n sql.sql_execute(\"drop database chicken\", database_name = \"master\")\n sql.sql_execute(\"drop database woot\", database_name = \"master\")\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n self.cleanup_job._drop_target_and_logging_dbs(mock_ca_run)\n\n\n def test_mark_ca_run_as_cleaned_up(self):\n\n # create mocks and expected values\n mock_ca_run = { \"_id\": \"Gold Member\", \"whatever\": \"whichever\" }\n mock_query = { \"_id\": \"Gold Member\" }\n mock_operation = {\n \"$set\": {\n \"database_deleted\": True\n }\n }\n\n # begin recording\n self.mock_retail_access.update(\"custom_analytics_run\", mock_query, mock_operation)\n\n # replay all\n self.mox.ReplayAll()\n\n # I love gooooold!\n 
self.cleanup_job._mark_ca_run_as_cleaned_up(mock_ca_run)\n","repo_name":"erezrubinstein/aa","sub_path":"tests/unit_tests/core_tests/service_tests/workflow_tests/test_custom_analytics_cleanup_job.py","file_name":"test_custom_analytics_cleanup_job.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39221083913","text":"# Stopwatch: The Game\r\n\r\nimport simplegui\r\n\r\n# define global variables\r\ninterval = 100\r\ncount = 0\r\ntotal_stops = 0\r\nsucess_stops = 0\r\nstop = True\r\n\r\n\r\n# define helper function format that converts time\r\n# in tenths of seconds into formatted string A:BC.D\r\ndef format(t):\r\n A = 0\r\n B = 0\r\n C = 0\r\n D = t\r\n\r\n if t >= 10:\r\n C = t // 10\r\n D = t % 10\r\n\r\n if C >= 10:\r\n B = C // 10\r\n C = C % 10\r\n\r\n if B >= 6:\r\n A = B // 6\r\n B = B % 6\r\n\r\n return str(A) + \":\" + str(B) + str(C) + \".\" + str(D)\r\n \r\n pass\r\n \r\n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\r\n\r\ndef Start():\r\n global count, stop\r\n stop = False\r\n timer.start()\r\n \r\ndef Stop():\r\n global total_stops, success_stops, stop\r\n if stop == False :\r\n if count % 10 == 0 and count != 0 :\r\n success_stops += 1\r\n total_stops += 1\r\n elif count != 0 :\r\n total_stops += 1\r\n stopped = True\r\n timer.stop()\r\n \r\ndef Reset():\r\n global count, success_stops, total_stops\r\n count = 0\r\n stop = True\r\n total_stops = 0\r\n success_stops = 0\r\n timer.stop()\r\n\r\n# define event handler for timer with 0.1 sec interval\r\ndef tick():\r\n global count\r\n count = count + 1\r\n \r\n\r\n# define draw handler\r\ndef draw(canvas):\r\n text = format(count)\r\n canvas.draw_text( text, (80, 125), 42, \"white\")\r\n canvas.draw_text(str(success_stops) + '/' + str(total_stops), (190,30), 24, \"blue\")\r\n \r\n# Create a frame \r\nframe = simplegui.create_frame(\"Stopwatch game\", 250, 250)\r\nframe.set_canvas_background('black')\r\n\r\n# Register event handlers\r\nframe.add_button(\"Start\", Start, 100)\r\nframe.add_button(\"Stop\", Stop, 100)\r\nframe.add_button(\"Reset\", Reset, 100)\r\nframe.set_draw_handler(draw)\r\ntimer = simplegui.create_timer(interval, tick)\r\n\r\n# Start the frame animation\r\nframe.start()\r\nReset()\r\n","repo_name":"Connienguyen1501/python-program","sub_path":"SimpleGUI.Game.Stopwatch.py","file_name":"SimpleGUI.Game.Stopwatch.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5856083236","text":"import cv2, numpy as np, matplotlib.pyplot as plt\n\nimg = cv2.imread('falls.jpeg')\nimg = cv2.resize(img, (250, 250))\nedges1 = cv2.Canny(img,100,200)\nedges2 = cv2.Canny(img,350,500)\n\nplt.subplot(131),plt.imshow(img)\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(132),plt.imshow(edges1,cmap = 'gray')\nplt.title('Aristas con ruido'), plt.xticks([]), plt.yticks([])\nplt.subplot(133),plt.imshow(edges2,cmap = 'gray')\nplt.title('Aristas con pérdida'), plt.xticks([]), plt.yticks([])\nplt.show()\n","repo_name":"Gilberto-Lopez/Is-This-LOSS","sub_path":"Reporte/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18776150484","text":"# Using Lists with While Loops\n\n# While a for loop is effective for looping through a list,\n# it shouldn't be used to modify the list because Python\n# will have difficulty keeping track of the items in the list.\n# Instead, use a while loop to modify a list as you loop through it.\n# Using a while loop will allow you to collect, store & organize lots\n# of input.\n\n# Moving Items from One List to Another\n\nneeds_shots = ['bear', 'fido', 'fluffy']\nvaccinated = ['rosie', 'zena']\n\nfor pet in needs_shots:\n print(f\"{pet.title()} is in the waiting room.\")\n\nfor pet in vaccinated:\n print(f\"{pet.title()}'s vaccinations are up to date.\")\n\nwhile needs_shots:\n get_shots = needs_shots.pop()\n\n print(f\"{get_shots.title()} is being vaccinated.\")\n vaccinated.append(get_shots)\n#I want to format this but I'm not sure how to do it.\n#if len(needs_shots) => 0\n# print(\"All pets have been vaccinated. The waiting room is empty.\"\n#else:\n# print(f\"To Be Vaccinated: {needs_shots}\")\n\nfor pet in vaccinated:\n print(f\"{pet.title()} is fully vaccinated. Come back next year!\")\n\nprint(\"\\nAnd now how we can remove all instances of a specific value from a list.\\n\")\nprint(\"\"\"As we already know, the remove() function only removes the first instance of\n a value. If we use remove() inside of a while loop, however, we can remove all\n instances of the specified value.\"\"\")\n\nmammels = ['bats', 'cetaceans', 'bears', 'rodents', 'aliens', 'monotremes', 'sloths', 'otters', 'aliens', 'seals']\n\nprint(f\"\\nmammels = {mammels}\")\n\nwhile 'aliens' in mammels:\n mammels.remove('aliens')\n\nprint(f\"\\nmammels (after while loop) = {mammels}\")\n\n","repo_name":"sdemoya/py-scripts","sub_path":"crash-course-notes/while-with-lists.py","file_name":"while-with-lists.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"40963450774","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\n\"\"\"Lithium's \"outputs\" interestingness test to assess whether an intended message shows\nup.\n\nExample:\n python -m lithium outputs --timeout=9 FOO --fuzzing-safe \n\"\"\"\n\nimport logging\nimport re\nimport sys\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nfrom . import utils\nfrom .timed_run import BaseParser, timed_run\n\nLOG = logging.getLogger(__name__)\n\n\ndef file_contains(path: Union[Path, str], is_regex: bool, search: str) -> bool:\n \"\"\"Determine if string is present in a file.\n\n Args:\n path:\n is_regex:\n search:\n\n Returns:\n\n \"\"\"\n if is_regex:\n return utils.file_contains_regex(path, search.encode())[0]\n return utils.file_contains_str(path, search.encode())\n\n\ndef interesting(\n cli_args: Optional[List[str]] = None,\n temp_prefix: Optional[str] = None,\n) -> bool:\n \"\"\"Interesting if the binary causes an intended message to show up. (e.g. on\n stdout/stderr)\n\n Args:\n cli_args: List of input arguments.\n temp_prefix: Temporary directory prefix, e.g. tmp1/1 or tmp4/1\n\n Returns:\n True if the intended message shows up, False otherwise.\n \"\"\"\n parser = BaseParser()\n parser.add_argument(\n \"-s\",\n \"--search\",\n help=\"String to search for.\",\n required=True,\n )\n parser.add_argument(\n \"-r\",\n \"--regex\",\n action=\"store_true\",\n default=False,\n help=\"Treat string as a regular expression\",\n )\n args = parser.parse_args(cli_args)\n if not args.cmd_with_flags:\n parser.error(\"Must specify command to evaluate.\")\n\n run_info = timed_run(args.cmd_with_flags, args.timeout, temp_prefix)\n\n if temp_prefix is None:\n outputs = (run_info.out, run_info.err)\n for data in outputs:\n if (args.regex and re.match(args.search, data, flags=re.MULTILINE)) or (\n args.search in data\n ):\n LOG.info(\"[Interesting] Match detected!\")\n return True\n\n LOG.info(\"[Uninteresting] No match detected!\")\n return False\n\n result = any(\n file_contains(f\"{temp_prefix}{suffix}\", args.regex, args.search)\n for suffix in (\"-out.txt\", \"-err.txt\")\n )\n if result:\n LOG.info(\"[Interesting] Match detected!\")\n return True\n\n LOG.info(\"[Uninteresting] No match detected!\")\n return False\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO)\n sys.exit(interesting())\n","repo_name":"MozillaSecurity/lithium","sub_path":"src/lithium/interestingness/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"61"}
+{"seq_id":"72659959553","text":"import warnings\n\nfrom dagster import (\n AssetSelection,\n DefaultScheduleStatus,\n Definitions,\n ExperimentalWarning,\n ScheduleDefinition,\n define_asset_job,\n)\n\nwarnings.filterwarnings(\"ignore\", category=ExperimentalWarning)\n\nfrom .assets import dataset_assets, model_assets, prediction_assets\nfrom .resources import RESOURCES\n\nall_assets = [*dataset_assets, *model_assets, *prediction_assets]\n\nrefresh_all_assets = define_asset_job(\n \"refresh_all_assets\", AssetSelection.all()\n)\nraw_dataset_refresh_schedule = ScheduleDefinition(\n job=refresh_all_assets,\n cron_schedule=\"0 * * * *\",\n default_status=DefaultScheduleStatus.RUNNING,\n)\n\ndefs = Definitions(\n assets=all_assets, resources=RESOURCES, schedules=[raw_dataset_refresh_schedule]\n)\n","repo_name":"dagster-io/dagster_llm_finetune","sub_path":"tutorial/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"}
+{"seq_id":"18886682517","text":"from django.conf import settings\nfrom django.views.generic import ListView\nfrom django.utils.translation import ugettext as _\n\nfrom common.mixins import DatetimeSearchMixin\nfrom common.permissions import AdminUserRequiredMixin\n\nfrom .models import FTPLog\n\n\nclass FTPLogListView(AdminUserRequiredMixin, DatetimeSearchMixin, ListView):\n model = FTPLog\n template_name = 'audits/ftp_log_list.html'\n paginate_by = settings.DISPLAY_PER_PAGE\n user = asset = system_user = filename = ''\n date_from = date_to = None\n\n def get_queryset(self):\n self.queryset = super().get_queryset()\n self.user = self.request.GET.get('user')\n self.asset = self.request.GET.get('asset')\n self.system_user = self.request.GET.get('system_user')\n self.filename = self.request.GET.get('filename', '')\n\n filter_kwargs = dict()\n filter_kwargs['date_start__gt'] = self.date_from\n filter_kwargs['date_start__lt'] = self.date_to\n if self.user:\n filter_kwargs['user'] = self.user\n if self.asset:\n filter_kwargs['asset'] = self.asset\n if self.system_user:\n filter_kwargs['system_user'] = self.system_user\n if self.filename:\n filter_kwargs['filename__contains'] = self.filename\n if filter_kwargs:\n self.queryset = self.queryset.filter(**filter_kwargs).order_by('-date_start')\n return self.queryset\n\n def get_context_data(self, **kwargs):\n context = {\n 'user_list': FTPLog.objects.values_list('user', flat=True).distinct(),\n 'asset_list': FTPLog.objects.values_list('asset', flat=True).distinct(),\n 'system_user_list': FTPLog.objects.values_list('system_user', flat=True).distinct(),\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'user': self.user,\n 'asset': self.asset,\n 'system_user': self.system_user,\n 'filename': self.filename,\n \"app\": _(\"Audits\"),\n \"action\": _(\"FTP log\"),\n }\n kwargs.update(context)\n return super().get_context_data(**kwargs)\n","repo_name":"wzs654421772/jumpserver","sub_path":"apps/audits/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"29006597013","text":"# NEU CS5001 Project 5 graphicsPlus drawing\n# Yong Shi/shi.yong@northeastern.edu/NUID 001578845\n\nimport graphicsPlus as g\nimport random\nimport time\nimport sys\nimport complex_shapes as com\n\n\ndef main():\n k = int(sys.argv[1]) # scale control at terminal input\n\n win=g.GraphWin(\"My window\", 1000, 600)\n test=com.riverboat(100, 300, k)\n for thing in test:\n thing.draw(win)\n\n test2=com.village(500, 300, k)\n for thing in test2:\n thing.draw(win)\n\n test2=com.village(100, 300, k)\n for thing in test2:\n thing.draw(win)\n\n test4 = com.init_birds(-50,0,2)\n for thing in test4:\n thing.draw(win)\n\n for i in range(30):\n test3=com.grass(100, 300, 1)\n for thing in test3:\n thing.draw(win)\n\n frame = 0\n while True:\n key = win.checkKey()\n if key == 'q':\n break\n if win.checkMouse() != None:\n break\n\n # call the birds animation fly to right and left\n com.animate_birds( test4, frame, win )\n # call the boats and river animation fly to right and left\n com.animate_riverboat(test, frame, win)\n # call the village animation fly to right and left\n com.animate_village(test2, frame, win)\n\n win.update()\n frame += 1\n\n win.getMouse()\n win.close()\n\nif __name__==\"__main__\":\n main()\n\n# due to I am in China that can not access Google Drive, or other Google products,\n# so got noted with professor that I sent the video thru email to professor already, thanks!","repo_name":"shiyong5008/Python-project","sub_path":"project 5/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4524135048","text":"\"\"\"\r\nEscreva um programa que peça números inteiros positivos indefinidamente e armazene-os em uma lista. O programa\r\ndeverá ser encerrado caso o número digitado seja negativo ou nulo. Ao final mostre na tela a quantidade\r\nnúmeros pares e ímpares.\r\n\"\"\"\r\n\r\nvalores = []\r\npares = 0\r\nimpares = 0\r\n\r\nwhile True:\r\n n = int(input('Digite um número: '))\r\n if n > 0:\r\n valores.append(n)\r\n else:\r\n print('Encerrando o programa...')\r\n break\r\n\r\nfor i in valores:\r\n if i % 2 == 0:\r\n pares += 1\r\n else:\r\n impares += 1\r\n\r\nprint(f'Quantidade de pares: {pares}\\n'\r\n f'Quantidade de ímpares: {impares}')","repo_name":"hhigorb/exercicios_python_pratica","sub_path":"2_Estruturas de repetição/ex017.py","file_name":"ex017.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5154568766","text":"import pygame, sys\nfrom pygame.locals import *\n\npygame.init()\n\n\ndef UpdateDirty(Spritelist, surface): #Updates all dirty sprites in Spritelist\n for i in Spritelist:\n i.update()\n i.draw(surface)\n\n\ndef movement(Display, Backgrounds, Players, Houses, NPCs, Cursor):\n \"\"\"All sprite inputs have to be in DirtySprite format and in a LayeredDirty group,\n Creates a loop and detects keyboard inputs, controls with WASD but can be configured later\"\"\"\n BLACK = (0, 0, 0)\n MenuOpen = False\n Display.fill(BLACK)\n Movingsprites = (Backgrounds, Houses, NPCs)\n Cursorpos = pygame.mouse.get_pos()\n Backgrounds.clear(Display, Display)\n Houses.clear(Display, Display)\n NPCs.clear(Display, Display)\n Players.clear(Display, Display)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n if event.key == K_TAB:\n MenuOpen = True\n return MenuOpen\n if pygame.key.get_pressed()[K_a]: # Movement key detection and movement\n for i in Movingsprites:\n for j in i:\n j.move('moveLeft')\n if pygame.key.get_pressed()[K_d]:\n for i in Movingsprites:\n for j in i:\n j.move('moveRight')\n if pygame.key.get_pressed()[K_w]:\n for i in Movingsprites:\n for j in i:\n j.move('moveUp')\n if pygame.key.get_pressed()[K_s]:\n for i in Movingsprites:\n for j in i:\n j.move('moveDown')\n UpdateDirty((Backgrounds, Houses, NPCs, Players, Cursor), Display)\n \n \ndef OpenMenu(Menuimage, Menucoords, Selector_img, Selectcoords, Buttonlist, colorkey, surface, MOVESPEED):\n Menuimg = pygame.image.load(Menuimage)\n Selector_img = pygame.image.load(Selector_img)\n Menuimg.set_colorkey(colorkey)\n Selector_img.set_colorkey(colorkey)\n Menurect = Menuimg.get_rect()\n Selector_rect = Selector_img.get_rect()\n (pos_x, pos_y) = Selector_rect.topleft\n Cursorpos = pygame.mouse.get_pos()\n MenuEnter, MenuExit = False, False\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_w: pos_y -= MOVESPEED\n if event.key == K_s: pos_y += MOVESPEED\n if event.key == K_a: pos_x -= MOVESPEED\n if event.key == K_d: pos_x += MOVESPEED\n if event.key == K_RETURN: MenuEnter = True\n if event.key == K_TAB: MenuOpen = False\n if event.key == K_BACKSPACE: MenuExit = True\n if event.type == KEYUP:\n if event.key == K_RETURN: MenuEnter = False\n if event.key == K_BACKSPACE: MenuExit = False\n if event.type == MOUSEBUTTONDOWN: Click = True\n if event.type == MOUSEBUTTONUP: Click = False\n for i in Buttonlist:\n if i.dirty:\n if i.Click > 0 or (i.rect.collidepoint(pos_x, pos_y) and MenuEnter):\n i.Output\n surface.blit(Menuimg, Menucoords)\n surface.blit(Selector_img, Selectcoords)","repo_name":"dangercrow/Project_Awesome","sub_path":"test/dan/Dan's play area !/FunctLib.py","file_name":"FunctLib.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29519222378","text":"from src.oblivious_robots_target_searching.Graph import Playground\n\nnoOfNodes = 100\nnoOfRobots = 20\n\ndef checkResult(result):\n if result[0] != noOfNodes: return False\n if result[1] != noOfRobots: return False\n if result[2] < 10 or result[2] > 100: return False\n return True\n\ndef test_simRingWithChords():\n P = Playground(True)\n P.setup({\n \"type\": \"ring-with-chords\",\n \"val\": noOfNodes,\n \"noOfRobots\": noOfRobots,\n \"noOfChords\": 15\n })\n assert checkResult(P.run())\n \ndef test_InvalidGraphError():\n err = None\n P = Playground(True)\n try:\n P.setup({\n \"type\": \"no-such-graph-exists\",\n \"val\": noOfNodes,\n \"noOfRobots\": noOfRobots,\n \"noOfChords\": 15\n })\n except Exception as e:\n err = str(e)\n assert err == 'Invalid Graph'\n","repo_name":"Sairyo-No-Developers/oblivious-robots-target-search","sub_path":"tests/sim_test.py","file_name":"sim_test.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41502505191","text":"import os\nimport sys\nimport re\n\nfrom bcolors import bcolors\n\n\ndef extractInfomation(command, regex, path=None, failedToExtractCallBack=None, successCallBack=None, envs=None):\n env = \"\"\n try:\n env = os.environ[\"ORACLE_HOME\"] + \"/bin/\"\n except KeyError:\n try:\n env = path + \"/bin/\"\n except TypeError:\n env = \"\"\n\n if envs is not None:\n stdout = os.popen(envs + ';' + env + command).read()\n else:\n stdout = os.popen(env + command).read()\n\n try:\n result = re.search(regex, stdout).group(1)\n if successCallBack is not None:\n return successCallBack()\n return result\n except:\n if failedToExtractCallBack is not None and \"PRKF-1110\" in stdout:\n return failedToExtractCallBack()\n else:\n sys.exit(bcolors.FAIL +\n \"Failed to extract information. You may need to export ORACLE_HOME or change it in settings.py\" + bcolors.ENDC)\n","repo_name":"Voyager2718/Oracle-Clusterware-Info-Getter","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14862795035","text":"import os\r\nimport re\r\n\r\n#Search all files that match the string(regular expession) in dir, and then return a list contents all absolute path.\r\ndef SearchFile(Path, File):\r\n allFile=os.listdir(Path)\r\n temp_list = []\r\n for eachFile in allFile:\r\n if os.path.isdir(Path+os.sep+eachFile):\r\n temp_list.extend(SearchFile(Path+os.sep+eachFile, File))\r\n elif re.findall(File,eachFile) != []:\r\n objectFile = re.findall(File,eachFile)\r\n temp_list.append(Path + os.sep + objectFile[0])\r\n return temp_list\r\n#usage:******************************* \r\n#Path = '/home/fanzhibo/MPI_CODE'\r\n#File = re.compile(r'.*\\.c')\r\n#res = SearchFile(Path, File)\r\n#print(res)\r\n#***************************************\r\n","repo_name":"night1412/PythonTool","sub_path":"SearchFile.py","file_name":"SearchFile.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38990521260","text":"from typing import List, Tuple, Optional\nfrom urllib.request import urlopen\nimport re\nfrom html.parser import HTMLParser\n\n#https://habr.com/ru/post/543760/\n\nclass MyHTMLParser(HTMLParser):\n\n def __init__(self):\n super().__init__()\n self.depth = 0\n self.best_depth = 1000\n self.best_data = ''\n self.html_data = ''\n\n def handle_starttag(self, tag, attrs):\n self.depth += 1\n self.html_data = del_space(self.get_starttag_text())\n\n def handle_data(self, data):\n ''' There we find data which is the least deep\n and consists of text on morw than 80% '''\n self.html_data += del_space(data)\n changed_data = re.findall(r'[А-Яа-я]+', del_space(data))\n changed_data = ''.join(changed_data)\n if self.html_data:\n if len(changed_data)/len(self.html_data) > 0.8:\n if self.best_depth > self.depth:\n self.best_data = data\n self.best_depth = self.depth\n\n def handle_endtag(self, tag):\n self.depth -= 1\n\n\ndef del_space(data:str) -> str:\n '''Removing spaces'''\n return ''.join([i for i in data if i != ' '])\n\ndef download_html(link:str) -> str:\n '''Getting decoded data'''\n response = urlopen(link)\n headers = response.headers\n data = response.read()\n data_decoded = data.decode('utf-8')\n\n return headers, data_decoded\n\n\n\n\nresponse_headers, response = download_html('https://habr.com/ru/post/543760/')\nprint(response)\n\nparser = MyHTMLParser()\nparser.feed(response)\nprint('_________________________________')\nprint(parser.best_data)\nprint(parser.best_depth)\n","repo_name":"luckyseadog/python_parser","sub_path":"pythonProject5/1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73661477633","text":"from mongoengine import *\n\nfrom spaceone.core.model.mongo_model import MongoModel\nfrom spaceone.inventory.model.zone_model import Zone\nfrom spaceone.inventory.model.region_model import Region\nfrom spaceone.inventory.model.collection_info_model import CollectionInfo\nfrom spaceone.inventory.model.reference_resource_model import ReferenceResource\n\n\nclass RoutingTable(EmbeddedDocument):\n cidr = StringField(max_length=40)\n destination = StringField(max_length=40)\n interface = StringField(max_length=40, null=True, default=None)\n\n\nclass NetworkPolicy(MongoModel):\n network_policy_id = StringField(max_length=40, generate_id='npolicy', unique=True)\n name = StringField(max_length=255)\n routing_tables = ListField(EmbeddedDocumentField(RoutingTable))\n dns = ListField(StringField(max_length=40))\n data = DictField()\n metadata = DictField()\n reference = EmbeddedDocumentField(ReferenceResource, default=ReferenceResource)\n tags = DictField()\n zone = ReferenceField('Zone', reverse_delete_rule=DENY)\n region = ReferenceField('Region', reverse_delete_rule=DENY)\n domain_id = StringField(max_length=255)\n collection_info = EmbeddedDocumentField(CollectionInfo, default=CollectionInfo)\n created_at = DateTimeField(auto_now_add=True)\n\n meta = {\n 'updatable_fields': [\n 'name',\n 'routing_tables',\n 'dns',\n 'data',\n 'metadata',\n 'reference',\n 'tags',\n 'collection_info'\n ],\n 'exact_fields': [\n 'network_policy_id',\n 'collection_info.state'\n ],\n 'minimal_fields': [\n 'network_policy_id',\n 'name',\n 'reference',\n 'collection_info.state'\n ],\n 'change_query_keys': {\n 'zone_id': 'zone.zone_id',\n 'region_id': 'region.region_id'\n },\n 'reference_query_keys': {\n 'zone': Zone,\n 'region': Region\n },\n 'ordering': [\n 'name'\n ],\n 'indexes': [\n 'network_policy_id',\n 'zone',\n 'region',\n 'domain_id',\n 'reference.resource_id',\n 'collection_info.state'\n ],\n 'aggregate': {\n 'lookup': {\n 'region': {\n 'from': 'region'\n },\n 'zone': {\n 'from': 'zone'\n }\n }\n }\n }\n","repo_name":"choonho/inventory","sub_path":"src/spaceone/inventory/model/network_policy_model.py","file_name":"network_policy_model.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71325220356","text":"import signal\nimport socket\nimport struct\nimport sys\nfrom threading import Thread\nimport tkinter\n\n\nBORN, MSG, DEAD = range(3)\nnicknames = []\nMSG_FORMAT = '=b'\ns = None\n\ntk = tkinter.Tk()\nlog_field = None\ninput_field = None\nname_field = None\n\n\ndef init_gui(nickname):\n global log_field, input_field, name_field\n text = tkinter.StringVar()\n name = tkinter.StringVar()\n text.set('')\n name.set(nickname)\n tk.title(\"Simple chat\")\n w = 400\n h = 300\n ws = tk.winfo_screenwidth()\n hs = tk.winfo_screenheight()\n x = (ws/2) - (w/2)\n y = (hs/2) - (h/2)\n tk.geometry('%dx%d+%d+%d' % (w, h, x, y))\n\n log_field = tkinter.Text(tk)\n log_field.tag_configure('server_msg', foreground='red')\n name_field = tkinter.Entry(tk, textvariable=name)\n name_field.configure(state='readonly')\n input_field = tkinter.Entry(tk, textvariable=text)\n input_field.pack(side='bottom', fill='x', expand='true')\n name_field.pack(side='bottom', fill='x', expand='true')\n log_field.pack(side='top', fill='both', expand='true')\n input_field.focus_set()\n\n\ndef update_log():\n log_field.see(tkinter.END)\n s.setblocking(False)\n try:\n data = s.recv(1024).decode(\"utf-8\")\n chunks = data.split()\n if data is '':\n log_field.insert(tkinter.END, \"user with this username is already existing\" + \"\\n\", 'server_msg')\n sys.exit(1)\n elif chunks[0] == \"!user\":\n nicknames.append(chunks[1])\n log_field.insert(tkinter.END, \"user %s connected\" % chunks[1] + \"\\n\", 'server_msg')\n elif chunks[0] == \"!quit\":\n nicknames.remove(chunks[1])\n log_field.insert(tkinter.END, \"user %s left chat\" % chunks[1] + \"\\n\", 'server_msg')\n else:\n log_field.insert(tkinter.END, data + \"\\n\")\n\n except:\n tk.after(1, update_log)\n return\n tk.after(1, update_log)\n return\n\n\ndef send_msg(event):\n content = input_field.get()\n if content == '' or content.split()[0] == \"!users\":\n log_field.insert(tkinter.END, ' '.join(nicknames) + \"\\n\", 'server_msg')\n input_field.delete(0, 'end')\n return\n full_msg = struct.pack(MSG_FORMAT, MSG) + content.encode(\"utf-8\")\n print(s)\n s.send(full_msg)\n log_field.insert(tkinter.END, name_field.get() + \":\" + content + \"\\n\")\n input_field.delete(0, 'end')\n\n\ndef exterminate():\n s.send(struct.pack(MSG_FORMAT, DEAD))\n sys.exit(0)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print(\"usage : %s \")\n sys.exit(1)\n server_ip, server_port, nickname = sys.argv[1:]\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((server_ip, int(server_port)))\n msg = struct.pack(MSG_FORMAT, BORN) + nickname.encode(\"utf-8\")\n s.send(msg)\n connected = s.recv(1024).decode(\"utf-8\")\n nicknames.extend(connected.split())\n init_gui(nickname)\n input_field.bind('', send_msg)\n tk.after(1, update_log)\n tk.protocol(\"WM_DELETE_WINDOW\", exterminate)\n tk.mainloop()\n","repo_name":"artfly/Networks","sub_path":"im/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43009034245","text":"import pickle\nimport time\n\nimport sys\nsys.path.append('../src/')\nfrom transaction import Transaction\nfrom validator import Validator\n\nif __name__ == '__main__':\n Alice = Validator(port=1234)\n Bob = Validator(name=\"marshal-mbp.memphis.edu\", addr=\"10.101.7.184\",\n port=1234, bind=False)\n \n\n tx = pickle.dumps(Transaction(version=0.1, transaction_type='regular', tx_generator_address='0.0.0.0',\n inputs='', outputs='', lock_time=1234))\n try:\n while True:\n # Send the serialized object to Bob\n #Alice.message(Bob, tx)\n Alice.receive()\n Alice.message(Bob, tx)\n time.sleep(1)\n except KeyboardInterrupt:\n Alice.close()","repo_name":"noahcoomer/BlockchainPKI","sub_path":"tests/test_validator.py","file_name":"test_validator.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"71206390913","text":"\"\"\"\nHook for reading from the Launch Library API.\nhttps://launchlibrary.net/1.4\n\"\"\"\n\nfrom typing import Optional\n\nimport requests\nfrom airflow import AirflowException\nfrom airflow.hooks.base_hook import BaseHook\n\n\nclass LaunchLibraryHook(BaseHook):\n \"\"\"Airflow hook connecting to Launch Library API.\"\"\"\n\n def __init__(self, conn_id: Optional[str] = None, api_version: str = \"1.4\"):\n \"\"\"LaunchLibraryHook constructor.\"\"\"\n super().__init__(source=None)\n self._conn_id = conn_id\n self._api_version = api_version\n\n self._conn = None\n self._base_url = \"https://launchlibrary.net\"\n\n def get_conn(self):\n \"\"\"Initialise and cache session.\"\"\"\n if self._conn is None:\n session = requests.Session()\n self._conn = session\n if self._conn_id:\n try:\n conn = self.get_connection(self._conn_id)\n self._base_url = (\n f\"{conn.schema + '://' if conn.schema else ''}{conn.host}\"\n )\n except AirflowException:\n self.log.warning(\n f\"Connection '{self._conn_id}' not found, using defaults.\"\n )\n\n return self._conn\n\n def get(\n self, endpoint: str = \"launch\", params: Optional[dict] = None, **kwargs\n ) -> dict:\n \"\"\"\n Fetch a JSON response from the Launch Library API.\n :param str endpoint: Launch Library API endpoint.\n :param dict params: Optional parameters to pass with the URL.\n :return: JSON response.\n :rtype: dict\n \"\"\"\n session = self.get_conn()\n full_url = (\n f\"{self._base_url[:-1] if self._base_url.endswith('/') else self._base_url}\"\n f\"/{self._api_version}\"\n f\"/{endpoint}\"\n )\n response = session.get(url=full_url, params=params, **kwargs)\n\n if response.status_code not in (200, 404):\n # Launch Library returns 404 if no rocket launched in given interval.\n response.raise_for_status()\n\n return response.json()\n","repo_name":"BasPH/airflow-rocket","sub_path":"src/airflow_rocket/hooks/launchlibrary_hook.py","file_name":"launchlibrary_hook.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
+{"seq_id":"30552782113","text":"# Determine the sum of values in memory after applying various bitmasks to the values on entry\nimport re\n\n\ndef read_file(filename):\n values = []\n with open(filename, 'r') as f:\n for line in f:\n values.append(line.strip())\n\n return values\n\n\ndef test_data():\n return [\"mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\",\n \"mem[8] = 11\",\n \"mem[7] = 101\",\n \"mem[8] = 0\"]\n\n\nprint(\"Starting Day14-1\")\nvalues = read_file(\"input.txt\")\n# values = test_data()\n\n# For the first part, we are doing simple bitmask operations on values to put into memory. The memory is just going to\n# be a dictionary.\nmemory = dict()\nand_mask = 1\nor_mask = 0\nfor val in values:\n if \"mask\" in val:\n # We are updating the mask, so convert it into the appropriate and/or masks\n mask = val[7:]\n and_mask = int(mask.replace('X', '1'), 2)\n or_mask = int(mask.replace('X', '0'), 2)\n else:\n # We are saving a value into memory, so get the memory location and the value, apply the two masks, and save\n address, value = re.findall(\"[0-9]+\", val)\n memory[int(address)] = int(value) & and_mask | or_mask\n\nprint(memory)\nprint(\"The sum of the values in memory is: {0!s}\".format(sum(memory.values())))\n","repo_name":"theknoxinator/AoC","sub_path":"2020/Day14/day14-1.py","file_name":"day14-1.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4811655641","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport logging\n\nimport torch\n\n\ndef make_out_paths(args):\n timestamp = time.strftime('%Y_%m_%d_%H_%M_%S')\n data_ciph = args.datadir.replace('.', '_').replace('/', '-')\n if not args.test:\n if not hasattr(args, 'modeldir') or args.modeldir is None:\n args.modeldir = os.path.join('../saved_models/', args.lang + '-' + args.split + '-' + timestamp)\n if not os.path.exists(args.modeldir):\n os.makedirs(args.modeldir)\n args.logdir = os.path.join(args.modeldir, 'logs/')\n if not os.path.exists(args.logdir):\n os.makedirs(args.logdir)\n args.predout = args.modeldir\n else:\n args.testdir = os.path.join('../test_results/', args.lang + '-' + args.split + '-' + timestamp)\n if not os.path.exists(args.testdir):\n os.makedirs(args.testdir)\n args.logdir = os.path.join(args.testdir, 'logs/')\n if not os.path.exists(args.logdir):\n os.makedirs(args.logdir)\n args.predout = args.testdir\n\n\ndef get_logger(args):\n logger = logging.getLogger()\n fh = logging.FileHandler(os.path.join(args.logdir, 'main.log'))\n ch = logging.StreamHandler()\n if args.debug:\n logger.setLevel(logging.DEBUG)\n fh.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n fh.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger\n\n\ndef load_model_conf(model_dir, conf):\n with open(os.path.join(model_dir, 'conf.txt')) as f:\n exec(f.read())\n\n\ndef load_model(model_dir, model):\n model.load_state_dict(torch.load(os.path.join(model_dir, 'model')))\n\n\ndef save_model(model_dir, model):\n torch.save(model.state_dict(), os.path.join(model_dir, 'model'))\n","repo_name":"oncebasun/cmu-10618-project","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41787166435","text":"from __future__ import division, absolute_import, unicode_literals\n\nimport re\nimport traceback\nimport json\n\nfrom product_ranking.spiders.asda import AsdaProductsSpider\nfrom scrapy.http import Request\n\n\nclass AsdaShelfPagesSpider(AsdaProductsSpider):\n name = 'asda_shelf_urls_products'\n allowed_domains = [\"asda.com\"]\n\n prods_per_page = 60\n\n CATEGORY_URL = \"https://groceries.asda.com/api/items/viewitemlist?catid={catid}&deptid={deptid}\" \\\n \"&aisleid={aisleid}&showfacets=1&pagesize={prods_per_page}&pagenum={pagenum}\" \\\n \"&contentids=New_IM_ShelfPage_FirstRow_1%2CNew_IM_ShelfPage_LastRow_1%2CNew_IM_SEO_ListingPage_Bottom_promo\" \\\n \"%2CNew_IM_Second_Navi_Shelf&storeid=4565&cacheable=true&shipDate=currentDate\" \\\n \"&sortby=relevance+desc&facets=shelf%3A0000%3A{catid}&requestorigin=gi\"\n\n CATEGORIES_URL = \"https://groceries.asda.com/api/categories/viewmenu?\" \\\n \"cacheable=true&storeid=4565&requestorigin=gi\"\n\n use_proxies = False\n\n HEADERS = {\n 'Accept-Language': 'en-US,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) ' \\\n 'AppleWebKit/537.36 (KHTML, like Gecko)' \\\n 'Chrome/55.0.2883.95 Safari/537.36',\n 'x-forwarded-for': '127.0.0.1'\n }\n\n def __init__(self, *args, **kwargs):\n kwargs.pop('quantity', None)\n self.current_page = 1\n self.num_pages = int(kwargs.pop('num_pages', 1))\n self.search_term = ''\n\n self.categories = []\n self.category_id = 0\n self.department_id = 0\n self.aisle_id = 0\n\n super(AsdaShelfPagesSpider, self).__init__(\n *args,\n **kwargs)\n\n def _setup_meta_compatibility(self):\n \"\"\" Needed to prepare first request.meta vars to use \"\"\"\n try:\n self.search_term = re.search('shelf/(.*)', self.product_url).group(1).split('/')[1]\n except Exception as e:\n self.log('Error while parsing search_term {}'.format(traceback.format_exc(e)))\n\n return {'remaining': self.quantity, 'search_term': self.search_term}.copy()\n\n def start_requests(self):\n yield Request(\n self.CATEGORIES_URL,\n headers=self.HEADERS,\n callback=self._start_requests\n )\n\n def _start_requests(self, response):\n try:\n data = json.loads(response.body_as_unicode())\n self.categories = data.get('categories')\n except Exception as e:\n self.log('Error while parsing categories {}'.format(traceback.format_exc(e)))\n\n category, dept, aisle, shelf = self._get_path()\n\n if dept and aisle:\n self.department_id = dept\n self.aisle_id = aisle\n self.category_id = shelf\n\n yield Request(\n self.CATEGORY_URL.format(\n pagenum=self.current_page,\n prods_per_page=self.prods_per_page,\n search_term=self.search_term,\n catid=self.category_id,\n deptid=self.department_id,\n aisleid=self.aisle_id\n ),\n meta=self._setup_meta_compatibility()\n )\n\n def _scrape_next_results_page_link(self, response):\n if self.current_page >= self.num_pages:\n return\n\n try:\n data = json.loads(response.body_as_unicode())\n max_page = int(data['maxPages'])\n if self.current_page >= max_page:\n return\n\n self.current_page += 1\n\n return Request(\n self.CATEGORY_URL.format(\n pagenum=self.current_page,\n prods_per_page=self.prods_per_page,\n search_term=self.search_term,\n catid=self.category_id,\n deptid=self.department_id,\n aisleid=self.aisle_id\n ),\n meta=self._setup_meta_compatibility(),\n headers=self.HEADERS,\n )\n except Exception as e:\n self.log('Page Count Error {}'.format(traceback.format_exc(e)))\n\n def _get_path(self):\n try:\n wrap = re.findall('(?<=/)\\d+', self.product_url)[0]\n\n for category in 
self.categories:\n depts = category.get('categories', [])\n for dept in depts:\n aisles = dept.get('categories', [])\n for aisle in aisles:\n shelves = aisle.get('categories', [])\n for shelf in shelves:\n\n if wrap == shelf.get('dimensionid'):\n return category.get('id'), dept.get('id'), aisle.get('id'), shelf.get('id')\n except Exception as e:\n self.log('Error while parsing categories {}'.format(traceback.format_exc(e)))\n\n return None, None, None, None\n","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/product_ranking/spiders/asda_shelf_pages.py","file_name":"asda_shelf_pages.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
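The paging machinery above is plain `str.format` substitution into `CATEGORY_URL` (extra keyword arguments such as `search_term` are silently ignored by `format`). A standalone check with made-up ids, using a trimmed copy of the template:

```python
CATEGORY_URL = ("https://groceries.asda.com/api/items/viewitemlist"
                "?catid={catid}&deptid={deptid}&aisleid={aisleid}"
                "&pagesize={prods_per_page}&pagenum={pagenum}")

# The ids below are placeholders purely to exercise the substitution.
print(CATEGORY_URL.format(catid="0000", deptid="1111", aisleid="2222",
                          prods_per_page=60, pagenum=2, search_term="ignored"))
```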
+{"seq_id":"35108307936","text":"import rltorch\n\nconfig = {}\nconfig['seed'] = 901\nconfig['zoom'] = 4\nconfig['environment_name'] = 'PongNoFrameskip-v4'\nconfig['learning_rate'] = 1e-4\nconfig['target_sync_tau'] = 1e-3\nconfig['discount_rate'] = 0.99\nconfig['exploration_rate'] = rltorch.scheduler.ExponentialScheduler(initial_value = 1, end_value = 0.02, iterations = 10**5)\nconfig['replay_skip'] = 4\nconfig['batch_size'] = 32 * (config['replay_skip'] + 1)\nconfig['num_sneaky_episodes'] = 10 # per loop\nconfig['disable_cuda'] = False\n\nconfig['seconds_play_per_state'] = 120\nconfig['seconds_play_per_state'] = 5\n# 30 transitions per second for 120 seconds = 3600 transitions per turn\nconfig['memory_size'] = 86400\nconfig['dqfd_demo_loss_weight'] = 0.01\nconfig['dqfd_td_loss_weight'] = 1.\nconfig['demo_prio_bonus'] = 0.\nconfig['observed_prio_bonus'] = 0.\n\n# Prioritized vs Random Sampling\n# 0 - Random sampling\n# 1 - Only the highest prioirities\nconfig['prioritized_replay_sampling_priority'] = 0.6\nconfig['prioritized_replay_sampling_priority'] = 0.\n# How important are the weights for the loss?\n# 0 - Treat all losses equally\n# 1 - Lower the importance of high losses\n# Should ideally start from 0 and move your way to 1 to prevent overfitting\nconfig['prioritized_replay_weight_importance'] = rltorch.scheduler.ExponentialScheduler(initial_value = 0.4, end_value = 1, iterations = 10**5)\nconfig['prioritized_replay_weight_importance'] = 0.\n","repo_name":"Brandon-Rozek/GymInteract","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70361748675","text":"import torch\n\nfrom torch.nn import Module, Parameter\nfrom torch.nn.functional import mse_loss\nimport matplotlib.pyplot as plt\n\ndef wake_function(z, z0, k=2 * torch.pi):\n return -torch.cos(k * (z - z0)) * (torch.tanh((z - z0) / 0.01) + 1) / 2\n\n\nclass Wakefield(Module):\n def __init__(self):\n super(Wakefield, self).__init__()\n dist = torch.distributions.Normal(0.0, 1.0)\n self.register_parameter(\"particle_z\", Parameter(dist.sample([100, 1])))\n\n def calculate_wakefield(self, z):\n wake = wake_function(z, self.particle_z)\n total_wake = torch.sum(wake, dim=0)\n\n return total_wake\n\ndef target_wake(z):\n return (torch.tanh(z / 0.01) + 1) / 2 *-250\n\nif __name__ == '__main__':\n model = Wakefield()\n\n z = torch.linspace(-2.0, 2.0, 200)\n z_test = z[:125]\n\n # target wakefield\n target = target_wake(z_test)\n\n optimizer = torch.optim.Adam(\n model.parameters(), lr=0.00001\n ) # Includes GaussianLikelihood parameters\n\n if 1:\n for i in range(10000):\n # Zero gradients from previous iteration\n optimizer.zero_grad()\n # Output from model\n output = model.calculate_wakefield(z_test)\n # Calc loss and backprop gradients\n loss = mse_loss(output, target)\n loss.backward()\n if not i % 1000:\n print(loss)\n optimizer.step()\n\n # calc entire wakefield\n total_wake = model.calculate_wakefield(z)\n\n fig, ax = plt.subplots()\n ax.plot(z, total_wake.detach())\n ax.plot(z_test, target)\n axb = ax.twinx()\n axb.hist(model.particle_z.detach().numpy(), alpha=0.25)\n\n plt.show()","repo_name":"austin-hoover/phase_space_reconstruction","sub_path":"dev/wakefield/reconstruct_wakefield.py","file_name":"reconstruct_wakefield.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32945870807","text":"from actionkit import rest\nfrom actionkit.models import *\nimport datetime\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import redirect\nfrom django.template.defaultfilters import date\nfrom djangohelpers import rendered_with, allow_http\nimport json\nfrom actionkit_userdetail.models import CalloutUserfield\n\ndef _mailing_history(request, agent):\n _sends = mailings_by_user(agent)\n\n sends = {}\n for send in _sends:\n id = send['id']\n if id not in sends:\n sends[id] = {\n 'id': send['id'],\n 'mailed_at': send['mailed_at'],\n 'subject_text': send['subject_text'],\n 'clicks': set(),\n 'opens': set(),\n }\n sends[id]['clicks'] = set(sends[id]['clicks'])\n sends[id]['opens'] = set(sends[id]['opens'])\n if send['clicked_at'] is not None:\n sends[id]['clicks'].add(send['clicked_at'])\n if send['opened_at'] is not None:\n sends[id]['opens'].add(send['opened_at'])\n sends[id]['clicks'] = list(sends[id]['clicks'])\n sends[id]['opens'] = list(sends[id]['opens'])\n \n return sends\n\n@allow_http(\"GET\")\ndef user_fields(request, user_id):\n try:\n agent = CoreUser.objects.using(\"ak\").get(id=user_id)\n except CoreUser.DoesNotExist:\n return HttpResponseNotFound(\"No such record exists\")\n\n fields = agent.fields.all()\n fields = [{'name': field.name, 'value': field.value} for field in fields]\n return HttpResponse(json.dumps(fields), content_type=\"application/json\")\n\n@allow_http(\"GET\")\ndef mailing_history(request, user_id):\n try:\n agent = CoreUser.objects.using(\"ak\").get(id=user_id)\n except CoreUser.DoesNotExist:\n return HttpResponseNotFound(\"No such record exists\")\n\n sends = _mailing_history(request, agent)\n\n def dthandler(obj):\n if isinstance(obj, datetime.datetime):\n return date(obj)\n return HttpResponse(json.dumps(sends, default=dthandler),\n content_type=\"application/json\")\n\n@allow_http(\"GET\")\ndef jump_to_member(request):\n member = request.GET.get(\"member\")\n if member.isdigit():\n return redirect(\"userdetail_detail\", member)\n elif '@' in member:\n try:\n member = CoreUser.objects.using(\"ak\").get(email=member)\n except CoreUser.DoesNotExist:\n return HttpResponseNotFound(\"No member exists with email %s\" % member)\n return redirect(\"userdetail_detail\", member.id)\n else:\n return HttpResponse(\n \"I could not recognize this as an ID or an email address: '%s'\" % member, \n status=400)\n\n@allow_http(\"GET\")\n@rendered_with(\"actionkit_userdetail/view_user_detail.html\")\ndef view_user_detail(request, user_id):\n ctx = _detail(request, user_id)\n ctx['member_id'] = user_id\n return ctx\n\n@allow_http(\"GET\")\ndef detail_json(request, user_id):\n ctx = _detail(request, user_id)\n def dthandler(obj):\n if isinstance(obj, datetime.datetime):\n return date(obj)\n elif hasattr(obj, 'to_json'):\n return obj.to_json()\n try:\n ctx['latest_action'] = ctx['actions'][0]\n except IndexError:\n ctx['latest_action'] = None\n try:\n ctx['latest_order'] = ctx['orders'][0]\n except IndexError:\n ctx['latest_order'] = None\n try:\n ctx['latest_open'] = ctx['opens'][0]\n except IndexError:\n ctx['latest_open'] = None\n try:\n ctx['latest_click'] = ctx['clicks'][0]\n except IndexError:\n ctx['latest_click'] = None\n\n agent = ctx['agent']\n ctx['sends'] = _mailing_history(request, agent).values()\n ctx['sends'] = sorted(ctx['sends'], key=itemgetter(\"mailed_at\"), reverse=True)\n try:\n ctx['latest_send'] = ctx['sends'][0]\n except IndexError:\n ctx['latest_send'] 
= None \n\n return HttpResponse(json.dumps(ctx, cls=JSONEncoder, default=dthandler),\n content_type=\"application/json\")\n \n\ndef fetch_contact_details(email):\n #url = ('https://api.fullcontact.com/v2/person.json?email=%s&apiKey=%s'\n # % (email, settings.FULLCONTACT_API))\n #try:\n # response = urllib2.urlopen(url)\n # result = response.read()\n # jsondata = json.loads(result)\n #except urllib2.HTTPError:\n # jsondata = dict(status=500,\n # message='Error retrieving supplemental data')\n #return jsondata\n\n return dict(status=500,\n message='Error retrieving supplemental data')\n\n@allow_http(\"GET\")\ndef supplemental_details_json(request, user_id):\n try:\n agent = CoreUser.objects.using(\"ak\").get(id=user_id)\n except CoreUser.DoesNotExist:\n raise Http404(\"No user: %s\" % user_id)\n email = agent.email\n contact_details = fetch_contact_details(email)\n return HttpResponse(json.dumps(contact_details),\n content_type=\"application/json\")\n\nfrom collections import namedtuple\n_AgentTag = namedtuple(\"AgentTag\", \"name ak_tag_id editable allowed_tag_id\")\nclass AgentTag(_AgentTag):\n def __repr__(self):\n return self.name\n\n@allow_http(\"GET\")\n@rendered_with(\"actionkit_userdetail/view_order_detail.html\")\ndef order_detail(request, user_id, order_id):\n try:\n order = CoreOrder.objects.using(\"ak\").select_related(\"user\").get(\n id=order_id, user_id=user_id)\n except CoreOrder.DoesNotExist:\n raise Http404(\"No order %s for user %\" % (order_id, user_id))\n \n recurrences = list(order.recurrences.all())\n transactions = list(order.transactions.all())\n\n if order.import_id:\n type = \"Standalone Order (imported)\"\n elif recurrences:\n type = \"Recurring Order\"\n elif transactions:\n type = \"Standalone Order\"\n\n return locals()\n\ndef _detail(request, user_id):\n callout_userfields = CalloutUserfield.objects.all()\n extra_select = {'phone_number': (\n \"SELECT `phone` FROM `core_phone` \"\n \"WHERE `core_phone`.`user_id`=`core_user`.`id` \"\n \"LIMIT 1\")}\n for field in callout_userfields:\n extra_select[field.name] = (\n \"SELECT `value` FROM `core_userfield` \"\n \"WHERE `core_userfield`.`parent_id`=`core_user`.`id` \"\n 'AND `core_userfield`.`name`=\"%s\" LIMIT 1' % field.name)\n\n try:\n agent = CoreUser.objects.using(\"ak\").extra(select=extra_select,\n ).get(id=user_id)\n except CoreUser.DoesNotExist:\n return HttpResponseNotFound(\"No such record exists\")\n\n actions = list(agent.action.all().select_related(\"page\").order_by(\"-created_at\"))\n orders = list(\n agent.orders.all().select_related(\"action\", \"action__page\").order_by(\n \"-created_at\"))\n transactions = list(\n CoreTransaction.objects.using(\"ak\").select_related(\"order\").filter(\n order__user=agent).order_by(\"-created_at\"))\n\n total_donations = sum(order.total for order in orders \n if order.import_id is not None \n and order.status == \"completed\"\n and order.id not in [t.order_id for t in transactions]) + \\\n sum(transaction.amount for transaction in transactions\n if transaction.status == \"completed\"\n and transaction.order.status == \"completed\")\n\n recurring_donations = list(CoreOrderRecurring.objects.using(\n \"ak\").filter(user=agent).select_related(\"order\").order_by(\"-start\"))\n\n now = datetime.date.today()\n upcoming_recurring_donations = [\n recurrence for recurrence in recurring_donations\n if recurrence.status == \"active\"\n and recurrence.start > now\n ]\n\n clicks = clicks_by_user(agent)\n opens = opens_by_user(agent)\n sends = 
CoreUserMailing.objects.using(\"ak\").filter(user=agent).order_by(\n \"-created_at\").select_related(\"subject\")\n\n _agent_tags = CoreTag.objects.using(\"ak\").filter(\n pagetags__page__coreaction__user=agent).values(\"name\", \"id\", \"pagetags__page_id\")\n\n agent_tags = []\n \n for tag in _agent_tags:\n editable = False\n allowed_tag_id = None\n agent_tags.append(AgentTag(tag['name'], tag['id'], editable, allowed_tag_id))\n\n # The list of already-used tags may contain duplicates.\n # We need to filter out duplicates, and if there is an \"editable\" copy of the tag\n # as well as an \"uneditable\" copy, we need to discard the editable one.\n _agent_tags = {}\n for tag in agent_tags:\n _agent_tags.setdefault(tag.name, [])\n if tag.editable:\n _agent_tags[tag.name].append(tag)\n else:\n _agent_tags[tag.name].insert(0, tag)\n agent_tags = (copies[0] for copies in _agent_tags.values())\n\n # We also need to filter out the \"special tag-page marker tag\" \n # from the list -- unless it too is editable!\n #agent_tags = [tag for tag in agent_tags\n # if (tag.ak_tag_id != settings.AKTIVATOR_TAG_PAGE_TAG_ID\n # or tag.editable)]\n\n # Then, we need to filter out already-used tags from the list of addable tags.\n #_agent_tags = [tag.name for tag in agent_tags]\n #allowed_tags = [tag for tag in _allowed_tags if tag.tag_name not in _agent_tags]\n\n return locals()\n","repo_name":"350dotorg/aktivator","sub_path":"actionkit_userdetail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
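The `default=dthandler` idiom used in both JSON views is standard `json.dumps` behavior: the callback is invoked only for values the encoder cannot serialize itself. A self-contained illustration:

```python
import datetime
import json

def dthandler(obj):
    # Called only for objects json cannot serialize natively.
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError(f"not serializable: {obj!r}")

payload = {"subject_text": "hello", "mailed_at": datetime.datetime(2014, 5, 1, 12, 30)}
print(json.dumps(payload, default=dthandler))
```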
+{"seq_id":"24576933791","text":"from typing import Iterator, Dict, Any, Optional\nimport psycopg2, psycopg2.extras\nimport time, datetime\nimport re\nfrom functools import wraps\nimport csv\nfrom memory_profiler import memory_usage\n\n\n#------------------------ Connect\n\nconnection = psycopg2.connect(\n host=\"localhost\",\n port=5432,\n database=\"poojakale\",\n user=\"admin\",\n password=\"admin\",\n)\nconnection.set_session(autocommit=True)\n\n#------------------------ Read\n\ndef iter_company_from_file(path: str) -> Iterator[Dict[str, Any]]:\n skip = True \n idx = 0\n with open(path, 'r') as f:\n csv_reader = csv.reader(f)\n for line in csv_reader:\n if skip: # skip the headers\n skip = False \n continue\n company = {}\n company['id'] = idx\n company['company_name'] = line[0]\n company['company_li_names'] = line[1]\n company['description'] = line[2]\n company['headcount'] = line[3]\n company['founding_date'] = line[4]\n company['most_recent_raise'] = line[5]\n company['most_recent_valuation'] = line[6]\n company['investors'] = line[7]\n company['known_total_funding'] = line[8]\n idx +=1\n yield company\n \ndef iter_company_li_from_file(path: str) -> Iterator[Dict[str, Any]]:\n skip = True \n idx = 0\n with open(path, 'r') as f:\n csv_reader = csv.reader(f)\n for line in csv_reader:\n if skip: # skip the headers\n skip = False \n continue\n company_li_names = line[1].replace('\\n','').replace('\"','').replace(' ','').replace('[','').replace(']','').split(',')\n if company_li_names == ['']: company_li_names = []\n for li_name in company_li_names: \n company = {}\n company['id'] = idx\n company['company_name'] = line[0]\n company['company_li_name'] = li_name.strip()\n print(company)\n idx +=1\n yield company\n\n#------------------------ Analyze Load Metrics\n\ndef profile(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n print('Load Metrics: \\n')\n\n # Measure time\n t = time.perf_counter()\n retval = fn(*args, **kwargs)\n elapsed = time.perf_counter() - t\n print(f'Time {elapsed:0.4}')\n\n # Measure memory\n mem, retval = memory_usage((fn, args, kwargs), retval=True, timeout=200, interval=1e-7)\n\n print(f'Memory {max(mem) - min(mem)}')\n return retval\n\n return inner\n \n#------------------------ Load\n\ndef create_companies_staging_table(cursor):\n cursor.execute(\"\"\"\n DROP TABLE IF EXISTS companies;\n CREATE TABLE companies (\n id NUMERIC,\n company_name TEXT,\n company_li_names TEXT,\n description TEXT,\n headcount NUMERIC,\n founding_date DATE,\n most_recent_raise NUMERIC,\n most_recent_valuation NUMERIC,\n investors TEXT,\n known_total_funding NUMERIC\n );\n \"\"\")\n\ndef create_company_li_names_table(cursor):\n cursor.execute(\"\"\"\n DROP TABLE IF EXISTS company_li_names;\n CREATE TABLE company_li_names (\n id NUMERIC,\n company_li_name TEXT,\n company_name TEXT\n );\n \"\"\")\n\ndef parse_date(text: str) -> datetime.date:\n \n if text == '': \n return None # replace empty string with None\n parts = text.split('-')\n if len(parts) == 3:\n return datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))\n else:\n print(parts)\n assert False, 'Unknown date format'\n\ndef parse_numeric_value(value: str) -> int:\n # set currently works at company when person does not provide end date\n if str.isalpha(value) or value == '': \n return None\n else:\n return int(value)\n\ndef parse_lists(text: str) -> str: \n\n if text == '[]' or text == '': \n return None \n else: \n return text.lower()\n\n@profile\ndef clean_db_and_insert_execute_batch_iterator(connection, companies: 
Iterator[Dict[str, Any]], page_size: int = 1) -> None:\n with connection.cursor() as cursor:\n create_companies_staging_table(cursor)\n \n iter_companies = ({\n **company,\n 'company_name':company['company_name'].lower(),\n 'company_li_names': parse_lists(company['company_li_names']), # psycopg converts str to list\n 'founding_date': parse_date(company['founding_date']),\n 'headcount':parse_numeric_value(company['headcount']),\n 'most_recent_raise':parse_numeric_value(company['most_recent_raise']),\n 'most_recent_valuation':parse_numeric_value(company['most_recent_valuation']),\n 'investors': parse_lists(company['investors']), \n 'known_total_funding':parse_numeric_value(company['known_total_funding']),\n } for company in companies)\n\n psycopg2.extras.execute_batch(cursor, \"\"\"\n INSERT INTO companies VALUES (\n %(id)s,\n %(company_name)s,\n %(company_li_names)s,\n %(description)s,\n %(headcount)s,\n %(founding_date)s,\n %(most_recent_raise)s,\n %(most_recent_valuation)s,\n %(investors)s,\n %(known_total_funding)s\n );\n \"\"\", iter_companies, page_size=page_size)\n\n@profile\ndef insert_execute_batch_iterator(connection, companies: Iterator[Dict[str, Any]], page_size: int = 1) -> None:\n with connection.cursor() as cursor: \n iter_companies = ({\n **company,\n 'company_name':company['company_name'].lower(),\n 'company_li_names': parse_lists(company['company_li_names']), # psycopg converts str to list\n 'founding_date': parse_date(company['founding_date']),\n 'headcount':parse_numeric_value(company['headcount']),\n 'most_recent_raise':parse_numeric_value(company['most_recent_raise']),\n 'most_recent_valuation':parse_numeric_value(company['most_recent_valuation']),\n 'investors': parse_lists(company['investors']), \n 'known_total_funding':parse_numeric_value(company['known_total_funding']),\n } for company in companies)\n\n psycopg2.extras.execute_batch(cursor, \"\"\"\n INSERT INTO companies VALUES (\n %(id)s,\n %(company_name)s,\n %(company_li_names)s,\n %(description)s,\n %(headcount)s,\n %(founding_date)s,\n %(most_recent_raise)s,\n %(most_recent_valuation)s,\n %(investors)s,\n %(known_total_funding)s\n );\n \"\"\", iter_companies, page_size=page_size)\n\ndef wipe_and_insert_execute(connection, company_li_names: Iterator[Dict[str,any]]): \n with connection.cursor() as cursor: \n create_company_li_names_table(cursor)\n\n psycopg2.extras.execute_values(cursor, \"\"\" INSERT INTO company_li_names VALUES %s; \"\"\",\n (\n (\n company['id'],\n company['company_li_name'],\n company['company_name'].strip().lower(),\n ) for company in company_li_names)\n )\n\n\npath = '/Users/poojakale/Documents/interviews/companies.csv'\ncompanies = list(iter_company_from_file(path))\nclean_db_and_insert_execute_batch_iterator(connection, companies, page_size=100)\n\ncompany_li_names = list(iter_company_li_from_file(path))\nwipe_and_insert_execute(connection,company_li_names)","repo_name":"pkale/employee_data_backend_engine","sub_path":"scripts/load_company_data_to_db.py","file_name":"load_company_data_to_db.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
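The load path above relies on a generator expression, so rows are cleaned lazily while `psycopg2.extras.execute_batch` consumes them in pages, keeping memory flat regardless of file size. A database-free sketch of the same streaming idea:

```python
from typing import Any, Dict, Iterator

def clean(rows: Iterator[Dict[str, Any]]) -> Iterator[Dict[str, Any]]:
    # Nothing is materialized here; each row is normalized on demand.
    for row in rows:
        yield {**row, 'company_name': row['company_name'].strip().lower()}

raw = iter([{'company_name': ' ACME '}, {'company_name': 'Initech'}])
for record in clean(raw):
    print(record)
```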
+{"seq_id":"10410815319","text":"from torch.utils.data.dataset import random_split\nfrom neuroIN.io.dataset import Dataset\n\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import random_split\n\nfrom ray import tune\nfrom ray.tune.schedulers.hb_bohb import HyperBandForBOHB\nfrom ray.tune.suggest.bohb import TuneBOHB\n\n\ndef train(model, optimizer, train_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.train()\n criterion = nn.CrossEntropyLoss()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n\n\ndef test(model, data_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.eval()\n correct = 0\n total = 0\n loss = 0\n criterion = nn.CrossEntropyLoss()\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(data_loader):\n data, target = data.to(device), target.to(device)\n outputs = model(data)\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n loss += criterion(outputs, target).item()\n\n return loss / len(data_loader), correct / total\n\ndef test_best_model(best_trial):\n dataset = Dataset(best_trial.config[\"data_dir\"])\n test_loader = dataset.test.get_dataloader(best_trial.config[\"batch_size\"])\n\n best_model = best_trial.config[\"model\"](n_classes=dataset.n_classes, shape=dataset.train[0][0].shape, **best_trial.config)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n best_model.to(device)\n\n checkpoint_path = os.path.join(best_trial.checkpoint.value, \"checkpoint\")\n model_state, optimizer_state = torch.load(checkpoint_path)\n best_model.load_state_dict(model_state)\n\n mean_loss, mean_acc = test(best_model, test_loader)\n print(f\"Test set has {mean_acc}% accuracy and mean loss of {mean_loss}\")\n\n\ndef train_dataset(config, checkpoint_dir=None):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n dataset = Dataset(config[\"data_dir\"])\n\n model = config[\"model\"](n_classes=dataset.n_classes, shape=dataset.train[0][0].shape, **config)\n model.to(device)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=config[\"lr\"], momentum=config[\"momentum\"])\n\n if checkpoint_dir:\n config[\"checkpoint_dir\"].mkdir(parents=True, exist_ok=True)\n checkpoint = os.path.join(checkpoint_dir, \"checkpoint\")\n model_state, optimizer_state = torch.load(checkpoint)\n model.load_state_dict(model_state)\n optimizer.load_state_dict(optimizer_state)\n \n trainset = dataset.train\n\n test_abs = int(len(trainset) * .8)\n train_subset, val_subset = random_split(trainset, [test_abs, len(trainset) - test_abs])\n \n train_loader = torch.utils.data.DataLoader(\n train_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True)\n val_loader = torch.utils.data.DataLoader(\n val_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True)\n\n for epoch in range(36):\n train(model, optimizer, train_loader)\n mean_loss, acc = test(model, val_loader)\n\n tune.report(mean_accuracy=acc, mean_loss=mean_loss)\n\n with tune.checkpoint_dir(step=epoch) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n torch.save(\n (model.state_dict(), optimizer.state_dict()), path)\n 
print(\"Finished Training\")\n \n\n\ndef run_optim(config, max_concurrent=4, num_samples=40):\n \"\"\"Run ray tune optimization\n\n :param config: the configuration optimization dictionary to use\n :type config: dict\n :param max_concurrent: the max number of concurrent modesl to train, defaults to 4\n :type max_concurrent: int, optional\n :param num_samples: the number of sample models to test, defaults to 40\n :type num_samples: int, optional\n \"\"\"\n if not isinstance(config, dict):\n config = torch.load(config)\n\n assert isinstance(config, dict), \"'config' must be a dictionary\"\n\n algo = TuneBOHB(max_concurrent=max_concurrent, metric=\"mean_loss\", mode=\"min\")\n bohb = HyperBandForBOHB(\n time_attr=\"training_iteration\",\n metric=\"mean_loss\",\n mode=\"min\",\n max_t=100)\n\n analysis = tune.run(train_dataset,\n num_samples=num_samples,\n config=config,\n scheduler=bohb,\n search_alg=algo)\n \n best_trial = analysis.get_best_trial('mean_loss', \"min\", \"last\")\n\n print(f\"{best_trial.config}, {best_trial.last_result['mean_loss']}, {best_trial.last_result['mean_accuracy']}\")\n test_best_model(best_trial)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Optimize a Dataset\")\n parser.add_argument('data_dir', help=\"Directory of Dataset\")\n parser.add_argument('-n', '--num_samples', nargs='?', default=40, type=int)\n parser.add_argument('-m', '--model_optim_idx', nargs='?', default=-1, type=int)\n args = parser.parse_args()\n\n print(f\"Optimizing Dataset located at: {args.data_dir}\")\n print(f\"Will use {args.num_samples} samples and model optim #{args.model_optim_idx}\")\n\n dataset = Dataset(args.data_dir)\n\n config = dataset.get_optim(args.model_optim_idx)\n run_optim(config, num_samples=args.num_samples)","repo_name":"markt/neuroIN","sub_path":"src/neuroIN/training/optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"18894903217","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Alien(Sprite):\n\n def __init__(self, game) -> None:\n super().__init__()\n\n self.screen = game.screen\n self.settings = game.settings\n self.screen_rect = self.screen.get_rect()\n\n self.image = pygame.image.load(\"images/alien.bmp\").convert()\n self.image.set_colorkey((230, 230, 230))\n self.rect = self.image.get_rect()\n # self.rect.topleft = self.screen_rect.topleft\n self.rect.x = 0\n self.rect.y = self.rect.height\n\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n \n def update(self):\n '''move to the right'''\n self.x += self.settings.alien_speed * self.settings.fleet_direction\n self.rect.x = self.x\n\n def check_edges(self):\n '''if an alien is next to the border then return true'''\n if self.rect.right > self.screen_rect.right or self.rect.left < 0:\n return True\n\n # def blitme(self):\n # self.screen.blit(self.image,self.rect)\n","repo_name":"PingOnTheWay/alien_invasion","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16795831596","text":"# -*- coding: utf-8 -*-\n\nfrom normalization import normalization\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef syntatic_coeficient(text1, text2):\n \"\"\"\n Calculates the syntatic similarity of two terms using cosine similarity.\n\n Parameters\n ----------\n text1 : string\n The first term we want to compare.\n\n text2 : string\n The second term which we want to compare with the first one.\n\n Returns\n -------\n int\n The value, between 0 and 1, that represents the syntatic similarity between the two terms.\n\n \"\"\"\n if text1 != \"\" and text2 != \"\" and text1 != None and text2 != None:\n t1 = normalization.parse_text_to_compare(text1)\n t2 = normalization.parse_text_to_compare(text2)\n if t1 != \"\" and t2 != \"\":\n text = [t1, t2]\n try:\n vectorizer = CountVectorizer().fit_transform(text)\n vectors = vectorizer.toarray()\n csims = cosine_similarity(vectors)\n csim = csims[0][1]\n return csim\n except:\n return 0\n return 0\n","repo_name":"joanapereira115/cmdb-auto-creation","sub_path":"src/syntatic_matching/syntatic_matching.py","file_name":"syntatic_matching.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"26803349958","text":"import pytest\n\nfrom tinydisplay.render.collection import canvas, sequence\nfrom tinydisplay.render.widget import text\nfrom tinydisplay.utility import dataset, image2Text\n\n\n@pytest.fixture\ndef makeSetup():\n db = {\"artist\": \"Sting\", \"title\": \"Desert Rose\"}\n system = {\"state\": \"play\", \"temp\": 40}\n ds = dataset({\"db\": db, \"sys\": system})\n artist = text(value=\"f\\\"Artist {db['artist']}\\\"\", dataset=ds)\n title = text(value=\"f\\\"Title {db['title']}\\\"\", dataset=ds)\n alert = text(value=\"'ALERT -- HOT'\")\n time = text(\"'12:32p'\")\n cArt = canvas(\n size=(80, 16),\n duration=2,\n activeWhen=\"sys['state'] == 'play'\",\n dataset=ds,\n name=\"Artist\",\n )\n cArt.append(artist)\n cTitle = canvas(\n size=(80, 16),\n duration=2,\n activeWhen=\"sys['state'] == 'play'\",\n dataset=ds,\n name=\"Title\",\n )\n cTitle.append(title)\n cAlert = canvas(\n size=(80, 16),\n duration=5,\n minDuration=2,\n activeWhen=\"sys['temp'] >= 100\",\n dataset=ds,\n name=\"Alert\",\n )\n cAlert.append(alert)\n cTime = canvas(\n size=(80, 16),\n duration=10,\n activeWhen=\"sys['state'] == 'stop'\",\n dataset=ds,\n name=\"Time\",\n )\n cTime.append(time, placement=\"mm\")\n\n seq = sequence(dataset=ds)\n seq.append(cArt)\n seq.append(cTitle)\n seq.append(cAlert)\n seq.append(cTime)\n\n return (ds, seq)\n\n\ndef test_sequence_timing(makeSetup):\n\n ds, seq = makeSetup\n\n sImg = seq.render(force=True)[0]\n sSame = seq.render()[0]\n sNew = seq.render()[0]\n seq.render()[0]\n sOld = seq.render()[0]\n\n assert (\n sImg == sSame\n ), f\"Images should have been identical but instead were\\n{image2Text(sImg)}\\nand\\n{image2Text(sSame)}\"\n\n assert (\n sImg != sNew\n ), f\"Images should have been different but instead were\\n{image2Text(sImg)}\\nand\\n{image2Text(sNew)}\"\n\n assert (\n sImg == sOld\n ), f\"Images should have been back to identical but instead were\\n{image2Text(sImg)}\\nand\\n{image2Text(sOld)}\"\n\n\ndef test_sequence_conditions(makeSetup):\n ds, seq = makeSetup\n\n # Should be Artist Play canvas\n sOrig = seq.render(force=True)[0]\n\n # Skip to end of sequence (assuming state stays the same)\n for i in range(3):\n seq.render()\n\n sTst = seq.render()[0]\n assert (\n sOrig == sTst\n ), f\"Images should have been identical but instead were\\n{image2Text(sOrig)}\\nand\\n{image2Text(sTst)}\"\n\n ds.update(\"sys\", {\"state\": \"stop\"})\n sTst = seq.render()[0]\n assert (\n sOrig != sTst\n ), f\"Images should have been different but instead were\\n{image2Text(sOrig)}\\nand\\n{image2Text(sTst)}\"\n\n\ndef test_min_duration(makeSetup):\n ds, seq = makeSetup\n\n ds.update(\"sys\", {\"temp\": 100, \"state\": \"stop\"})\n\n sOrig = seq.render(force=True)[0]\n ds.update(\"sys\", {\"temp\": 40})\n sTst = seq.render()[0]\n sTst2 = seq.render()[0]\n\n # sTst should == sOrig (because of the minDuration of 2 for cAlert)\n assert (\n sOrig == sTst\n ), f\"Images should have been identical but instead were\\n{image2Text(sOrig)}\\nand\\n{image2Text(sTst)}\"\n\n assert (\n sOrig != sTst2\n ), f\"Images should have been different but instead were\\n{image2Text(sOrig)}\\nand\\n{image2Text(sTst2)}\"\n","repo_name":"dhrone/tinyDisplay","sub_path":"tests/test_sequence.py","file_name":"test_sequence.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11569586684","text":"import sys\nimport os\nfrom time import sleep\nfrom random import randrange\nfrom sys import exit\nfrom subprocess import Popen, PIPE, call\nimport subprocess\nfrom traceback import format_exc\n\nfrom mininet.log import setLogLevel, info\nfrom minindn.minindn import Minindn\nfrom minindn.util import MiniNDNCLI\nfrom minindn.apps.app_manager import AppManager\nfrom minindn.apps.nfd import Nfd\nfrom minindn.helpers.nfdc import Nfdc\nfrom minindn.apps.nlsr import Nlsr\nfrom minindn.helpers.ndn_routing_helper import NdnRoutingHelper\nfrom minindn.wifi.minindnwifi import MinindnWifi\nfrom minindn.util import MiniNDNWifiCLI, getPopen\n\ndef registerRouteToAllNeighbors(ndn, host, prefix):\n for node in ndn.net.hosts:\n for neighbor in node.connectionsTo(host):\n ip = node.IP(neighbor[0])\n Nfdc.createFace(host, ip)\n Nfdc.registerRoute(host, prefix, ip)\n\ndef setTsharkLog(ndn):\n for host in ndn.net.hosts:\n print (\"Setting tshark for host:\", host.name)\n host.cmd('tshark -o ip.defragment:TRUE -o ip.check_checksum:FALSE -ni any -f \"udp port 6363\" -w {}.pcap &> /dev/null &'.format(host.name))\n # host.cmd('ndndump -i any &> {}.ndndump &'.format(host.name))\n sleep(0.1)\n\ndef setMTUsize(self, ndn, mtu=9000):\n for host in self.ndn.net.hosts:\n for intf in host.intfList():\n host.cmd(\"ifconfig {} mtu {}\".format(intf, mtu))\n\n# type = C or P for consumer and producer respectively\n# count = how many?\ndef generateNodes(type, count, serviceType = 'printer', publicationInterval=100):\n nodes = dict()\n if type == 'C':\n for c in range(0, count):\n name = 'c{}'.format(c+1)\n nodes[name] = serviceType\n elif type == 'P':\n for c in range(0, count):\n name = 'p{}'.format(c+1)\n nodes[name] = [serviceType, publicationInterval]\n return nodes\n\nclass NDNSDExperiment():\n '''\n This is a base class for all ndnsd experiments (both wireless and wired)\n ndn: object, either mini-ndn object, can be minindn or minindnwifi object\n producers: list, service publishers\n consumers: list, service finder\n expType: string, wired or wireless\n '''\n def __init__(self, ndn, producers, consumers, expType=\"wired\", nlsr=True):\n self.ndn = ndn\n self.args = ndn.args\n self.expType = expType\n self.producers = producers\n self.consumers = consumers\n if expType == 'wifi':\n self.hosts = ndn.net.stations\n else:\n self.hosts = ndn.net.hosts\n self.producerNodes = [host for host in self.hosts if host.name in self.producers]\n self.consumerNodes = [host for host in self.hosts if host.name in self.consumers]\n self.start(nlsr)\n\n def start(self, nlsr):\n self.ndn.start()\n sleep(5)\n AppManager(self.ndn, self.hosts, Nfd, logLevel='DEBUG')\n if nlsr:\n AppManager(self.ndn, self.ndn.net.hosts, Nlsr, security=self.ndn.args.security, logLevel='INFO')\n sleep(180)\n Popen(['cp', 'test.info', '/usr/local/etc/ndn/ndnsd_default.info'], stdout=PIPE, stderr=PIPE).communicate()\n\n def startProducer(self):\n print(\"Starting producers\")\n hostInfo = dict([])\n for producer in self.producerNodes: # if host.name not in consumer:\n hostName = producer.name\n hostInfoFile = '{}/{}/ndnsd_{}.info'.format(self.args.workDir, hostName, hostName)\n appPrefix = '/ndnsd/{}/service-info'.format(hostName)\n\n Popen(['cp', '/usr/local/etc/ndn/ndnsd_default.info', hostInfoFile], stdout=PIPE, stderr=PIPE).communicate()\n producer.cmd('infoedit -f {} -s required.appPrefix -v {}'.format(hostInfoFile, appPrefix))\n\n # uncomment to enable sync log\n cmd = 'export 
NDN_LOG=ndnsd.*=TRACE:psync.*=TRACE:sync.*=TRACE'\n producer.cmd(cmd)\n cmd = 'ndnsd-producer {} 1 &> {}/{}/producer.log &'.format(hostInfoFile, self.args.workDir, hostName)\n try:\n producer.cmd(cmd)\n except Exception as e:\n print (\"couldn't start producer\", e)\n exit(0)\n sleep(2)\n\n def startConsumer(self):\n print(\"Staring consumers\")\n for consumer in self.consumerNodes:\n cName = consumer.name\n cmd = 'export NDN_LOG=ndnsd.*=TRACE:psync.*=TRACE:sync.*=TRACE'\n consumer.cmd(cmd)\n cmd = 'ndnsd-consumer -s {} &> {}/{}/consumer.log -c 1 -p 1 &'.format(self.consumers[cName], self.args.workDir, cName)\n try:\n consumer.cmd(cmd)\n except Exception as e:\n print (\"couldn't start producer\", e)\n exit(0)\n # sleep -- let consumer boot up properly\n sleep(2)\n","repo_name":"dulalsaurab/NDNSD","sub_path":"experiments/ndnsd_experiment_base.py","file_name":"ndnsd_experiment_base.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"72356138755","text":"# dictionary name melons\n# for each melon_name track the following characteristics\n# melon_price s/b price\n# melon_seedlessness s/b seedless\n# add flesh-color\n# add weight\n# add rind color\n# defaults should be \n# seedless False\n# price 0.99\n# flesh-color,weight and rind-color None\n# move all existing data to lump together by key\n# note for each dictionary item there is no comma for the last value as nothing more needs to be separated. There is a comma between each dictionary item.\nmelons={\n'Honeydew': {\n 'price': 0.99,\n 'seedless': True,\n 'flesh-color': None,\n 'weight': None,\n 'rind-color': None,\n 'locale': None\n },\n'Crenshaw': {\n 'price': 2.00,\n 'seedless': False,\n 'flesh-color': None,\n 'weight': None,\n 'rind-color': None,\n 'locale': None\n },\n'Crane': {\n 'price': 2.50,\n 'seedless': False,\n 'flesh-color': None,\n 'weight': None,\n 'rind-color': None,\n 'locale': None\n },\n'Casaba': {\n 'price': 2.50,\n 'seedless': False,\n 'flesh-color': None,\n 'weight': None,\n 'rind-color': None,\n 'locale': None\n },\n'Cantaloupe': {\n 'price': 0.99,\n 'sedless': False,\n 'flesh-color': None,\n 'weight': None,\n 'rind-color': None,\n 'locale': None\n }\n}\n","repo_name":"annewoosam/accounting_scripts","sub_path":"melons.py","file_name":"melons.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27494776669","text":"import structlog\n\nfrom backend.celery import app\nfrom substrapp.compute_tasks.asset_buffer import delete_models_from_buffer\nfrom substrapp.compute_tasks.datastore import Datastore\nfrom substrapp.compute_tasks.datastore import DatastoreError\nfrom substrapp.compute_tasks.datastore import get_datastore\n\nlogger = structlog.get_logger(__name__)\n\n\ndef queue_remove_intermediary_models_from_buffer(model_key: str) -> None:\n # This task is broadcast to all worker (see the broadcast defined in backend/celery.py)\n remove_intermediary_models_from_buffer.apply_async((model_key,))\n\n\n# This task is routed to run on the broadcast exchange\n# Each worker is listening to the broadcast queue. All running worker will perform this task.\n# Multiple tasks with the same task_id are created in the db\n# With ignore_result set to true, we ignore the result of the task as the different task state\n# result might be conflicting.\n# See https://docs.celeryproject.org/en/stable/userguide/routing.html#Broadcast&Results\n@app.task(ignore_result=True)\ndef remove_intermediary_models_from_buffer(model_key: str) -> None:\n delete_models_from_buffer([model_key])\n\n\ndef queue_remove_intermediary_model_from_db(channel_name: str, model_key: str) -> None:\n from substrapp.task_routing import get_generic_worker_queue\n\n worker_queue = get_generic_worker_queue()\n\n remove_intermediary_model_from_db.apply_async((channel_name, model_key), queue=worker_queue)\n\n\n@app.task(ignore_result=False)\ndef remove_intermediary_model_from_db(channel_name: str, model_key: str) -> None:\n datastore = get_datastore(channel_name)\n _delete_intermediary_model_from_db(datastore, model_key)\n\n\ndef _delete_intermediary_model_from_db(datastore: Datastore, model_key: str) -> None:\n try:\n datastore.delete_model(model_key)\n except DatastoreError:\n logger.debug(\"model deletion from datastore failed\", model_key=model_key)\n","repo_name":"Substra/substra-backend","sub_path":"backend/substrapp/tasks/tasks_remove_intermediary_models.py","file_name":"tasks_remove_intermediary_models.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"61"}
+{"seq_id":"70883021634","text":"import random\n\ndef generator(text, sep=\" \", option=None):\n \"\"\"Splits the text according to sep value and yield the substrings.\n option precise if a action is performed to the substrings before it is yielded.\"\"\"\n\n assert isinstance(text, str), \"ERROR\"\n assert isinstance(sep, str), \"ERROR\"\n assert option in [None, \"shuffle\", \"unique\", \"ordered\"], \"ERROR\"\n\n words = text.split(sep)\n\n if option == \"shuffle\":\n words = [words.pop(random.randrange(len(words))) for _ in range(len(words))]\n\n elif option == \"unique\":\n words = list(set(words))\n\n elif option == \"ordered\":\n words = sorted(words)\n for word in words:\n yield word\n\n\n","repo_name":"Zekao/Piscine-Python","sub_path":"module01/ex03/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"25042766412","text":"# https://www.acmicpc.net/problem/1697\n# 숨바꼭질 \n\nfrom collections import deque\n\n\nN, K = map(int, input().split())\nMAX_LOCATION = 100000\n\ndef bfs(N, K):\n if N == K:\n return 0\n queue = deque([N])\n predecessor = {N: None}\n while queue:\n now = queue.popleft()\n possible_loc = [now-1, now+1, now*2]\n for loc in possible_loc:\n # loc 범위 체크해주지 않으면 메모리 초과\n if loc not in predecessor and 0 <= loc <= MAX_LOCATION:\n predecessor[loc] = now\n queue.append(loc)\n if loc == K:\n queue = None\n break\n\n temp = K\n minute = 0\n while temp is not None: # while temp로 하면 0인 경우에도 반복문 수행하지 않아서 오답!!\n minute += 1\n temp = predecessor.get(temp)\n return minute - 1\n\nprint(bfs(N, K))","repo_name":"hmkim199/PrepareCodingTest","sub_path":"Baekjoon/1697.py","file_name":"1697.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21633177959","text":"# csv format WITHOUT heading: start,node,Kh,Kv,Ss,Sy\r\n# sorted by: start,node\r\ninputfile = '004_total_TVM.csv'\r\noutputfile = inputfile.replace('.csv','.tvm')\r\ntotal_sp = 108\r\n\r\nfout = open(outputfile,'w')\r\n\r\n\r\nfout.write(' # MODFLOW-USG Time-Variant Materials (TVM) Package \\n\\\r\n 1 0 0 0 0 -1\\n\\\r\n 0 0 0 0 0 Start SP 1\\n')\r\n\r\n\r\nsp_list = []\r\nwith open(inputfile) as f:\r\n for l in f:\r\n sp_list.append(l.split(',')[0])\r\n#print(len(sp_list))\r\n\r\noutstack = []\r\nwrite_flag = True\r\nfor c_sp in range(1,total_sp + 1):\r\n if str(c_sp) in sp_list:\r\n fin = open(inputfile)\r\n for line in fin:\r\n c_in = line.replace('\\n','').split(',')\r\n if c_in[0] == str(c_sp):\r\n c_count = str(sp_list.count(c_in[0]))\r\n if write_flag == True:\r\n outstr = ' '+c_count+' '+c_count+' '+c_count+' '+c_count+' 0 End SP ' + str(c_in[0]) + '\\n'\r\n fout.write(outstr)\r\n write_flag = False\r\n outstack.append([c_in[1],c_in[2],c_in[3],c_in[4],c_in[5]])\r\n for item in outstack:\r\n fout.write(' '+ item[0] + ' ' + item[1] +'\\n')\r\n for item in outstack:\r\n fout.write(' '+ item[0] + ' ' + item[2] +'\\n')\r\n for item in outstack:\r\n fout.write(' '+ item[0] + ' ' + item[3] +'\\n') \r\n for item in outstack:\r\n fout.write(' '+ item[0] + ' ' + item[4] +'\\n')\r\n \r\n fin.close()\r\n outstack = []\r\n else:\r\n outstr = ' 0 0 0 0 0 End SP ' + str(c_sp) + '\\n'\r\n fout.write(outstr)\r\n write_flag = True\r\n\r\nfout.close()\r\nprint('done~')\r\n","repo_name":"pollozhao/Hydro","sub_path":"csv2tvm/csv2tvm.py","file_name":"csv2tvm.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35519178539","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import PointCloud2, PointField, Imu\nfrom rclpy.qos_event import PublisherEventCallbacks\nfrom rclpy.duration import Duration\nfrom rclpy.qos import QoSProfile\nfrom rclpy.qos import QoSDurabilityPolicy\nfrom rclpy.qos import QoSLivelinessPolicy\nfrom rclpy.qos import QoSHistoryPolicy \nfrom rclpy.qos import QoSReliabilityPolicy\nimport math\nimport struct\nimport numpy as np\nimport sensor_msgs.msg as sensor_msgs\nfrom geometry_msgs.msg import TransformStamped\nfrom tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster\nimport std_msgs.msg as std_msgs\nimport open3d as o3d\nimport sys\nimport os\nfrom scipy.spatial.transform import Rotation as R\nimport mrob\n\n_DATATYPES = {}\n_DATATYPES[PointField.INT8] = ('b', 1)\n_DATATYPES[PointField.UINT8] = ('B', 1)\n_DATATYPES[PointField.INT16] = ('h', 2)\n_DATATYPES[PointField.UINT16] = ('H', 2)\n_DATATYPES[PointField.INT32] = ('i', 4)\n_DATATYPES[PointField.UINT32] = ('I', 4)\n_DATATYPES[PointField.FLOAT32] = ('f', 4)\n_DATATYPES[PointField.FLOAT64] = ('d', 8)\n\n\ndef filter_map_point_cloud(map_cloud, observation_origin, threshold=20):\n filtered_cloud = []\n for point in map_cloud:\n distance = np.linalg.norm(point - observation_origin)\n if distance <= threshold:\n filtered_cloud.append(point)\n return filtered_cloud\n\ndef tf_to_pq(t):\n p = np.array([t.transform.translation.x, t.transform.translation.y, t.transform.translation.z])\n q = np.array([t.transform.rotation.x, t.transform.rotation.y,\n t.transform.rotation.z, t.transform.rotation.w])\n return p, q\n\ndef pq_2_trans(p,q):\n rot = mrob.geometry.quat_to_so3(q)\n pose = mrob.geometry.SE3(mrob.geometry.SO3(rot),p).T()\n return pose\n\ndef correct_imu(q): \n r = R.from_quat(q)\n m = r.as_matrix()\n rot = R.from_rotvec([0, 0, -np.pi])\n m2 = rot.as_matrix()\n s = R.from_matrix(m@m2)\n quat = s.as_quat()\n return quat\n\ndef preprocess_point_cloud(pcd, voxel_size):\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n radius_normal = voxel_size * 2\n pcd_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n\n radius_feature = voxel_size * 5\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n return pcd_down, pcd_fpfh\n\ndef prepare_pointcloud(voxel_size, source, map):\n source = source\n target = map\n source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)\n target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)\n return source_down, target_down, source_fpfh, target_fpfh\n\ndef execute_global_registration(voxel_size, source, map):\n distance_threshold = voxel_size * 1.5\n source_down, target_down, source_fpfh, target_fpfh =prepare_pointcloud(voxel_size, source, map)\n result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n source_down, target_down, source_fpfh, target_fpfh, True, distance_threshold,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(False),\n 3, [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)\n ], \n o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))\n return result.transformation\n\ndef execute_fast_global_registration(voxel_size, source, map):\n source_down, target_down, source_fpfh, target_fpfh 
=prepare_pointcloud(voxel_size, source, map)\n    distance_threshold = voxel_size * 1.5\n    result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(\n        source_down, target_down, source_fpfh, target_fpfh,\n        o3d.pipelines.registration.FastGlobalRegistrationOption(\n            maximum_correspondence_distance=distance_threshold))\n    return result.transformation\n\n\ndef read_points(cloud, field_names=None, skip_nans=False, uvs=[]):\n\n    assert isinstance(cloud, PointCloud2), 'cloud is not a sensor_msgs.msg.PointCloud2'\n    fmt = _get_struct_fmt(cloud.is_bigendian, cloud.fields, field_names)\n    width, height, point_step, row_step, data, isnan = cloud.width, cloud.height, cloud.point_step, cloud.row_step, cloud.data, math.isnan\n    unpack_from = struct.Struct(fmt).unpack_from\n\n    if skip_nans:\n        if uvs:\n            for u, v in uvs:\n                p = unpack_from(data, (row_step * v) + (point_step * u))\n                has_nan = False\n                for pv in p:\n                    if isnan(pv):\n                        has_nan = True\n                        break\n                if not has_nan:\n                    yield p\n        else:\n            for v in range(height):\n                offset = row_step * v\n                for u in range(width):\n                    p = unpack_from(data, offset)[:3]\n                    has_nan = False\n                    for pv in p:\n                        if isnan(pv):\n                            has_nan = True\n                            break\n                    if not has_nan:\n                        yield p\n                    offset += point_step\n    else:\n        if uvs:\n            for u, v in uvs:\n                yield unpack_from(data, (row_step * v) + (point_step * u))\n        else:\n            for v in range(height):\n                offset = row_step * v\n                for u in range(width):\n                    yield unpack_from(data, offset)[:3]\n                    offset += point_step\n\ndef _get_struct_fmt(is_bigendian, fields, field_names=None):\n    fmt = '>' if is_bigendian else '<'\n\n    offset = 0\n    for field in (f for f in sorted(fields, key=lambda f: f.offset) if field_names is None or f.name in field_names):\n        if offset < field.offset:\n            fmt += 'x' * (field.offset - offset)\n            offset = field.offset\n        if field.datatype not in _DATATYPES:\n            print('Skipping unknown PointField datatype [%d]' % field.datatype, file=sys.stderr)\n        else:\n            datatype_fmt, datatype_length = _DATATYPES[field.datatype]\n            fmt += field.count * datatype_fmt\n            offset += field.count * datatype_length\n\n    return fmt\n\ndef point_cloud(points, parent_frame):\n    \"\"\" Creates a point cloud message.\n    Args:\n        points: Nx3 array of xyz positions.\n        parent_frame: frame in which the point cloud is defined\n    Returns:\n        sensor_msgs.msg/PointCloud2 message\n    \"\"\"\n    # In a PointCloud2 message, the point cloud is stored as a byte \n    # array. In order to unpack it, we also include some parameters \n    # which describe the size of each individual point.\n    ros_dtype = sensor_msgs.PointField.FLOAT32\n    dtype = np.float32\n    itemsize = np.dtype(dtype).itemsize # A 32-bit float takes 4 bytes.\n    data = points.astype(dtype).tobytes() \n    # The fields specify what the bytes represent. The first 4 bytes \n    # represent the x-coordinate, the next 4 the y-coordinate, etc.\n    fields = [sensor_msgs.PointField(\n        name=n, offset=i*itemsize, datatype=ros_dtype, count=1)\n        for i, n in enumerate('xyz')]\n\n    # The PointCloud2 message also has a header which specifies which \n    # coordinate frame it is represented in. 
\n header = std_msgs.Header(frame_id=parent_frame)\n\n return sensor_msgs.PointCloud2(\n header=header,\n height=1, \n width=points.shape[0],\n is_dense=False,\n is_bigendian=False,\n fields=fields,\n point_step=(itemsize * 3), # Every point consists of three float32s.\n row_step=(itemsize * 3 * points.shape[0]),\n data=data\n )\n \nclass MapPublisher(Node):\n\n def __init__(self):\n super().__init__('map_publisher')\n \n qos_profile = QoSProfile(\n reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_RELIABLE,\n history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,\n depth=1, \n durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL\n )\n assert len(sys.argv) > 1, \"No pcd file given.\"\n assert os.path.exists(sys.argv[1]), \"File doesn't exist.\"\n map_path = sys.argv[1]\n pcd = o3d.io.read_point_cloud(map_path)\n # pcd = pcd.random_down_sample()\n self.points = np.asarray(pcd.points)\n self.pcd_publisher = self.create_publisher(sensor_msgs.PointCloud2, '/map_pcd', qos_profile = qos_profile)\n self.pcd = point_cloud(self.points, 'Map')\n self.pcd_publisher.publish(self.pcd) \n\n\nclass IMUO3dVis(Node):\n\n def __init__(self):\n super().__init__('imu_visualizer')\n self.vis = o3d.visualization.Visualizer()\n self.vis.create_window( window_name='IMU Visualization', width=540, height=540)\n self.o3d_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size = 0.1)\n # self.o3d_mesh.rotate(self.o3d_mesh.get_rotation_matrix_from_xyz((np.pi / 2, 0, np.pi / 4)), center = (0, 0, 0))\n # Set up a subscription to the 'pcd' topic with a callback to the \n # function `listener_callback`\n self.imu_subscriber = self.create_subscription(Imu,'/imu',self.imu_callback,10)\n self.init = True\n self.imu_reset = None\n\n \n def imu_callback(self, msg):\n \n if self.init: \n translation = np.array([0, 0 , 0])\n quat = np.array([msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])\n rot = mrob.geometry.quat_to_so3(quat)\n pose = mrob.geometry.SE3(mrob.geometry.SO3(rot),translation)\n # self.imu_reset = pose.inv().T()\n self.imu_reset = np.eye(4)\n self.imu_prev = np.eye(4) \n self.init = False\n else: \n # The rest here is for visualization.\n translation = np.array([0, 0 , 0])\n quat = np.array([msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])\n rot = mrob.geometry.quat_to_so3(quat)\n pose = mrob.geometry.SE3(mrob.geometry.SO3(rot),translation).T()\n T = self.imu_reset @ pose\n self.imu_rotation = np.linalg.inv(self.imu_prev) @T\n self.imu_rotation[:3, 3] = [0, 0, 0]\n self.imu_prev = T\n self.vis.remove_geometry(self.o3d_mesh)\n self.o3d_mesh = self.o3d_mesh.transform(self.imu_rotation)\n self.vis.add_geometry(self.o3d_mesh)\n self.vis.poll_events()\n self.vis.update_renderer()\nclass Frame_saver(Node):\n\n def __init__(self):\n super().__init__('frame_saver')\n self.o3d_mesh = o3d.geometry.PointCloud()\n\n # Set up a subscription to the 'pcd' topic with a callback to the \n # function `listener_callback`\n self.pcd_subscriber = self.create_subscription(\n PointCloud2, # Msg type\n '/velodyne_points', # topic\n self.listener_callback, # Function to call\n 10 # QoS depth \n )\n self.count = 0\n\n \n def listener_callback(self, msg):\n pcd_as_numpy_array = np.array(list(read_points(msg)))\n \n # The rest here is for visualization.\n self.o3d_mesh = o3d.geometry.PointCloud(\n o3d.utility.Vector3dVector(pcd_as_numpy_array))\n o3d.io.write_point_cloud('/home/r/frames/frame'+'{:05d}'.format(self.count)+'.pcd',self.o3d_mesh)\n 
self.count+=1\n ","repo_name":"AhmedBaza1/localization_repo","sub_path":"localization/localization/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":11682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
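The byte layout `point_cloud` builds — consecutive little-endian float32 triples — can be reproduced directly with `struct`, which is also what `read_points` unpacks on the receiving side:

```python
import struct

point = (1.0, 2.0, 3.0)
data = struct.pack('<fff', *point)             # 12 bytes: x, y, z as float32
print(len(data), struct.unpack('<fff', data))  # 12 (1.0, 2.0, 3.0)
```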
+{"seq_id":"31868929306","text":"import datetime\nimport os.path\nimport unittest\n\nfrom beancount_bot import transaction\nfrom beancount_bot.builtin.template_dispatcher import TemplateDispatcher, split_command\nfrom beancount_bot.transaction import NotMatchException\n\nPATH = os.path.split(os.path.realpath(__file__))[0]\n\n\nclass TestTemplateDispatcher(unittest.TestCase):\n\n def test_quick_check(self):\n d = TemplateDispatcher(os.path.join(PATH, 'template_config.yml'))\n self.assertTrue(d.quick_check('饮料 20'))\n self.assertFalse(d.quick_check('! @饮料 '))\n self.assertFalse(d.quick_check('咖'))\n self.assertTrue(d.quick_check('饭 4.00'))\n self.assertTrue(d.quick_check('咖啡 123'))\n\n def test_split_command(self):\n cases = [\n ('饮料 20', ['饮料', '20']),\n ('饮料20', ['饮料20']),\n ('\"饮料\"\"20\"', ['饮料', '20']),\n ('饮料 666<21', ['饮料', '666', '<', '21']),\n ('饮料 20< 521', ['饮料', '20', '<', '521']),\n ('饮料 0 <21', ['饮料', '0', '<', '21']),\n ('饮料 201 < 21', ['饮料', '201', '<', '21']),\n ('饮料 \"201 \"< 22', ['饮料', '201 ', '<', '22']),\n ('饮料 \"201 <\" 55', ['饮料', '201 <', '55']),\n ('饮料 \"10\\\\\"1 <\" ', ['饮料', '10\"1 <']),\n ('\"\\\\\"\"\"\\\\\"\\\\\"\"', ['\"', '\"\"']),\n ('\"\\\\\\\\\" \"\\\\\\\\233\\\\\\\\\"', ['\\\\', '\\\\233\\\\']),\n ]\n exception_cases = [\n ('吃饭<<1', 3),\n ('吃饭< \\\\1', 4),\n ('123\"2', 5),\n ('\"\\\\', 2),\n ('\"\\\\\"', 3),\n ]\n\n for cmd, expected in cases:\n ret = split_command(cmd)\n self.assertEqual(ret, expected)\n\n for cmd, pos in exception_cases:\n try:\n ret = split_command(cmd)\n print(ret)\n self.fail(\"未发生错误\")\n except ValueError as e:\n self.assertIn(str(pos), e.args[0])\n\n def test_process_simple(self):\n today = datetime.date.today().isoformat()\n cases = [\n ('vultr',\n f'{today} * \"Vultr\" \"月费\"\\n'\n ' Assets:Digital:Alipay\\n'\n ' Expenses:Tech:Cloud 5 USD\\n'),\n ('vultr < wx',\n f'{today} * \"Vultr\" \"月费\"\\n'\n ' Assets:Digital:Wechat\\n'\n ' Expenses:Tech:Cloud 5 USD\\n'),\n ('饮料 3.0',\n f'{today} * \"饮料\"\\n'\n ' Assets:Digital:Alipay\\n'\n ' Expenses:Food:Drink 3.0 CNY\\n'),\n ('饮料 3.23= 21 else \\\n 'Expenses:Food:Dinner:Breakfast' if hour <= 10 else \\\n 'Expenses:Food:Dinner:Lunch' if hour <= 16 else \\\n 'Expenses:Food:Dinner:Supper'\n\n d = TemplateDispatcher(os.path.join(PATH, 'template_config.yml'))\n\n ret = d.process('饭 20')\n ret = transaction.stringfy(ret)\n self.assertIn(expense, ret)\n print(ret)\n\n ret = d.process('饭 20 pure_run_photons:\n true_positives = pure_run_photons\n false_positives = nsb_run_photons - true_positives\n false_negatives = pure_run_photons - true_positives\n true_negatives = (\n all_photons - true_positives\n - false_positives - false_negatives)\n try:\n precision = true_positives / (\n true_positives + false_positives)\n sensitivity = true_positives / (\n true_positives + false_negatives)\n except ZeroDivisionError:\n sensitivity = 0\n precision = 0\n precisions.append(precision)\n sensitivities.append(sensitivity)\n mask = np.array(precisions) != 0\n number_muons = mask.sum()\n precisions = np.array(precisions)[mask]\n sensitivities = np.array(sensitivities)[mask]\n std_precision = np.multiply(np.std(precisions), 100)\n std_sensitivity = np.multiply(np.std(sensitivities), 100)\n event = {\n \"number_muons\": number_muons,\n \"avg_precision\": np.multiply(np.average(precisions), 100),\n \"std_precision\": std_precision,\n \"precision_SE\": np.divide(std_precision, np.sqrt(number_muons)),\n \"avg_sensitivity\": np.multiply(np.average(sensitivities), 100),\n \"std_sensitivity\": std_sensitivity,\n 
\"sensitivity_SE\": np.divide(std_sensitivity, np.sqrt(number_muons))\n\n    }\n    return event\n\n\ndef cluster_single_run(dir_name, clustering=ps.PhotonStreamCluster):\n    nsb_run_photons = []\n    pure_run_photons = []\n    all_run_photons = []\n    pure_run_path = os.path.join(dir_name, \"pure\", \"psf_0.sim.phs\")\n    nsb_run_path = os.path.join(dir_name, \"NSB\", \"psf_0.sim.phs\")\n    nsb_run = ps.EventListReader(nsb_run_path)\n    pure_run = ps.EventListReader(pure_run_path)\n    for event in nsb_run:\n        photon_clusters = clustering(event.photon_stream)\n        cherenkov_cluster_mask = photon_clusters.labels >= 0\n        nsb_cherenkov_photon_stream = photon_clusters.point_cloud\n        nsb_cherenkov_ps = nsb_cherenkov_photon_stream[\n            cherenkov_cluster_mask]\n        nsb_run_photons.append(nsb_cherenkov_ps[:, 0:3])\n        all_photons = event.photon_stream.point_cloud\n        all_run_photons.append(all_photons)\n    for muon in pure_run:\n        pure_photon_stream = muon.photon_stream.point_cloud\n        pure_run_photons.append(pure_photon_stream)\n    return (\n        all_run_photons,\n        pure_run_photons,\n        nsb_run_photons\n    )\n\n\ndef single_run_analysis(dir_name, clustering=ps.PhotonStreamCluster):\n    # pass the caller's clustering through instead of hard-coding the default\n    photons = cluster_single_run(dir_name, clustering=clustering)\n    nsb_info = true_false_decisions(\n        photons[0], photons[1], photons[2])\n    nsb_rate = re.split(\"/\", dir_name)[-1]\n    nsb_info[\"nsb_rate\"] = nsb_rate\n    return nsb_info\n\n\ndef different_nsb_rates(resource_path, clustering=ps.PhotonStreamCluster):\n    nsb_infos = {\n        \"number_muons\": [],\n        \"avg_precision\": [],\n        \"std_precision\": [],\n        \"precision_SE\": [],\n        \"avg_sensitivity\": [],\n        \"std_sensitivity\": [],\n        \"sensitivity_SE\": [],\n        \"nsb_rate\": []\n    }\n    wild_card_path = os.path.join(resource_path, \"*\")\n    for path in glob.glob(wild_card_path):\n        nsb_info = single_run_analysis(path, clustering=clustering)\n        for key, value in nsb_info.items():\n            nsb_infos[key].append(value)\n    return nsb_infos\n\n","repo_name":"fact-project/muons","sub_path":"muons/method_evaluation/detection/benchmarking.py","file_name":"benchmarking.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
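The true/false-positive bookkeeping in `true_false_decisions` above is easiest to sanity-check with toy numbers. A minimal illustration of the branch where the clustered NSB run keeps more photons than the pure run contains (the values are my own, not from the dataset):

# Toy walk-through of the counting logic (illustrative values only).
pure_run_photons = 80     # photons that truly belong to the muon ring
nsb_run_photons = 100     # photons the clustering kept from the NSB run

true_positives = pure_run_photons                       # capped at the pure count
false_positives = nsb_run_photons - true_positives      # 20 background photons kept
false_negatives = pure_run_photons - true_positives     # 0 by construction in this branch

precision = true_positives / (true_positives + false_positives)    # 0.8
sensitivity = true_positives / (true_positives + false_negatives)  # 1.0
print(precision, sensitivity)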
+{"seq_id":"40026545295","text":"# from pymysql.converters import escape_string as es\nimport codecs, json\nimport datetime\nfrom utils import debugger\nfrom scrawler import selscrapy\nfrom scrawler.rawscrapy import RawScrapy\nfrom db import sqldb\nimport time\n\n\ndef get_new_content(*args: str):\n\n global json_contents_filename\n global contents\n json_contents_filename = \"./resources/contents\"\n for arg in args:\n json_contents_filename += arg + '_'\n json_contents_filename += str(datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S'))\n json_contents_filename += \".json\"\n\n fp = codecs.open(json_contents_filename, 'w', 'utf-8')\n contents = sel_scrapy.get_contents_for_yahoo(*args, getall=True)\n\n fp.write(json.dumps(contents, indent=4, separators=(',', ': '), ensure_ascii=False))\n fp.close()\n debugger.INFO(\"written to json file: {}\".format(json_contents_filename))\n\n fp = codecs.open(json_contents_filename, 'r', 'utf-8')\n contents = json.load(fp, strict=False)\n debugger.INFO(\"loaded json file: {}\".format(json_contents_filename))\n fp.close()\n\n\ndef get_new_pages():\n # sel_scrapy.get_page_for_yahoo(contents[0][\"url\"])\n debugger.INFO(\"prepare to get {} pages\".format(len(contents)))\n for index, article in enumerate(contents):\n\n if article[\"url\"].find('image') != -1:\n debugger.INFO(\"pass an image article\")\n continue\n\n article_body = sel_scrapy.get_page_for_yahoo(contents[index][\"url\"])\n if article_body == \"None\":\n article_body = sel_scrapy.get_page_for_yahoo(contents[index][\"url\"])\n if article_body == \"None\":\n debugger.ERROR(\"Cannot get this article\")\n else:\n debugger.INFO(\"Problem solved. Got this article.\")\n\n article[\"body\"] = article_body\n\n debugger.INFO(\"got pages finished!\")\n\n json_all_filename = json_contents_filename.replace(\"contents\", \"all\")\n fp = codecs.open(json_all_filename, 'w', 'utf-8')\n fp.write(json.dumps(contents, indent=4, separators=(',', ': '), ensure_ascii=False))\n debugger.INFO(\"written to json file: {}\".format(json_all_filename))\n fp.close()\n\n\ndef write_to_db(filename: str, tablename: str):\n\n db = sqldb.Sqldb()\n\n debugger.INFO(\"begin writing {} to table {}\".format(filename, tablename))\n gen = db.generate(\"./resources/\" + filename, tablename)\n if gen is False:\n debugger.ERROR(\"Writing to table failed\")\n else:\n debugger.INFO(\"Writing to table succeeded.\")\n\n db.close()\n\n \ndef get_comments(filename: str):\n path_contents = \"./resources/\" + filename\n debugger.INFO(\"prepare to get comments for {}\".format(filename))\n fp = codecs.open(path_contents, 'r', 'utf-8')\n contents = json.load(fp)\n fp.close()\n raw_scrapy = RawScrapy()\n # content = contents[15]\n # url = content[\"url\"]\n # comment = raw_scrapy.get_comment(url + \"/comments\")\n # content[\"comment\"] = comment\n for (id, content) in enumerate(contents):\n start_time = time.time()\n url = content[\"url\"]\n debugger.INFO(\"getting {}th comments for {}\".format(id, url))\n comment = raw_scrapy.get_comment(url + \"/comments\")\n content[\"comment\"] = comment\n debugger.INFO(\"got by {:.2f}s!\".format(time.time() - start_time))\n debugger.INFO(\"Getting comments finished!\")\n debugger.INFO(\"Begin writing to file\")\n\n json_all_filename = path_contents.replace(\"all\", \"all_plus\")\n fp = codecs.open(json_all_filename, 'w', 'utf-8')\n fp.write(json.dumps(contents, indent=4, separators=(',', ': '), ensure_ascii=False))\n debugger.INFO(\"written to json file: {}\".format(json_all_filename))\n\n\nif __name__ == 
'__main__':\n\n debugger.INFO(\"Hello SRT!\")\n # sel_scrapy = selscrapy.SelScrapy(headless=False)\n\n # get_new_content(\"五輪\", \"中国\", \"選手\")\n\n # get_new_pages()\n\n get_comments(\"all五輪_中国_選手_2021-12-01-14_16_57.json\")\n\n # write_to_db(\"all中国_五輪_選手_2021-10-22-12_33_35.json\", \"yahoo_A\")\n\n # sel_scrapy.quit()\n\n","repo_name":"xiurui-pan/Sino-Jap-SRT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4524022068","text":"\"\"\"\r\nWrite a program that asks for the user's name and age. If the user's age is greater than or\r\nequal to 18, display the message: \"Welcome to our site, [nome]!\"; otherwise, display the\r\nmessage: \"You cannot access our site, [nome].\".\r\n\"\"\"\r\n\r\nnome = input('What is your name? ').title()\r\nidade = int(input('Enter your age: '))\r\n\r\nif idade >= 18:\r\n    print(f'Welcome to our site, {nome}')\r\nelse:\r\n    print(f'You cannot access our site, {nome}')","repo_name":"hhigorb/exercicios_python_pratica","sub_path":"1_Variáveis, tipos de dados e condicionais/ex014.py","file_name":"ex014.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14869014316","text":"from abc import ABC\nfrom typing import Iterable, List\n\nfrom pydantic import BaseModel\nfrom pymongo.results import UpdateResult\n\nfrom fastapi_contrib.db.models import MongoDBModel\n\n\nclass AbstractMeta(ABC):\n exclude: set = set()\n model: MongoDBModel = None\n write_only_fields: set = set()\n read_only_fields: set = set()\n\n\nclass Serializer(BaseModel):\n \"\"\"\n Base Serializer class.\n\n Almost ALWAYS should be used in conjunction with\n `fastapi_contrib.serializers.openapi.patch` decorator to correctly handle\n inherited model fields and OpenAPI Schema generation with `response_model`.\n\n Responsible for sanitizing data & converting JSON to & from MongoDBModel.\n\n Contains supplemental function, related to MongoDBModel,\n mostly proxied to corresponding functions inside model (ex. save, update)\n\n Heavily uses `Meta` class for fine-tuning input & output. Main fields are:\n * exclude - set of fields that are excluded when serializing to dict\n and sanitizing list of dicts\n * model - class of the MongoDBModel to use, inherits fields from it\n * write_only_fields - set of fields that can be accepted in request,\n but excluded when serializing to dict\n * read_only_fields - set of fields that cannot be accepted in request,\n but included when serializing to dict\n\n Example usage:\n\n .. code-block:: python\n\n app = FastAPI()\n\n\n class SomeModel(MongoDBModel):\n field1: str\n\n\n @openapi.patch\n class SomeSerializer(Serializer):\n read_only1: str = \"const\"\n write_only2: int\n not_visible: str = \"42\"\n\n class Meta:\n model = SomeModel\n exclude = {\"not_visible\"}\n write_only_fields = {\"write_only2\"}\n read_only_fields = {\"read_only1\"}\n\n\n @app.get(\"/\", response_model=SomeSerializer.response_model)\n async def root(serializer: SomeSerializer):\n model_instance = await serializer.save()\n return model_instance.dict()\n\n POST-ing to this route following JSON:\n\n .. code-block:: json\n\n {\"read_only1\": \"a\", \"write_only2\": 123, \"field1\": \"b\"}\n\n Should return following response:\n\n .. code-block:: json\n\n {\"id\": 1, \"field1\": \"b\", \"read_only1\": \"const\"}\n\n \"\"\"\n\n @classmethod\n def sanitize_list(cls, iterable: Iterable) -> List[dict]:\n \"\"\"\n Sanitize list of rows that comes from DB to not include `exclude` set.\n\n :param iterable: sequence of dicts with model fields (from rows in DB)\n :return: list of cleaned, without `excluded`, dicts with model rows\n \"\"\"\n\n def clean_d(d):\n if hasattr(cls.Meta, \"exclude\"):\n for e in cls.Meta.exclude:\n d.pop(e, None)\n return d\n return d\n\n return list(map(lambda x: clean_d(x), iterable))\n\n async def save(\n self,\n include: set = None,\n exclude: set = None,\n rewrite_fields: dict = None,\n ) -> MongoDBModel:\n \"\"\"\n If we have `model` attribute in Meta, it populates model with data\n and saves it in DB, returning instance of model.\n\n :param rewrite_fields: dict of fields with values that override any\n other values for these fields right before inserting into DB.\n This is useful when you need to set some value explicitly\n based on request (e.g. 
user or token).\n :param include: fields to include from model in DB insert command\n :param exclude: fields to exclude from model in DB insert command\n :return: model (MongoDBModel) that was saved\n \"\"\"\n if (\n hasattr(self, \"Meta\")\n and getattr(self.Meta, \"model\", None) is not None\n ):\n instance = self.Meta.model(**self.__dict__)\n await instance.save(\n include=include, exclude=exclude, rewrite_fields=rewrite_fields\n )\n return instance\n\n async def update_one(\n self,\n filter_kwargs: dict,\n skip_defaults: bool = True,\n array_fields: list = None,\n ) -> UpdateResult:\n \"\"\"\n If we have `model` attribute in Meta, it proxies filters & update data\n and after that returns actual result of update operation.\n\n :return: result of update operation\n \"\"\"\n if (\n hasattr(self, \"Meta\")\n and getattr(self.Meta, \"model\", None) is not None\n ):\n data = {}\n fields = self.dict(skip_defaults=skip_defaults)\n\n if not array_fields:\n array_fields = []\n\n if array_fields:\n tmp_data = {}\n for i in array_fields:\n tmp_data[i] = {\"$each\": fields.pop(i)}\n data.update({\"$push\": tmp_data})\n if fields:\n data.update({\"$set\": fields})\n return await self.Meta.model.update_one(\n filter_kwargs=filter_kwargs, **data\n )\n\n async def update_many(\n self,\n filter_kwargs: dict,\n skip_defaults: bool = True,\n array_fields: list = None,\n ) -> UpdateResult:\n \"\"\"\n If we have `model` attribute in Meta, it proxies filters & update data\n and after that returns actual result of update operation.\n\n :return: result of update many operation\n \"\"\"\n if (\n hasattr(self, \"Meta\")\n and getattr(self.Meta, \"model\", None) is not None\n ):\n data = {}\n fields = self.dict(skip_defaults=skip_defaults)\n\n if not array_fields:\n array_fields = []\n\n if array_fields:\n tmp_data = {}\n for i in array_fields:\n tmp_data[i] = {\"$each\": fields.pop(i)}\n data.update({\"$push\": tmp_data})\n if fields:\n data.update({\"$set\": fields})\n return await self.Meta.model.update_many(\n filter_kwargs=filter_kwargs, **data\n )\n\n def dict(self, *args, **kwargs) -> dict:\n \"\"\"\n Removes excluded fields based on `Meta` and `kwargs`\n :return: dict of serializer data fields\n \"\"\"\n exclude = kwargs.get(\"exclude\")\n if not exclude:\n exclude = set()\n\n exclude.update({\"_id\"})\n\n if hasattr(self.Meta, \"exclude\") and self.Meta.exclude:\n exclude.update(self.Meta.exclude)\n\n if (\n hasattr(self.Meta, \"write_only_fields\")\n and self.Meta.write_only_fields\n ):\n exclude.update(self.Meta.write_only_fields)\n\n kwargs.update({\"exclude\": exclude})\n original = super().dict(*args, **kwargs)\n return original\n\n class Meta(AbstractMeta):\n ...\n\n\nclass ModelSerializer(Serializer):\n \"\"\"\n Left as a proxy for correct naming until we figure out how to inherit\n all the specific to model-handling methods and fields directly in here.\n \"\"\"\n\n ...\n","repo_name":"identixone/fastapi_contrib","sub_path":"fastapi_contrib/serializers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","stars":578,"dataset":"github-code","pt":"61"}
+{"seq_id":"11090416915","text":"from django.conf.urls import patterns, url\n\nurlpatterns = patterns('primer.notifications.views',\n\n    # URL prefix: notifications\n    url(r'^widget/$', 'widget_content', name='notifications-widget'),\n    url(r'^count/$', 'count', name='notifications-count'),\n\n)","repo_name":"jamesmfriedman/django-primer","sub_path":"primer/notifications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"15518703046","text":"def CalculateR0(beta1, beta2, theta, gamma1, gamma2, gamma3=None, hosrate=None, delta=None, model=\"Base\"):\r\n\t# Basic reproduction number R0 for the supported model variants.\r\n\tif model == \"Base\":\r\n\t\tR0 = beta1*(1-theta)/gamma1 + beta2*theta/gamma2\r\n\telif model == \"BaseDeath\":\r\n\t\t# Disease-induced mortality (delta) shortens the second infectious period.\r\n\t\tR0 = beta1*(1-theta)/gamma1 + beta2*theta/(gamma2 + delta)\r\n\telif model == \"Extended\" or model == \"BaseHospitalization\":\r\n\t\t# Algebraically equal to beta1*(1-theta)/gamma1 + beta2*theta*(1-hosrate)/gamma2 + beta2*hosrate*theta/gamma3\r\n\t\tR0 = -beta1*(theta*gamma2*gamma3 - gamma2*gamma3)/(gamma1*gamma2*gamma3) - beta2*(hosrate*theta*gamma1*gamma3 - theta*gamma1*gamma3)/(gamma1*gamma2*gamma3) + beta2*hosrate*theta/gamma3\r\n\telse:\r\n\t\traise ValueError(\"Unknown model: {}\".format(model))\r\n\treturn R0\r\n\r\n","repo_name":"yupengyanghuhu/USA-CDC-Bayesian-ODE","sub_path":"core/CalculateMetrics.py","file_name":"CalculateMetrics.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
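The "Extended" branch of `CalculateR0` above is written in an expanded form; it reduces to three interpretable transmission terms. A quick symbolic verification sketch, assuming sympy is available (this snippet is my addition, not part of the original module):

import sympy as sp

beta1, beta2, theta, gamma1, gamma2, gamma3, hosrate = sp.symbols(
    "beta1 beta2 theta gamma1 gamma2 gamma3 hosrate", positive=True)

# The expanded "Extended" expression, exactly as written in CalculateR0.
extended = (
    -beta1*(theta*gamma2*gamma3 - gamma2*gamma3)/(gamma1*gamma2*gamma3)
    - beta2*(hosrate*theta*gamma1*gamma3 - theta*gamma1*gamma3)/(gamma1*gamma2*gamma3)
    + beta2*hosrate*theta/gamma3
)

# A compact equivalent: three transmission terms weighted by the routing fractions.
compact = (
    beta1*(1 - theta)/gamma1
    + beta2*theta*(1 - hosrate)/gamma2
    + beta2*hosrate*theta/gamma3
)

assert sp.simplify(extended - compact) == 0  # the two forms agree identically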
+{"seq_id":"36833068755","text":"# ============\n# Test classes\n# ============\n\n# A class which does nothing:\n\n# 1============================================================================\nclass DoNothing:\n pass\n\n# 2============================================================================\n# A parent/base class:\n\nclass Employee:\n # class variable:\n count = 0\n \n # class constructor:\n def __init__(self, name, email, role):\n self.name = name\n self.email = email\n self.role = role\n # Change class variable:\n Employee.count += 1\n \n # instance method:\n def get_info(self):\n return '{},{},{}'.format(self.name, self.email, self.role)\n\n# 3============================================================================\n# A child/derived class - single inheritance:\n\nclass Receptionist(Employee):\n def __init__(self, name, email, role, foreignLanguage):\n super().__init__(name, email, role)\n self.foreignLanguage = foreignLanguage\n\n def get_info(self):\n return '{},{},{}, {}'.format(self.name, self.email, self.role,\n self.foreignLanguage)\n\n# 4============================================================================\n# A child/derived class - multiple inheritance:\n\nclass Equipment:\n def __init__(self, equipType, equipOwner):\n self.equipType = equipType\n self.equipOwner = equipOwner\n def get_info(self):\n return 'equipment type: {}, owned by: {}'.format(self.equipType,\n self.equipOwner)\n\nclass Developer(Employee, Equipment):\n def __init__(self, name, email, role,\n equipType, equipOwner,\n progLanguage):\n super().__init__(name, email, role)\n self.equipType = equipType\n self.equipOwner = equipOwner\n self.progLanguage = progLanguage\n\n def get_info(self):\n return '{},{},{},{},{},{}'.format(self.name, self.email, self.role,\n self.equipType, self.equipOwner,\n self.progLanguage)\n\n# 5============================================================================\n# Data encapsulation\n\nclass Person:\n def __init__(self, name, age, weight):\n # public data:\n self.name = name\n # private data (using accessers)\n self._age = age\n # private data (using property)\n self._weight = weight\n\n def get_age(self):\n return self._age\n\n def set_age(self, age):\n self._age = age\n\n def delete_age(self):\n del self._age\n \n @property\n def weight(self):\n return self._weight\n\n @weight.setter\n def weight(self, weight):\n self._weight = weight\n\n @weight.deleter\n def weight(self):\n del self._weight\n\n# =============================================================================\n# 1============================================================================\n# Test class DoNothing\n\ndoNothing1 = DoNothing()\nprint(doNothing1)\n\n# 2============================================================================\n# Test base class\n\nemployee1 = Employee('John Smith', 'jsmith@abc.com', 'CEO')\nprint(employee1.get_info())\nprint(Employee.count)\n\n# 3============================================================================\n# Test single inheritance\n\nreceptionist1 = Receptionist('Sarah Jones', 'sjones@abc.com', 'Receptionist',\n 'Spanish')\nprint(receptionist1.get_info())\nprint(Employee.count)\n\n# 4============================================================================\n# Test multiple inheritance\n\nequipment1 = Equipment('MacBoook Pro', 'Company')\nprint(equipment1.get_info())\n\ndeveloper1 = Developer('Mike Wang', 'mwang@abc.com', 'Senior Developer',\n equipment1.equipType, equipment1.equipOwner,\n 
'Python')\nprint(developer1.get_info())\nprint(Employee.count)\n\n# 5=============================================================================\n# Test data encapsulation\n\n# Initial values:\nperson1 = Person('Mike', 17, 100.0)\nprint(person1.name, person1.get_age(), person1.weight)\n\n# Change values:\nperson1.name = 'Michael'\nperson1.set_age(18)\nperson1.age = 19  # Creates a new, unrelated 'age' attribute; get_age() still returns _age\nperson1.weight = 110.0\nprint(person1.name, person1.get_age(), person1.weight)\n\n# Delete variables:\ndel person1.name\nperson1.delete_age()\ndel person1.weight\n# AttributeError:\n#print(person1.name, person1.get_age(), person1.weight)\n\n\n\n","repo_name":"jonathanzho/sweStuff","sub_path":"python/codequickly/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23656929518","text":"import struct\n\n\ndebit_type = 'debit'\ncredit_type = 'credit'\nstart_auto_type = 'start_auto'\nend_auto_type = 'end_auto'\n# The log is binary, so the one-byte record-type markers are bytes keys.\nrecord_types = {\n    b'\\x00': debit_type,\n    b'\\x01': credit_type,\n    b'\\x02': start_auto_type,\n    b'\\x03': end_auto_type,\n}\nchecked_user_id = 2456938384156277127\n\n\ndef process():\n    path = 'txnlog.dat'\n    debit_dollars = 0.0\n    credit_dollars = 0.0\n    started_autopays = 0\n    ended_autopays = 0\n    user_balance = 0\n    with open(path, 'rb') as f:  # binary mode: records are raw bytes\n        _, _, num = read_header(f)\n        for i in range(num):\n            record_type, timestamp, user_id, amount = read_record(f)\n            if record_type == debit_type:\n                debit_dollars += amount\n                if user_id == checked_user_id:\n                    user_balance += amount\n            if record_type == credit_type:\n                credit_dollars += amount\n                if user_id == checked_user_id:\n                    user_balance -= amount\n            if record_type == start_auto_type:\n                started_autopays += 1\n            if record_type == end_auto_type:\n                ended_autopays += 1\n            print('Record Type: {}, Time: {}, UserId: {}, Amount: {}'\n                  .format(record_type, timestamp, user_id, amount))\n    show_answers(debit_dollars, credit_dollars, started_autopays, ended_autopays, user_balance)\n\n\ndef read_header(f):\n    magic_string = f.read(4)\n    version = ord(f.read(1))\n    number_of_records, = struct.unpack('!I', f.read(4))\n    return magic_string, version, number_of_records\n\n\ndef read_record(f):\n    record_type = record_types[f.read(1)]\n    timestamp, = struct.unpack('!I', f.read(4))\n    user_id, = struct.unpack_from('!Q', f.read(8))\n    amount = None\n    if record_type in [debit_type, credit_type]:\n        amount, = struct.unpack_from('!d', f.read(8))\n    return record_type, timestamp, user_id, amount\n\n\ndef show_answers(debit, credit, started, ended, balance):\n    questions = {\n        'What is the total amount in dollars of debits?': debit,\n        'What is the total amount in dollars of credits?': credit,\n        'How many autopays were started?': started,\n        'How many autopays were ended?': ended,\n        'What is balance of user ID 2456938384156277127?': balance\n    }\n    for question, answer in questions.items():\n        print('{} {}'.format(question, answer))\n\nif __name__ == '__main__':\n    process()\n","repo_name":"lukashambsch/homework","sub_path":"proto/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
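Because `read_header` and `read_record` simply mirror a fixed big-endian layout, a synthetic in-memory record is enough to exercise them. A hypothetical round-trip, assuming the two functions above are in scope (the b"MPS7" magic and the sample values are placeholders I chose; the parser never validates the magic string):

import io
import struct

# Build one synthetic debit record in the same layout the parser expects.
buf = io.BytesIO()
buf.write(b"MPS7")                        # 4-byte magic (placeholder)
buf.write(bytes([1]))                     # 1-byte version
buf.write(struct.pack("!I", 1))           # uint32 record count, big-endian
buf.write(b"\x00")                        # record type marker: debit
buf.write(struct.pack("!I", 1393108945))  # uint32 timestamp
buf.write(struct.pack("!Q", 2456938384156277127))  # uint64 user id
buf.write(struct.pack("!d", 604.27))      # float64 amount (debit/credit only)
buf.seek(0)

magic, version, num = read_header(buf)
print(read_record(buf))  # ('debit', 1393108945, 2456938384156277127, 604.27)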
+{"seq_id":"35557750449","text":"import sys\n\ndef dfs(man1, man2, cnt):\n global res\n\n if res != -1:\n return\n\n for r in range(n):\n if not vi[man1][r] and arr[man1][r]:\n if r == man2:\n res = cnt\n else:\n vi[man1][r], vi[r][man1] = True, True\n dfs(r, man2, cnt+1)\n vi[man1][r], vi[r][man1] = False, False\n\n### MAIN\nn = int(sys.stdin.readline())\nman1, man2 = map(lambda x: int(x)-1, sys.stdin.readline().strip().split())\nm = int(sys.stdin.readline())\n\narr = [[False for _ in range(n)] for _ in range(n)]\nvi = [[False for _ in range(n)] for _ in range(n)]\nres = -1\n\nfor _ in range(m):\n p, c = map(lambda x: int(x)-1, sys.stdin.readline().strip().split())\n arr[p][c], arr[c][p] = True, True\n\ndfs(man1, man2, 1)\nprint(res)\n\n","repo_name":"deveun/Algorithm","sub_path":"Baekjoon/2544_촌수계산.py","file_name":"2544_촌수계산.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34366779784","text":"import logging\nfrom datetime import datetime\nfrom unittest import mock\nfrom unittest.mock import ANY, MagicMock, Mock, call, patch\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom lxml import etree\nfrom packtools.sps.pid_provider.xml_sps_adapter import PidProviderXMLAdapter\nfrom packtools.sps.pid_provider.xml_sps_lib import XMLWithPre\n\nfrom pid_requester import exceptions, models\n\nUser = get_user_model()\n\n\ndef _get_xml_adapter_from_file(path):\n for item in XMLWithPre.create(path=path):\n obj = PidProviderXMLAdapter(item)\n return obj\n\n\ndef _get_xml_with_pre(xml=None):\n xml = xml or \" \"\n return XMLWithPre(\"\", etree.fromstring(xml))\n\n\ndef _get_xml_adapter(xml=None):\n xml = xml or \" \"\n xml_with_pre = XMLWithPre(\"\", etree.fromstring(xml))\n obj = PidProviderXMLAdapter(xml_with_pre)\n return obj\n\n\ndef _get_xml_adapter_with_issue_data():\n xml_adapter = _get_xml_adapter()\n xml_adapter.journal_issn_electronic = \"data-issn-e\"\n xml_adapter.journal_issn_print = \"data-issn-p\"\n xml_adapter.volume = \"data-vol\"\n xml_adapter.number = \"data-num\"\n xml_adapter.suppl = \"data-suppl\"\n xml_adapter.pub_year = \"data-year\"\n xml_adapter.issue = models.XMLIssue.get_or_create(\n models.XMLJournal.get_or_create(\"data-issn-e\", \"data-issn-p\"),\n \"data-vol\",\n \"data-num\",\n \"data-suppl\",\n \"data-year\",\n )\n xml_adapter.fpage = \"data-fpage\"\n xml_adapter.fpage_seq = \"data-fpage-seq\"\n xml_adapter.lpage = \"data-lpage\"\n\n xml_adapter.article_pub_year = \"data-pub-year\"\n xml_adapter.v3 = \"123456789012345678901v3\"\n xml_adapter.v2 = \"123456789012345678901v2\"\n xml_adapter.aop_pid = \"12345678901234567890aop\"\n\n xml_adapter.main_doi = \"data-main_doi\"\n xml_adapter.main_toc_section = \"data-main_toc_section\"\n xml_adapter.elocation_id = \"data-elocation_id\"\n return xml_adapter\n\n\ndef _create_xml_adapter__aop():\n xml_adapter = _get_xml_adapter()\n xml_adapter.journal_issn_electronic = \"data-issn-e\"\n xml_adapter.journal_issn_print = \"data-issn-p\"\n xml_adapter.issue = None\n xml_adapter.article_pub_year = \"data-pub-year\"\n xml_adapter.v3 = \"123456789012345678901v3\"\n xml_adapter.v2 = \"123456789012345678901v2\"\n xml_adapter.aop_pid = \"12345678901234567890aop\"\n xml_adapter.main_doi = \"data-main_doi\"\n xml_adapter.main_toc_section = \"data-main_toc_section\"\n return xml_adapter\n\n\nclass PidRequesterXMLValidateQueryParamsTest(TestCase):\n def setUp(self):\n self.article_params = {\n \"z_article_titles_texts\": \"TITLES\",\n \"z_collab\": \"VALUE\",\n \"z_links\": \"Links\",\n \"z_partial_body\": \"Body\",\n \"z_surnames\": \"Z_SURNAMES\",\n \"article_pub_year\": \"2020\",\n \"elocation_id\": \"e19347\",\n \"journal__issn_electronic\": \"issn electronic\",\n \"journal__issn_print\": \"issn print\",\n \"main_doi\": \"DOI\",\n \"pkg_name\": \"pkgName\",\n }\n\n self.issue_params = {\n \"issue__pub_year\": \"year\",\n \"issue__volume\": \"vol\",\n \"issue__number\": \"num\",\n \"issue__suppl\": \"suppl\",\n \"fpage\": \"1\",\n \"fpage_seq\": \"a\",\n \"lpage\": \"11\",\n }\n\n def test_validate_query_params_all_present(self):\n params = self.article_params\n result = models.PidRequesterXML.validate_query_params(params)\n self.assertTrue(result)\n\n def test_validate_query_params_all_present_plus_issue_params(self):\n params = self.article_params\n params.update(self.issue_params)\n result = models.PidRequesterXML.validate_query_params(params)\n 
self.assertTrue(result)\n\n    def test_validate_query_params_issue_params_only(self):\n        params = {}\n        params.update(self.issue_params)\n        with self.assertRaises(exceptions.NotEnoughParametersToGetDocumentRecordError):\n            result = models.PidRequesterXML.validate_query_params(params)\n\n    def test_validate_query_params_journal_issns_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"journal__issn_print\"]\n        del params[\"journal__issn_electronic\"]\n        with self.assertRaises(exceptions.NotEnoughParametersToGetDocumentRecordError):\n            result = models.PidRequesterXML.validate_query_params(params)\n\n    def test_validate_query_params_pub_year_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"article_pub_year\"]\n        del params[\"issue__pub_year\"]\n        with self.assertRaises(exceptions.NotEnoughParametersToGetDocumentRecordError):\n            result = models.PidRequesterXML.validate_query_params(params)\n\n    def test_validate_query_params_main_doi_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_fpage_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"fpage\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_elocation_id_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"elocation_id\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_main_doi_fpage_elocation_id_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_z_surnames_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        del params[\"z_surnames\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_z_collab_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        del params[\"z_collab\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_z_links_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        del params[\"z_links\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_pkg_name_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        del params[\"pkg_name\"]\n        result = models.PidRequesterXML.validate_query_params(params)\n        self.assertTrue(result)\n\n    def test_validate_query_params_all_disambiguators_absence(self):\n        params = self.article_params\n        params.update(self.issue_params)\n        del params[\"main_doi\"]\n        del params[\"fpage\"]\n        del params[\"elocation_id\"]\n        del 
params[\"pkg_name\"]\n del params[\"z_surnames\"]\n del params[\"z_collab\"]\n del params[\"z_links\"]\n\n with self.assertRaises(exceptions.NotEnoughParametersToGetDocumentRecordError):\n result = models.PidRequesterXML.validate_query_params(params)\n\n\n@patch(\n \"packtools.sps.pid_provider.xml_sps_adapter.PidProviderXMLAdapter.query_list\",\n new_callable=mock.PropertyMock,\n)\n@patch(\n \"pid_requester.models.PidRequesterXML.validate_query_params\",\n return_value=True,\n)\n@patch(\"pid_requester.models.PidRequesterXML.objects.get\")\nclass PidRequesterXMLQueryDocumentTest(TestCase):\n def test_query_document_is_called_with_query_params(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n \"\"\"\n PidRequesterXML._query_document is called with parameters returned by\n PidRequesterXML.query_list\n \"\"\"\n params_list = [\n {\"key\": \"value\"},\n ]\n mock_query_list.return_value = params_list\n mock_get.side_effect = models.PidRequesterXML.DoesNotExist\n xml_adapter = _get_xml_adapter()\n result = models.PidRequesterXML._query_document(xml_adapter)\n mock_get.assert_called_once_with(**{\"key\": \"value\"})\n\n def test_query_document_returns_none_if_document_does_not_exist(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n params_list = [\n {\"key\": \"value\"},\n ]\n mock_query_list.return_value = params_list\n mock_get.side_effect = models.PidRequesterXML.DoesNotExist\n xml_adapter = _get_xml_adapter()\n result = models.PidRequesterXML._query_document(xml_adapter)\n self.assertIsNone(result)\n\n def test_query_document_returns_found_document(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n params_list = [\n {\"key\": \"value\"},\n ]\n mock_query_list.return_value = params_list\n mock_get.return_value = models.PidRequesterXML()\n xml_adapter = _get_xml_adapter()\n result = models.PidRequesterXML._query_document(xml_adapter)\n self.assertEqual(models.PidRequesterXML, type(result))\n\n def test_query_document_returns_found_item_at_the_second_round(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n params_list = [\n {\"key\": \"value\"},\n {\"key\": \"value2\"},\n ]\n mock_query_list.return_value = params_list\n mock_get.side_effect = [\n models.PidRequesterXML.DoesNotExist,\n models.PidRequesterXML(),\n ]\n xml_adapter = _get_xml_adapter()\n result = models.PidRequesterXML._query_document(xml_adapter)\n self.assertEqual(models.PidRequesterXML, type(result))\n\n def test_query_document_raises_query_document_error_because_multiple_objects_returned(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n params_list = [\n {\"key\": \"value\"},\n ]\n mock_query_list.return_value = params_list\n mock_get.side_effect = models.PidRequesterXML.MultipleObjectsReturned\n with self.assertRaises(\n exceptions.QueryDocumentMultipleObjectsReturnedError\n ) as exc:\n xml_adapter = _get_xml_adapter()\n result = models.PidRequesterXML._query_document(xml_adapter)\n\n def test_query_document_raises_error(\n self,\n mock_get,\n mock_validate_params,\n mock_query_list,\n ):\n \"\"\"\n PidRequesterXML._query_document is called with parameters returned by\n PidRequesterXML.query_list\n \"\"\"\n params_list = [\n {\"key\": \"value\"},\n ]\n mock_query_list.return_value = params_list\n mock_validate_params.side_effect = (\n exceptions.NotEnoughParametersToGetDocumentRecordError\n )\n\n with self.assertRaises(exceptions.NotEnoughParametersToGetDocumentRecordError):\n xml_adapter = _get_xml_adapter()\n result = 
models.PidRequesterXML._query_document(xml_adapter)\n\n\n@patch(\"pid_requester.models.PidRequesterXML._query_document\")\nclass PidRequesterXMLGetRegisteredTest(TestCase):\n def setUp(self):\n self.xml_with_pre = _get_xml_with_pre()\n\n def test_get_registered_returns_dict_with_registered_data(\n self,\n mock_query_document,\n ):\n pid_req_xml = models.PidRequesterXML()\n pid_req_xml.pkg_name = \"registered_pkg_name\"\n pid_req_xml.v2 = \"registered_v2\"\n pid_req_xml.v3 = \"registered_v3\"\n pid_req_xml.aop_pid = \"registered_aop_pid\"\n pid_req_xml.created = datetime(2023, 2, 20)\n pid_req_xml.updated = datetime(2023, 2, 20)\n\n mock_query_document.return_value = pid_req_xml\n\n result = models.PidRequesterXML.get_registered(self.xml_with_pre)\n expected = {\n \"v3\": \"registered_v3\",\n \"v2\": \"registered_v2\",\n \"aop_pid\": \"registered_aop_pid\",\n \"pkg_name\": \"registered_pkg_name\",\n \"created\": \"2023-02-20T00:00:00\",\n \"updated\": \"2023-02-20T00:00:00\",\n \"record_status\": \"updated\",\n \"synchronized\": False,\n }\n self.assertDictEqual(expected, result)\n\n def test_get_registered_returns_none(\n self,\n mock_query_document,\n ):\n mock_query_document.return_value = None\n\n result = models.PidRequesterXML.get_registered(self.xml_with_pre)\n self.assertIsNone(result)\n\n def test_get_registered_returns_error_multiple_return(\n self,\n mock_query_document,\n ):\n mock_query_document.side_effect = (\n exceptions.QueryDocumentMultipleObjectsReturnedError\n )\n\n result = models.PidRequesterXML.get_registered(self.xml_with_pre)\n self.assertIn(\"error_type\", result.keys())\n self.assertIn(\"error_msg\", result.keys())\n\n def test_get_registered_returns_error_not_enough_params(\n self,\n mock_query_document,\n ):\n mock_query_document.side_effect = (\n exceptions.NotEnoughParametersToGetDocumentRecordError\n )\n\n result = models.PidRequesterXML.get_registered(self.xml_with_pre)\n self.assertIn(\"error_type\", result.keys())\n self.assertIn(\"error_msg\", result.keys())\n\n\nclass PidRequesterXMLEvaluateRegistrationTest(TestCase):\n def setUp(self):\n self.xml_adapter = _get_xml_adapter()\n\n def test_evaluate_registration_accepts_xml_is_aop_and_registered_is_aop(self):\n registered = Mock(spec=models.PidRequesterXML)\n registered.is_aop = True\n\n result = models.PidRequesterXML.evaluate_registration(\n self.xml_adapter, registered\n )\n self.assertTrue(result)\n\n def test_evaluate_registration_accepts_xml_is_not_aop_and_registered_is_aop(self):\n registered = Mock(spec=models.PidRequesterXML)\n registered.is_aop = True\n\n self.xml_adapter = _get_xml_adapter_from_file(\n \"./pid_requester/fixtures/sub-article/2236-8906-hoehnea-49-e1082020.xml\"\n )\n\n result = models.PidRequesterXML.evaluate_registration(\n self.xml_adapter, registered\n )\n self.assertTrue(result)\n\n def test_evaluate_registration_raises_error(self):\n registered = Mock(spec=models.PidRequesterXML)\n registered.is_aop = False\n\n with self.assertRaises(exceptions.ForbiddenPidRequesterXMLRegistrationError):\n result = models.PidRequesterXML.evaluate_registration(\n self.xml_adapter, registered\n )\n\n\n@patch(\"pid_requester.models.PidRequesterXML._get_unique_v2\")\nclass PidRequesterXMLAddV2Test(TestCase):\n def _get_xml_adapter(self, v2=None, v3=None, aop_pid=None):\n v2 = (\n v2\n and f'{v2} '\n or \"\"\n )\n v3 = (\n v3\n and f'{v3} '\n or \"\"\n )\n aop_pid = (\n aop_pid\n and f'{aop_pid} '\n or \"\"\n )\n\n return _get_xml_adapter(\n f\"\"\"\n \n {v2}\n {v3}\n {aop_pid}\n 
10.36416/1806-3756/e20220072 \n 01100 \n \n \"\"\"\n )\n\n # TODO\n # def test_add_pid_v2_uses_registered_pid_v2(\n # self,\n # mock_get_unique_v2,\n # ):\n # found = models.PidRequesterXML()\n # found.v2 = \"registered_v2\"\n\n # xml_adapter = self._get_xml_adapter(v2='xml_v2')\n\n # mock_get_unique_v2.return_value = \"generated_v2\"\n\n # models.PidRequesterXML._add_pid_v2(xml_adapter, found)\n # self.assertEqual(\"registered_v2\", xml_adapter.v2)\n\n def test_add_pid_v2_replace_xml_v2_because_its_value_is_invalid_length_is_not_23(\n self,\n mock_get_unique_v2,\n ):\n found = models.PidRequesterXML()\n found.v2 = None\n\n xml_adapter = self._get_xml_adapter(v2=\"bad_size_not_23\")\n\n mock_get_unique_v2.return_value = \"S1806-37132022000201100\"\n\n models.PidRequesterXML._add_pid_v2(xml_adapter, found)\n self.assertEqual(\"S1806-37132022000201100\", xml_adapter.v2)\n\n def test_add_pid_v2_keeps_xml_v2(\n self,\n mock_get_unique_v2,\n ):\n found = models.PidRequesterXML()\n found.v2 = None\n\n xml_adapter = self._get_xml_adapter(v2=\"S1806-37132022000199999\")\n\n mock_get_unique_v2.return_value = \"S1806-37132022000300001\"\n\n models.PidRequesterXML._add_pid_v2(xml_adapter, found)\n self.assertEqual(\"S1806-37132022000199999\", xml_adapter.v2)\n\n def test_add_pid_v2_uses_unique_v2(\n self,\n mock_get_unique_v2,\n ):\n found = models.PidRequesterXML()\n found.v2 = None\n\n xml_adapter = self._get_xml_adapter()\n\n mock_get_unique_v2.return_value = \"S1806-37132022000201100\"\n\n models.PidRequesterXML._add_pid_v2(xml_adapter, found)\n self.assertEqual(\"S1806-37132022000201100\", xml_adapter.v2)\n\n\nclass PidRequesterXMLAddAopPidTest(TestCase):\n def _get_xml_adapter(self, v2=None, v3=None, aop_pid=None):\n v2 = (\n v2\n and f'{v2} '\n or \"\"\n )\n v3 = (\n v3\n and f'{v3} '\n or \"\"\n )\n aop_pid = (\n aop_pid\n and f'{aop_pid} '\n or \"\"\n )\n\n return _get_xml_adapter(\n f\"\"\"\n \n {v2}\n {v3}\n {aop_pid}\n 10.36416/1806-3756/e20220072 \n 01100 \n \n \"\"\"\n )\n\n def test_add_aop_pid_uses_registered_aop_pid(\n self,\n ):\n found = models.PidRequesterXML()\n found.aop_pid = \"12345678901234567890aop\"\n\n xml_adapter = self._get_xml_adapter(aop_pid=\"xml_aop_pid\")\n\n models.PidRequesterXML._add_aop_pid(xml_adapter, found)\n self.assertEqual(\"12345678901234567890aop\", xml_adapter.aop_pid)\n\n def test_add_aop_pid_does_not_replace_by_none(\n self,\n ):\n found = models.PidRequesterXML()\n found.aop_pid = None\n\n xml_adapter = self._get_xml_adapter(aop_pid=\"xml_aop_pid\")\n\n models.PidRequesterXML._add_aop_pid(xml_adapter, found)\n self.assertEqual(\"xml_aop_pid\", xml_adapter.aop_pid)\n\n\n@patch(\"pid_requester.models.PidRequesterXML._is_registered_pid\")\n@patch(\"pid_requester.models.PidRequesterXML._get_unique_v3\")\nclass PidRequesterXMLAddPidV3Test(TestCase):\n def _get_xml_adapter(self, v2=None, v3=None, aop_pid=None):\n v2 = (\n v2\n and f'{v2} '\n or \"\"\n )\n v3 = (\n v3\n and f'{v3} '\n or \"\"\n )\n aop_pid = (\n aop_pid\n and f'{aop_pid} '\n or \"\"\n )\n\n return _get_xml_adapter(\n f\"\"\"\n \n {v2}\n {v3}\n {aop_pid}\n 10.36416/1806-3756/e20220072 \n 01100 \n \n \"\"\"\n )\n\n def test_add_pid_v3_uses_registered_v3(\n self,\n mock__get_unique_v3,\n mock__is_registered_pid,\n ):\n found = models.PidRequesterXML()\n found.v3 = \"123456789012345678901v3\"\n\n xml_adapter = self._get_xml_adapter(v3=\"xml_v3\")\n\n models.PidRequesterXML._add_pid_v3(xml_adapter, found)\n self.assertEqual(\"123456789012345678901v3\", xml_adapter.v3)\n\n def 
test_add_pid_v3_replaced_by_generated(\n self,\n mock__get_unique_v3,\n mock__is_registered_pid,\n ):\n mock__is_registered_pid.return_value = True\n mock__get_unique_v3.return_value = \"gen456789012345678901v3\"\n\n found = None\n\n xml_adapter = self._get_xml_adapter(v3=\"xml_v3\")\n\n models.PidRequesterXML._add_pid_v3(xml_adapter, found)\n self.assertEqual(\"gen456789012345678901v3\", xml_adapter.v3)\n\n def test_add_pid_v3_keeps_xml_v3(\n self,\n mock__get_unique_v3,\n mock__is_registered_pid,\n ):\n mock__is_registered_pid.return_value = False\n mock__get_unique_v3.return_value = \"gen456789012345678901v3\"\n\n found = None\n\n xml_adapter = self._get_xml_adapter(v3=\"xml456789012345678901v3\")\n\n models.PidRequesterXML._add_pid_v3(xml_adapter, found)\n self.assertEqual(\"xml456789012345678901v3\", xml_adapter.v3)\n\n\n@patch(\n \"pid_requester.models.PidRequesterXML.current_version\",\n new_callable=mock.PropertyMock,\n)\nclass PidRequesterXMLIsEqualToTest(TestCase):\n def test_is_equal_to_returns_false(self, mock_last_version):\n registered = models.PidRequesterXML()\n\n xml_adapter = _get_xml_adapter()\n\n result = registered.is_equal_to(xml_adapter)\n self.assertFalse(result)\n\n def test_is_equal_to_returns_true(self, mock_last_version):\n version = Mock(spec=models.XMLVersion)\n version.finger_print = (\n \"fc676757308ad196fd4cebdbc6d7c1f135a68f6ed0c5d3af5f04075664ef6bb3\"\n )\n\n mock_last_version.return_value = version\n\n xml_adapter = _get_xml_adapter_from_file(\n \"./pid_requester/fixtures/sub-article/2236-8906-hoehnea-49-e1082020.xml\"\n )\n print(xml_adapter.finger_print)\n\n registered = models.PidRequesterXML()\n result = registered.is_equal_to(xml_adapter)\n self.assertTrue(result)\n\n\n@patch(\n \"pid_requester.models.utcnow\",\n side_effect=[datetime(2020, 2, 2, 0, 0), datetime(2020, 2, 3, 0, 0)],\n)\n@patch(\"pid_requester.models.PidRequest.save\")\nclass PidRequesterXMLRegisterTest(TestCase):\n def test_register_register_bad_request_and_returns_error(\n self,\n mock_pid_request_save,\n mock_now,\n ):\n expected = {\n \"result_type\": \"\",\n \"result_msg\": \"No attribute enough for disambiguations {'z_surnames': None, 'z_collab': None, 'main_doi': None, 'z_links': None, 'z_partial_body': None, 'pkg_name': None, 'elocation_id': None, 'journal__issn_print': None, 'journal__issn_electronic': None, 'article_pub_year': None, 'z_article_titles_texts': None}\",\n }\n\n user = User()\n xml_with_pre = _get_xml_with_pre()\n result = models.PidRequesterXML.register(\n xml_with_pre=xml_with_pre,\n filename=\"filename.xml\",\n user=user,\n is_published=False,\n synchronized=None,\n )\n self.assertEqual(expected[\"result_type\"], result[\"result_type\"])\n self.assertIsNotNone(result[\"result_msg\"])\n\n @patch(\"pid_requester.models.PidRequesterXML._is_registered_pid\")\n @patch(\"pid_requester.models.PidRequesterXML.objects.get\")\n @patch(\"pid_requester.models.PidRequesterXML.save\")\n @patch(\"pid_requester.models.SyncFailure.create\")\n @patch(\"pid_requester.models.XMLSPS.save\")\n @patch(\"pid_requester.models.XMLVersion.save\")\n @patch(\"pid_requester.models.XMLIssue.save\")\n @patch(\"pid_requester.models.XMLJournal.save\")\n def test_register_for_xml_zip_was_unable_to_get_pid_from_core(\n self,\n mock_xml_journal_save,\n mock_xml_issue_save,\n mock_xml_version_save,\n mock_xml_sps_save,\n mock_sync_failure_create,\n mock_pid_requester_xml_save,\n mock_pid_requester_xml_objects_get,\n mock_is_registered_pid,\n mock_pid_requester_bad_req_save,\n mock_now,\n ):\n # 
set up the test doubles\n        mock_pid_requester_xml_objects_get.return_value = None\n        mock_sync_failure_create.return_value = models.SyncFailure()\n        mock_is_registered_pid.return_value = None\n\n        items = XMLWithPre.create(\n            path=\"./pid_requester/fixtures/sub-article/2236-8906-hoehnea-49-e1082020.xml\"\n        )\n        items = list(items)\n        user = User.objects.first()\n\n        result = models.PidRequesterXML.register(\n            xml_with_pre=items[0],\n            filename=\"filename.xml\",\n            user=user,\n            is_published=False,\n            synchronized=None,\n            error_type=\"error_type\",\n            error_msg=\"error_msg\",\n            traceback=\"traceback\",\n        )\n\n        result = list(result)\n        mock_sync_failure_create.assert_called_once_with(\n            \"error_msg\",\n            \"error_type\",\n            \"traceback\",\n            user,\n        )\n\n\n@patch(\"pid_requester.models.PidRequesterXML.is_equal_to\")\n@patch(\"pid_requester.models.PidRequesterXML._query_document\")\nclass PidRequesterGetRegistrationDemandTest(TestCase):\n    def test_check_registration_demand_requires_none(\n        self,\n        mock_query_document,\n        mock_is_equal_to,\n    ):\n        mock_is_equal_to.return_value = True\n        registered = MagicMock(models.PidRequesterXML)\n        registered.synchronized = True\n        mock_query_document.return_value = registered\n        demand = models.PidRequesterXML.check_registration_demand(ANY)\n\n        self.assertIsNotNone(demand[\"registered\"])\n        self.assertFalse(demand[\"required_remote_registration\"])\n        self.assertFalse(demand[\"required_local_registration\"])\n\n    def test_check_registration_demand_local_and_remote_required_for_new_record(\n        self,\n        mock_query_document,\n        mock_is_equal_to,\n    ):\n        mock_is_equal_to.return_value = False\n        mock_query_document.return_value = None\n        demand = models.PidRequesterXML.check_registration_demand(ANY)\n\n        self.assertDictEqual({}, demand[\"registered\"])\n        self.assertTrue(demand[\"required_remote_registration\"])\n        self.assertTrue(demand[\"required_local_registration\"])\n\n    def test_check_registration_demand_error(\n        self,\n        mock_query_document,\n        mock_is_equal_to,\n    ):\n        mock_query_document.side_effect = (\n            exceptions.NotEnoughParametersToGetDocumentRecordError(\n                \"NotEnoughParametersToGetDocumentRecordError\"\n            )\n        )\n        demand = models.PidRequesterXML.check_registration_demand(ANY)\n\n        self.assertIsNotNone(demand[\"error_type\"])\n        self.assertIsNotNone(demand[\"error_msg\"])\n\n    def test_check_registration_demand_local_and_remote_required_for_registered_record(\n        self,\n        mock_query_document,\n        mock_is_equal_to,\n    ):\n        mock_is_equal_to.return_value = True\n        registered = MagicMock(models.PidRequesterXML)\n        registered.synchronized = False\n        mock_query_document.return_value = registered\n        demand = models.PidRequesterXML.check_registration_demand(ANY)\n\n        self.assertIsNotNone(demand[\"registered\"])\n        self.assertTrue(demand[\"required_remote_registration\"])\n        self.assertTrue(demand[\"required_local_registration\"])\n\n    def test_check_registration_demand_local_and_remote_required_for_updating_record(\n        self,\n        mock_query_document,\n        mock_is_equal_to,\n    ):\n        mock_is_equal_to.return_value = False\n        registered = MagicMock(models.PidRequesterXML)\n        registered.synchronized = False\n        mock_query_document.return_value = registered\n        demand = models.PidRequesterXML.check_registration_demand(ANY)\n\n        self.assertIsNotNone(demand[\"registered\"])\n        self.assertTrue(demand[\"required_remote_registration\"])\n        
self.assertTrue(demand[\"required_local_registration\"])\n","repo_name":"scieloorg/scms-upload","sub_path":"pid_requester/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":29265,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"432954339","text":"#from robot_builder.rbuilder import RobotBuilder\nfrom rob_components import *\nfrom nbot import Robot\nimport time\n\n\nclass RobController(object):\n def build_robot(self, bout=None):\n robot = Robot()\n robot.name = type(self).__name__\n print(\"Building \" + robot.name)\n self.load_modules(robot, bout)\n\n setattr(self, 'get_position', robot.get_position)\n setattr(self, 'get_heading', robot.get_heading)\n setattr(self, 'get_hull', robot.get_hull)\n setattr(self, 'get_velocity', robot.get_velocity)\n\n robot.execute = self.run\n return robot\n\n def load_modules(self, robot, bout):\n print(\"Modules:\")\n for mname, m in self.modules.items():\n m_instance = m()\n print('\\t'+m_instance.name)\n if hasattr(m_instance, \"source\"):\n m_instance.source = robot\n if hasattr(m_instance, \"projectile_cb\"):\n m_instance.projectile_cb = bout.projectile_spawn_cb\n if hasattr(m_instance, \"scanner_cb\"):\n m_instance.scanner_cb = bout.scanner_cb\n if hasattr(m_instance, \"laser_cb\"):\n m_instance.laser_cb = bout.laser_cb\n setattr(robot, mname, m_instance)\n setattr(self, mname, m_instance.gen_interface())\n robot.modules.append(m_instance)\n\n","repo_name":"fostrb/robotfight","sub_path":"rbtest.py","file_name":"rbtest.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1584922757","text":"import myUtilHL as myl\nimport re\nimport copy as cp\n\n# TextGrid output of dict read in by i_tg()\n# (appended if file exists, else from scratch)\n# IN:\n# tg dict\n# f fileName\n# OUT:\n# intoFile\n\n\ndef o_tg(tg, fil):\n h = open(fil, mode='w', encoding='utf-8')\n idt = ' '\n fld = tg_fields()\n # head\n if tg['format'] == 'long':\n h.write(\"File type = \\\"ooTextFile\\\"\\nObject class = \\\"TextGrid\\\"\\n\")\n h.write(\"xmin = {}\\n\".format(tgv(tg['head']['xmin'], 'xmin')))\n h.write(\"xmax = {}\\n\".format(tgv(tg['head']['xmax'], 'xmax')))\n h.write(\"tiers? \\n\")\n h.write(\"size = {}\\n\".format(tgv(tg['head']['size'], 'size')))\n else:\n h.write(\"File type = \\\"ooTextFile\\\"\\nObject class = \\\"TextGrid\\\"\\n\")\n h.write(\"{}\\n\".format(tgv(tg['head']['xmin'], 'xmin')))\n h.write(\"{}\\n\".format(tgv(tg['head']['xmax'], 'xmax')))\n h.write(\"\\n\")\n h.write(\"{}\\n\".format(tgv(tg['head']['size'], 'size')))\n\n # item\n if (tg['format'] == 'long'):\n h.write(\"item []:\\n\")\n\n for i in myl.numkeys(tg['item']):\n # subkey := intervals or points?\n if re.search(tg['item'][i]['class'], 'texttier', re.I):\n subkey = 'points'\n else:\n subkey = 'intervals'\n if tg['format'] == 'long':\n h.write(\"{}item [{}]:\\n\".format(idt, i))\n for f in fld['item']:\n if tg['format'] == 'long':\n if f == 'size':\n h.write(\"{}{}{}: size = {}\\n\".format(\n idt, idt, subkey, tgv(tg['item'][i]['size'], 'size')))\n else:\n h.write(\"{}{}{} = {}\\n\".format(\n idt, idt, f, tgv(tg['item'][i][f], f)))\n else:\n h.write(\"{}\\n\".format(tgv(tg['item'][i][f], f)))\n\n # empty tier\n if subkey not in tg['item'][i]:\n continue\n for j in myl.numkeys(tg['item'][i][subkey]):\n if tg['format'] == 'long':\n h.write(\"{}{}{} [{}]:\\n\".format(idt, idt, subkey, j))\n for f in fld[subkey]:\n if (tg['format'] == 'long'):\n myv = tgv(tg['item'][i][subkey][j][f], f)\n h.write(\n \"{}{}{}{} = {}\\n\".format(idt, idt, idt, f, myv))\n else:\n myv = tgv(tg['item'][i][subkey][j][f], f)\n h.write(\"{}\\n\".format(myv))\n h.close()\n\n\n# returns field names of TextGrid head and items\n# OUT:\n# hol fieldNames\ndef tg_fields():\n return {'head': ['xmin', 'xmax', 'size'],\n 'item': ['class', 'name', 'xmin', 'xmax', 'size'],\n 'points': ['time', 'mark'],\n 'intervals': ['xmin', 'xmax', 'text']}\n\n\n# rendering of TextGrid values\n# IN:\n# s value\n# s attributeName\n# OUT:\n# s renderedValue\ndef tgv(v, a):\n if re.search('(xmin|xmax|time|size)', a):\n return v\n else:\n return \"\\\"{}\\\"\".format(v)\n\n# returns tier subdict from TextGrid\n# IN:\n# tg: dict by i_tg()\n# tn: name of tier\n# OUT:\n# t: dict tier (deepcopy)\n\n\ndef tg_tier(tg, tn):\n if tn not in tg['item_name']:\n return {}\n return cp.deepcopy(tg['item'][tg['item_name'][tn]])\n\n# returns list of TextGrid tier names\n# IN:\n# tg: textgrid dict\n# OUT:\n# tn: sorted list of tiernames\n\n\ndef tg_tn(tg):\n return sorted(list(tg['item_name'].keys()))\n\n# returns tier type\n# IN:\n# t: tg tier (by tg_tier())\n# OUT:\n# typ: 'points'|'intervals'|''\n\n\ndef tg_tierType(t):\n for x in ['points', 'intervals']:\n if x in t:\n return x\n return ''\n\n# returns text field name according to tier type\n# IN:\n# typ: tier type returned by tg_tierType(myTier)\n# OUT:\n# 'points'|<'text'>\n\n\ndef tg_txtField(typ):\n if typ == 'points':\n return 'mark'\n return 'text'\n\n# transforms TextGrid tier to 2 arrays\n# point -> 1 dim + lab\n# interval -> 2 dim (one row per segment) + lab\n# IN:\n# t: tg tier (by tg_tier())\n# opt 
dict\n# .skip <\"\"> regular expression for labels of items to be skipped\n# if empty, only empty items will be skipped\n# OUT:\n# x: 1- or 2-dim array of time stamps\n# lab: corresponding labels\n# REMARK:\n# empty intervals are skipped\n\n\ndef tg_tier2tab(t, opt={}):\n opt = myl.opt_default(opt, {\"skip\": \"\"})\n if len(opt[\"skip\"]) > 0:\n do_skip = True\n else:\n do_skip = False\n x = myl.ea()\n lab = []\n if 'intervals' in t:\n for i in myl.numkeys(t['intervals']):\n z = t['intervals'][i]\n if len(z['text']) == 0:\n continue\n if do_skip and re.search(opt[\"skip\"], z[\"text\"]):\n continue\n\n x = myl.push(x, [z['xmin'], z['xmax']])\n lab.append(z['text'])\n else:\n for i in myl.numkeys(t['points']):\n z = t['points'][i]\n if do_skip and re.search(opt[\"skip\"], z[\"mark\"]):\n continue\n x = myl.push(x, z['time'])\n lab.append(z['mark'])\n return x, lab\n\n\n# transforms table to TextGrid tier\n# IN:\n# t - numpy 1- or 2-dim array with time info\n# lab - list of labels <[]>\n# specs['class'] <'IntervalTier' for 2-dim, 'TextTier' for 1-dim>\n# ['name']\n# ['xmin'] <0>\n# ['xmax'] \n# ['size'] - will be determined automatically\n# ['lab_pau'] - <''>\n# OUT:\n# dict tg tier (see i_tg() subdict below myItemIdx)\n# for 'interval' tiers gaps between subsequent intervals will be bridged\n# by lab_pau\ndef tg_tab2tier(t, lab, specs):\n tt = {'name': specs['name']}\n nd = myl.ndim(t)\n # 2dim array with 1 col\n if nd == 2:\n nd = myl.ncol(t)\n # tier class\n if nd == 1:\n tt['class'] = 'TextTier'\n tt['points'] = {}\n else:\n tt['class'] = 'IntervalTier'\n tt['intervals'] = {}\n # pause label for gaps between intervals\n if 'lab_pau' in specs:\n lp = specs['lab_pau']\n else:\n lp = ''\n # xmin, xmax\n if 'xmin' not in specs:\n tt['xmin'] = 0\n else:\n tt['xmin'] = specs['xmin']\n if 'xmax' not in specs:\n if nd == 1:\n tt['xmax'] = t[-1]\n else:\n tt['xmax'] = t[-1, 1]\n else:\n tt['xmax'] = specs['xmax']\n # point tier content\n if nd == 1:\n for i in myl.idx_a(len(t)):\n # point tier content might be read as [[x],[x],[x],...]\n # or [x,x,x,...]\n if myl.listType(t[i]):\n z = t[i, 0]\n else:\n z = t[i]\n if len(lab) == 0:\n myMark = \"x\"\n else:\n myMark = lab[i]\n tt['points'][i+1] = {'time': z, 'mark': myMark}\n tt['size'] = len(t)\n # interval tier content\n else:\n j = 1\n # initial pause\n if t[0, 0] > tt['xmin']:\n tt['intervals'][j] = {'xmin': tt['xmin'],\n 'xmax': t[0, 0], 'text': lp}\n j += 1\n for i in myl.idx_a(len(t)):\n # pause insertions\n if ((j-1 in tt['intervals']) and\n t[i, 0] > tt['intervals'][j-1]['xmax']):\n tt['intervals'][j] = {'xmin': tt['intervals'][j-1]['xmax'],\n 'xmax': t[i, 0], 'text': lp}\n j += 1\n if len(lab) == 0:\n myMark = \"x\"\n else:\n myMark = lab[i]\n tt['intervals'][j] = {'xmin': t[i, 0],\n 'xmax': t[i, 1], 'text': myMark}\n j += 1\n # final pause\n if tt['intervals'][j-1]['xmax'] < tt['xmax']:\n tt['intervals'][j] = {'xmin': tt['intervals'][j-1]['xmax'],\n 'xmax': tt['xmax'], 'text': lp}\n j += 1 # so that uniform 1 subtraction for size\n # size\n tt['size'] = j-1\n return tt\n\n# add tier to TextGrid\n# IN:\n# tg dict from i_tg(); can be empty dict\n# tier subdict to be added:\n# same dict form as in i_tg() output, below 'myItemIdx'\n# opt\n# ['repl'] - replace tier of same name\n# OUT:\n# tg updated\n# REMARK:\n# !if generated from scratch head xmin and xmax are taken over from the tier\n# which might need to be corrected afterwards!\n\n\ndef tg_add(tg, tier, opt={'repl': True}):\n\n # from scratch\n if 'item_name' not in tg:\n fromScratch = 
True\n tg = {'name': '', 'format': 'long', 'item_name': {}, 'item': {},\n 'head': {'size': 0, 'xmin': 0, 'xmax': 0, 'type': 'ooTextFile'}}\n else:\n fromScratch = False\n\n # tier already contained?\n if (opt['repl'] and (tier['name'] in tg['item_name'])):\n i = tg['item_name'][tier['name']]\n tg['item'][i] = tier\n else:\n # item index\n ii = myl.numkeys(tg['item'])\n if len(ii) == 0:\n i = 1\n else:\n i = ii[-1]+1\n tg['item_name'][tier['name']] = i\n tg['item'][i] = tier\n tg['head']['size'] += 1\n\n if fromScratch and 'xmin' in tier:\n for x in ['xmin', 'xmax']:\n tg['head'][x] = tier[x]\n\n return tg\n","repo_name":"shushennn/S_Sprechstil","sub_path":"mld/src/TextGrids.py","file_name":"TextGrids.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
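The IN/OUT comment blocks above describe each TextGrid helper in isolation; chained together they give a table-to-file round trip. A rough usage sketch, under the assumption that the module's `myUtilHL` array helpers are installed (the tier name and output path are made up):

import numpy as np

# Two labelled intervals; tg_tab2tier bridges gaps to xmin/xmax with the pause label ''.
t = np.array([[0.0, 0.5], [0.7, 1.2]])
tier = tg_tab2tier(t, ["word1", "word2"], {"name": "words", "xmax": 1.5})

tg = tg_add({}, tier)          # empty dict -> TextGrid built from scratch
o_tg(tg, "example.TextGrid")   # written in Praat long format

x, lab = tg_tier2tab(tg_tier(tg, "words"))  # back to a 2-dim array plus labels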
+{"seq_id":"26402187568","text":"import os\n\nimport pytest\n\nfrom moulinette import m18n\nfrom moulinette.core import MoulinetteError\nfrom moulinette.utils.filesystem import (\n append_to_file,\n read_file,\n read_json,\n rm,\n write_to_file,\n write_to_json,\n)\n\n\ndef test_read_file(test_file):\n content = read_file(str(test_file))\n assert content == \"foo\\nbar\\n\"\n\n\ndef test_read_file_missing_file():\n bad_file = \"doesnt-exist\"\n\n with pytest.raises(MoulinetteError) as exception:\n read_file(bad_file)\n\n translation = m18n.g(\"file_not_exist\", path=bad_file)\n expected_msg = translation.format(path=bad_file)\n assert expected_msg in str(exception)\n\n\ndef test_read_file_cannot_read_ioerror(test_file, mocker):\n error = \"foobar\"\n\n with mocker.patch(\"__builtin__.open\", side_effect=IOError(error)):\n with pytest.raises(MoulinetteError) as exception:\n read_file(str(test_file))\n\n translation = m18n.g(\"cannot_open_file\", file=str(test_file), error=error)\n expected_msg = translation.format(file=str(test_file), error=error)\n assert expected_msg in str(exception)\n\n\ndef test_read_json(test_json):\n content = read_json(str(test_json))\n assert \"foo\" in content.keys()\n assert content[\"foo\"] == \"bar\"\n\n\ndef test_read_json_cannot_read(test_json, mocker):\n error = \"foobar\"\n\n with mocker.patch(\"json.loads\", side_effect=ValueError(error)):\n with pytest.raises(MoulinetteError) as exception:\n read_json(str(test_json))\n\n translation = m18n.g(\"corrupted_json\", ressource=str(test_json), error=error)\n expected_msg = translation.format(ressource=str(test_json), error=error)\n assert expected_msg in str(exception)\n\n\ndef test_write_to_existing_file(test_file):\n write_to_file(str(test_file), \"yolo\\nswag\")\n assert read_file(str(test_file)) == \"yolo\\nswag\"\n\n\ndef test_write_to_new_file(tmp_path):\n new_file = tmp_path / \"newfile.txt\"\n\n write_to_file(str(new_file), \"yolo\\nswag\")\n\n assert os.path.exists(str(new_file))\n assert read_file(str(new_file)) == \"yolo\\nswag\"\n\n\ndef test_write_to_existing_file_bad_perms(test_file, mocker):\n error = \"foobar\"\n\n with mocker.patch(\"__builtin__.open\", side_effect=IOError(error)):\n with pytest.raises(MoulinetteError) as exception:\n write_to_file(str(test_file), \"yolo\\nswag\")\n\n translation = m18n.g(\"cannot_write_file\", file=str(test_file), error=error)\n expected_msg = translation.format(file=str(test_file), error=error)\n assert expected_msg in str(exception)\n\n\ndef test_write_cannot_write_folder(tmp_path):\n with pytest.raises(AssertionError):\n write_to_file(str(tmp_path), \"yolo\\nswag\")\n\n\ndef test_write_cannot_write_to_non_existant_folder():\n with pytest.raises(AssertionError):\n write_to_file(\"/toto/test\", \"yolo\\nswag\")\n\n\ndef test_write_to_file_with_a_list(test_file):\n write_to_file(str(test_file), [\"yolo\", \"swag\"])\n assert read_file(str(test_file)) == \"yolo\\nswag\"\n\n\ndef test_append_to_existing_file(test_file):\n append_to_file(str(test_file), \"yolo\\nswag\")\n assert read_file(str(test_file)) == \"foo\\nbar\\nyolo\\nswag\"\n\n\ndef test_append_to_new_file(tmp_path):\n new_file = tmp_path / \"newfile.txt\"\n\n append_to_file(str(new_file), \"yolo\\nswag\")\n\n assert os.path.exists(str(new_file))\n assert read_file(str(new_file)) == \"yolo\\nswag\"\n\n\ndef text_write_dict_to_json(tmp_path):\n new_file = tmp_path / \"newfile.json\"\n\n dummy_dict = {\"foo\": 42, \"bar\": [\"a\", \"b\", \"c\"]}\n write_to_json(str(new_file), dummy_dict)\n _json = 
read_json(str(new_file))\n\n assert \"foo\" in _json.keys()\n assert \"bar\" in _json.keys()\n\n assert _json[\"foo\"] == 42\n assert _json[\"bar\"] == [\"a\", \"b\", \"c\"]\n\n\ndef text_write_list_to_json(tmp_path):\n new_file = tmp_path / \"newfile.json\"\n\n dummy_list = [\"foo\", \"bar\", \"baz\"]\n write_to_json(str(new_file), dummy_list)\n\n _json = read_json(str(new_file))\n assert _json == [\"foo\", \"bar\", \"baz\"]\n\n\ndef test_write_to_json_bad_perms(test_json, mocker):\n error = \"foobar\"\n\n with mocker.patch(\"__builtin__.open\", side_effect=IOError(error)):\n with pytest.raises(MoulinetteError) as exception:\n write_to_json(str(test_json), {\"a\": 1})\n\n translation = m18n.g(\"cannot_write_file\", file=str(test_json), error=error)\n expected_msg = translation.format(file=str(test_json), error=error)\n assert expected_msg in str(exception)\n\n\ndef test_write_json_cannot_write_to_non_existant_folder():\n with pytest.raises(AssertionError):\n write_to_json(\"/toto/test.json\", [\"a\", \"b\"])\n\n\ndef test_remove_file(test_file):\n assert os.path.exists(str(test_file))\n rm(str(test_file))\n assert not os.path.exists(str(test_file))\n\n\ndef test_remove_file_bad_perms(test_file, mocker):\n error = \"foobar\"\n\n with mocker.patch(\"os.remove\", side_effect=OSError(error)):\n with pytest.raises(MoulinetteError) as exception:\n rm(str(test_file))\n\n translation = m18n.g(\"error_removing\", path=str(test_file), error=error)\n expected_msg = translation.format(path=str(test_file), error=error)\n assert expected_msg in str(exception)\n\n\ndef test_remove_directory(tmp_path):\n test_dir = tmp_path / \"foo\"\n test_dir.mkdir()\n\n assert os.path.exists(str(test_dir))\n rm(str(test_dir), recursive=True)\n assert not os.path.exists(str(test_dir))\n","repo_name":"Timost/moulinette","sub_path":"test/test_filesystem.py","file_name":"test_filesystem.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"41148592651","text":"from tkinter import *\r\nfrom pytube import *\r\nfrom pytube import Playlist\r\n \r\n \r\n \r\n \r\nwindow=Tk()\r\n\r\n#Tittle Of The Window\r\nwindow.title(\"Download Youtube Videos\")\r\n#size Of The Window\r\nwindow.geometry('350x200')\r\n\r\n\r\n \r\n#multiple Video i.e PLaylist Action \r\ndef lst():\r\n playlist = Playlist(url.get())\r\n #Shows The Number of Videos Available in The Playlist On The Status Box \r\n number=len(playlist.video_urls)\r\n status='Number of videos in Link:'+str(number)\r\n t1.insert(END,status)\r\n \r\n i=0 #Video Count Initialization\r\n for video in playlist.videos:\r\n video.streams.first().download()\r\n #Video count Iteration\r\n i=i+1\r\n #Shows The Status on The Status Box\r\n status =\"Video Downloaded Successfully:\"+str(i)\r\n t1.insert(END,status)\r\n \r\n#single Video link Action \r\ndef single():\r\n link =url.get()\r\n yt_obj = YouTube(link)\r\n filters = yt_obj.streams.filter(progressive=True, file_extension='mp4')\r\n filters.get_highest_resolution().download()\r\n #Showing Status Of Download To The Status Box\r\n status =\"Video Downloaded Successfully\"\r\n t1.insert(END,status) \r\n \r\n#Download Button\r\nb1=Button(window,text=\"Download\",command=single)\r\nb1.place(x=200,y=20)\r\n\r\n#Playlist Download Button\r\nb1=Button(window,width=35,text=\"Playlist Download\",command=lst)\r\nb1.place(x=10,y=50)\r\n\r\n#Taking Input OF The link in the Entry Box\r\nlbl = Label(window, text=\"Enter The Link\")\r\nurl=StringVar()\r\ne1=Entry(window,width=30,textvariable=url)\r\ne1.place(x=10,y=20)\r\n\r\n#Showing Download Status [Status Box]\r\nt1=Text(window,height=7,width=41)\r\nt1.place(x=10,y=80)\r\n\r\n#Helps To keep up the Tinkter Window Alive\r\nwindow.mainloop()\r\n","repo_name":"binary1Ne/Yt-Downloader","sub_path":"YT downloader.py","file_name":"YT downloader.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18558755376","text":"import cv2\r\nimport numpy as np\r\nfrom gtts import gTTS\r\nimport pyttsx3\r\n\r\nlanguage = 'en'\r\nengine = pyttsx3.init()\r\n\r\n\r\n# loading yolo algorithm\r\nnet = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3.cfg\")\r\nprint(net)\r\nclasses = []\r\nwith open(\"yolov3.txt\", \"r\") as f:\r\n\tclasses = [line.strip() for line in f.readlines()]\r\n# Reading layers`\r\nlayer_names = net.getLayerNames()\r\noutput_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]\r\n\r\ncap = cv2.VideoCapture(1)\r\n# loading the image\r\nwhile True:\r\n ret, frame = cap.read()\r\n boxes = []\r\n class_ids = []\r\n confidences = []\r\n centers = []\r\n type(frame)\r\n height, width, channels = frame.shape\r\n blob = cv2.dnn.blobFromImage(\r\n frame, 0.00392, (416, 416), (0, 0, 0), True, False)\r\n for b in blob:\r\n for n, img_blob in enumerate(b):\r\n n = 1\r\n net.setInput(blob)\r\n outs = net.forward(output_layers)\r\n\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0:\r\n cx = int(detection[0] * width)\r\n cy = int(detection[1] * height)\r\n w = int(detection[2] * width)\r\n h = int(detection[3] * height)\r\n x = int(cx - w / 2)\r\n y = int(cy - h / 2)\r\n boxes.append([x, y, w, h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\r\n print(indexes)\r\n for i in range(len(boxes)):\r\n \tif i in indexes:\r\n x, y, w, h = boxes[i]\r\n label = classes[class_ids[i]]\r\n cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (0, 255, 0), 2)\r\n cv2.putText(frame, label, (x, y + 30), font, 2, (255, 0, 255), 2)\r\n myobj = gTTS(text=label, lang=language, slow=False)\r\n engine.say(label)\r\n engine.runAndWait()\r\n cv2.imshow(\"Image\", frame)\r\n if cv2.waitKey(20) & 0xFF == ord(\" \"):\r\n exit(0)\r\n","repo_name":"Anay-45/Real-time-object-detection-for-blind-people-with-voice-feedback","sub_path":"yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15039406925","text":"from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer\n\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')\nmodel = GPT2LMHeadModel.from_pretrained('gpt2')\n\ngenerator = pipeline('text-generation', model=model, tokenizer=tokenizer)\n\ndef generate_text(input : str, max_length : int = 100 ) -> str:\n output = generator(input, max_length=max_length, num_return_sequences=5)\n \n result = output[0]['generated_text']\n last_dot = result.rfind('.')\n return result[:(last_dot+1)]","repo_name":"hecastro-epitech/GreatPath","sub_path":"api/gpt2.py","file_name":"gpt2.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8446133530","text":"# -*- codeing = utf-8 -*-\nimport os\nimport random\n\nfrom PIL import Image\nfrom faker import Faker\nfrom flask import current_app\nfrom sqlalchemy.exc import IntegrityError\n\nfrom albumy.extensions import db\nfrom albumy.models import User, Photo, Tag, Comment\n\nfake = Faker()\n\n\ndef fake_admin():\n # 创建管理员账户\n admin = User(name='',\n username='',\n email='',\n bio=fake.sentence(),\n website=fake.url(),\n confirmed=True)\n admin.set_password('12345678')\n db.session.add(admin)\n db.session.commit()\n\n\ndef fake_user(count=10):\n # 创建普通用户\n for i in range(count):\n user = User(name=fake.name(),\n confirmed=True,\n username=fake.user_name(),\n bio=fake.sentence(),\n location=fake.city(),\n website=fake.url(),\n member_since=fake.date_this_decade(),\n email=fake.email())\n user.set_password('123456')\n db.session.add(user)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n\ndef fake_follow(count=30):\n # 创建用户之间的关注关系\n for i in range(count):\n user = User.query.get(random.randint(1, User.query.count()))\n user.follow(User.query.get(random.randint(1, User.query.count())))\n db.session.commit()\n\n\ndef fake_tag(count=20):\n # 创建标签\n for i in range(count):\n tag = Tag(name=fake.word())\n db.session.add(tag)\n try:\n db.session.commit()\n # IntegrityError作用:当数据库中已经存在相同的数据时,会报错\n except IntegrityError:\n db.session.rollback()\n\n\ndef fake_photo(count=30):\n # 创建图片\n upload_path = current_app.config['ALBUMY_UPLOAD_PATH']\n for i in range(count):\n filename = 'random_%d.jpg' % i\n # lambda函数在每次调用r时都生成一个随机数\n r = lambda: random.randint(128, 255)\n img = Image.new(mode='RGB', size=(800, 800), color=(r(), r(), r()))\n img.save(os.path.join(upload_path, filename))\n \n photo = Photo(\n description=fake.text(),\n filename=filename,\n filename_m=filename,\n filename_s=filename,\n author=User.query.get(random.randint(1, User.query.count())),\n timestamp=fake.date_time_this_year()\n )\n \n # tags\n for j in range(random.randint(1, 5)):\n tag = Tag.query.get(random.randint(1, Tag.query.count()))\n if tag not in photo.tags:\n photo.tags.append(tag)\n \n db.session.add(photo)\n db.session.commit()\n\n\ndef fake_collect(count=50):\n # 创建收藏\n for i in range(count):\n user = User.query.get(random.randint(1, User.query.count()))\n user.collect(Photo.query.get(random.randint(1, Photo.query.count())))\n db.session.commit()\n\n\ndef fake_comment(count=100):\n # 创建评论\n for i in range(count):\n comment = Comment(\n author=User.query.get(random.randint(1, User.query.count())),\n body=fake.sentence(),\n timestamp=fake.date_time_this_year(),\n photo=Photo.query.get(random.randint(1, Photo.query.count()))\n )\n db.session.add(comment)\n db.session.commit()\n","repo_name":"TTTTTOMGQ/MyAlbumy","sub_path":"albumy/fakes.py","file_name":"fakes.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24889675822","text":"# Definicao da classe que ira representar o programa em memoria\n\nfrom token import NUMBER\nfrom isiVariable import IsiVariable\nfrom isiSymbolTable import IsiSymbolTable\nimport os\n\nclass AbstractCommand():\n \n def generatePythonCode(self, fIndent=\"\"):\n pass\n\n\nclass ReadCommand(AbstractCommand):\n\n def __init__(self, id: str, var: IsiVariable):\n self._identificador = id\n self._var = var # util para gerar codigo em C / Java, ja que sera necessario definir tipo de read\n\n def __str__(self):\n return \"Read Command[value = {}]\\n\".format(self._identificador)\n\n def generatePythonCode(self, fIndent=\"\"):\n \n if (self._var.getType() == IsiVariable.NUMBER):\n return fIndent + \"{} = float(input())\\n\".format(self._identificador)\n elif (self._var.getType() == IsiVariable.BOOL):\n return fIndent + \"{} = bool(input())\\n\".format(self._identificador)\n else:\n return fIndent + \"{} = input()\\n\".format(self._identificador)\n\n def generateCCode(self, fIndent=\"\"):\n if (self._var.getType() == IsiVariable.NUMBER):\n return fIndent + \"scanf(\\\"%f\\\", &{});\\n\".format(self._identificador)\n elif (self._var.getType() == IsiVariable.BOOL):\n return fIndent + \"scanf(\\\"%d\\\", &{});\\n\".format(self._identificador) # ler como int, pois C na otem booleano!\n else:\n return fIndent + \"scanf(\\\"%s\\\", &{});\\n\".format(self._identificador) # avaliar necessidade do operador & \n \n\nclass WriteCommand(AbstractCommand):\n\n def __init__(self, id: str, type=None):\n self._identificador = id\n self._type = type\n\n def __str__(self):\n return \"Write Command[value = {}]\\n\".format(self._identificador)\n\n def generatePythonCode(self, fIndent=\"\"):\n\n return fIndent + \"print({})\\n\".format(self._identificador)\n\n def generateCCode(self, fIndent=\"\"):\n if (self._type == IsiVariable.NUMBER):\n #print(\"transpilando escrita de numero para C\")\n return fIndent + \"printf(\\\"%f\\\", {});\\n\".format(self._identificador)\n elif (self._type == IsiVariable.BOOL):\n #print(\"transpilando escrita de bool para C\")\n return fIndent + \"printf(\\\"%d\\\", {});\\n\".format(self._identificador) # imprimir como int, pois C na otem booleano!\n else:\n #print(\"transpilando escrita de string para C\")\n return fIndent + \"printf({});\\n\".format(self._identificador)\n \n\n\nclass AttribCommand(AbstractCommand):\n\n def __init__(self, id: str, expr):\n self._identificador = id\n self._expr = expr\n\n if(expr == \"verdadeiro\"):\n self._expr = \"True\"\n if(expr == \"falso\"):\n self._expr = \"False\"\n\n def __str__(self):\n return \"Attribuition Command[id = {}, expr = {}]\\n\".format(self._identificador, self._expr)\n\n def generatePythonCode(self, fIndent=\"\"):\n\n return fIndent + self._identificador + \" = \" + self._expr + \"\\n\"\n\n def generateCCode(self, fIndent=\"\"):\n exprClean = str(self._expr)\n\n if(exprClean.find(\"%\") > -1):\n return fIndent + self._identificador + \" = \" + \"(int){}%(int){};\\n\".format(exprClean.split(\"%\")[0], exprClean.split(\"%\")[1])\n elif(exprClean.find(\"**\") > -1):\n return fIndent + self._identificador + \" = \" + \"pow({}, {});\\n\".format(exprClean.split(\"**\")[0], exprClean.split(\"**\")[1])\n elif(exprClean.find(\"\\\"\") > -1): # atrib de strings, precisamos adaptar o uso de strcopy em C\n return fIndent + \"strcpy({}, {});\\n\".format(self._identificador, exprClean)\n else:\n return fIndent + self._identificador + \" = \" + exprClean + \";\\n\"\n\n\nclass DecisionCommand(AbstractCommand):\n\n def 
\n\nclass DecisionCommand(AbstractCommand):\n\n    def __init__(self, condition : str, tlist, flist):\n        self._condition = condition\n        self._trueList = tlist\n        self._falseList = flist\n\n        self._condition = self._condition.replace('verdadeiro', 'True')\n        self._condition = self._condition.replace('falso', 'False') \n\n    def __str__(self):\n        tlistText = [x.__str__() for x in self._trueList]\n        flistText = [x.__str__() for x in self._falseList]\n        return \"Decision Command[ condition = {}\\n\\ntrueList = {}\\nfalseList = {}]\\n\".format(self._condition, \"\".join(tlistText), \"\".join(flistText))\n\n\n    def generatePythonCode(self, fIndent=\"\"):\n\n        decisiontxt = []\n        indent = \" \"\n\n        decisiontxt.append(\"{}if({}):\\n\".format(fIndent, self._condition))\n        #print(\"len of fIndent in if ({})= {}\".format(self._condition, len(fIndent)))\n        for x in self._trueList:\n            #decisiontxt.append(fIndent + indent + x.generatePythonCode())\n            decisiontxt.append(x.generatePythonCode(fIndent + indent))\n\n        if(len(self._falseList ) != 0):\n            #print(\"len of fIndent in else ({})= {}\".format(self._condition, len(fIndent)))\n            decisiontxt.append(\"{}else:\\n\".format(fIndent))\n            for x in self._falseList:\n                #decisiontxt.append(fIndent + indent + x.generatePythonCode())\n                decisiontxt.append(x.generatePythonCode(fIndent + indent))\n\n        return \"\".join(decisiontxt)\n\n    def generateCCode(self, fIndent=\"\"):\n\n        decisiontxt = []\n        indent = \" \"\n\n        decisiontxt.append(\"{}if({}){{\\n\".format(fIndent, self._condition))\n        #print(\"len of fIndent in if ({})= {}\".format(self._condition, len(fIndent)))\n        for x in self._trueList:\n            #decisiontxt.append(fIndent + indent + x.generatePythonCode())\n            decisiontxt.append(x.generateCCode(fIndent + indent))\n\n        if(len(self._falseList ) != 0):\n            #print(\"len of fIndent in else ({})= {}\".format(self._condition, len(fIndent)))\n            decisiontxt.append(\"{}}}else{{\\n\".format(fIndent))\n            for x in self._falseList:\n                #decisiontxt.append(fIndent + indent + x.generatePythonCode())\n                decisiontxt.append(x.generateCCode(fIndent + indent))\n\n        decisiontxt.append(\"{}}}\\n\".format(fIndent))\n\n        return \"\".join(decisiontxt)\n\n\nclass WhileCommand(AbstractCommand):\n\n    def __init__(self, condition : str, clist):\n        self._condition = condition\n        self._cmdList = clist\n\n    def __str__(self):\n        clistText = [x.__str__() for x in self._cmdList]\n\n        return \"While Command[ condition = {}\\n\\nCommands List:\\n{}\\n]\".format(self._condition, \"\".join(clistText))\n\n    def generatePythonCode(self, fIndent=\"\"):\n\n        whiletxt = []\n\n        indent = \" \"\n\n        whiletxt.append(\"{}while({}):\\n\".format(fIndent, self._condition))\n\n        for x in self._cmdList:\n            #whiletxt.append(fIndent + indent + x.generatePythonCode())\n            whiletxt.append(x.generatePythonCode(fIndent + indent))\n\n        return \"\".join(whiletxt)\n\n\n    def generateCCode(self, fIndent=\"\"):\n\n        whiletxt = []\n\n        indent = \" \"\n\n        whiletxt.append(\"{}while({}){{\\n\".format(fIndent, self._condition))\n\n        for x in self._cmdList:\n            #whiletxt.append(fIndent + indent + x.generatePythonCode())\n            whiletxt.append(x.generateCCode(fIndent + indent))\n\n        whiletxt.append(\"{}}}\\n\".format(fIndent))\n\n        return \"\".join(whiletxt)\n\n\nclass IsiProgram():\n\n    def __init__(self):\n        self._varTable = IsiSymbolTable()\n        self._comandos: AbstractCommand = []\n        self._name = None\n\n\n    def getVarTable(self):\n        return self._varTable\n\n    def setVarTable(self, vt: IsiSymbolTable):\n        self._varTable = vt\n\n    def getCommands(self):\n        return self._comandos\n\n    def setCommands(self, cmds):\n        self._comandos = cmds\n
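\n    # Minimal usage sketch (hypothetical; in practice the parser drives these calls):\n    #\n    #   prog = IsiProgram()\n    #   prog.setProgramName(\"demo\")\n    #   prog.setCommands([AttribCommand(\"x\", \"1+2\"), WriteCommand(\"x\", IsiVariable.NUMBER)])\n    #   prog.generatePyTarget(\"demo.py\")  # writes results/demo.py\n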
\n    def getProgramName(self):\n        return self._name\n\n    def setProgramName(self, name: str):\n        self._name = name\n\n\n    def __str__(self):\n        programa = []\n        for x in self._comandos:\n            programa.append(\"Comando = {}\\n\".format(x))\n        return \"\".join(programa)\n\n    def generatePyTarget(self, outputname=\"stdOutput.py\"):\n\n        codigoTranspilado = []\n        # to indent the generated code correctly\n        indent = \" \"\n\n        codigoTranspilado.append(\"def main():\\n\\n\")\n\n        # the indentation may need to be passed down to generate...\n        for x in self._varTable._hashTable.values():\n            codigoTranspilado.append( x.generatePythonCode(indent))\n\n        for y in self._comandos:\n            codigoTranspilado.append( y.generatePythonCode(indent))\n\n        codigoTranspilado.append(\"\\nif __name__ == \\\"__main__\\\":\\n\")\n        codigoTranspilado.append(\" main()\\n\\n\")\n\n        resultado = \"\".join(codigoTranspilado)\n\n        # create the file with the final code\n        filename = \"results/\" + outputname\n\n        print(\"Resultado salvo em: {}\\n\\n\".format(filename))\n        os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n        with open(filename, \"w\") as f:\n            f.write(resultado)\n\n        return resultado\n\n    def generateCTarget(self, outputname=\"stdOutput.c\"):\n\n        codigoTranspilado = []\n        # to indent the generated code correctly\n        indent = \" \"\n\n        codigoTranspilado.append(\"#include <stdio.h>\\n\")\n        codigoTranspilado.append(\"#include <math.h>\\n\")\n        codigoTranspilado.append(\"#include <string.h>\\n\\n\")\n        codigoTranspilado.append(\"int main(){\\n\\n\")\n\n        codigoTranspilado.append(indent + \"int True = 1;\\n\")\n        codigoTranspilado.append(indent + \"int False = 0;\\n\\n\")\n\n        # the indentation may need to be passed down to generate...\n        for x in self._varTable._hashTable.values():\n            codigoTranspilado.append( x.generateCCode(indent))\n\n        for y in self._comandos:\n            codigoTranspilado.append( y.generateCCode(indent))\n\n        codigoTranspilado.append(\"\\n\\n return 0;\\n\\n\")\n        codigoTranspilado.append(\"}\\n\")\n\n\n        resultado = \"\".join(codigoTranspilado)\n\n        # create the file with the final code\n        filename = \"results/\" + outputname\n\n        print(\"Resultado salvo em: {}\\n\\n\".format(filename))\n        os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n        with open(filename, \"w\") as f:\n            f.write(resultado)\n\n        return resultado\n\n","repo_name":"edumacsou/isilanguage-compiler","sub_path":"app/isiProgram.py","file_name":"isiProgram.py","file_ext":"py","file_size_in_byte":10306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23543903851","text":"def solve(s,k):\n n = 0\n while True:\n try:\n first_minus = s.index('-')\n except:\n first_minus = len(s)\n s = s[first_minus:]\n if s == []:\n return n\n if len(s) < k and len(s) > 0:\n return None\n else:\n for i in range(len(s)):\n if i < k:\n s[i] = '-' if s[i] == '+' else '+'\n n += 1\n\nT = int(input())\nfor i in range(1,T+1):\n s, k = input().split(' ')\n k = int(k)\n s = list(s)\n res = solve(s,k)\n if res == None:\n print(\"Case #\"+str(i)+\": \"+\"IMPOSSIBLE\")\n else:\n print(\"Case #\"+str(i)+\": \"+str(res))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2593.py","file_name":"2593.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2024632783","text":"import unittest\n\n\ndef sum(array):\n \"\"\"\n This function will display the sum of all elements added together.\n Input: array (list) array of numbers\n Output: return the sum of all numbers in array.\n \"\"\"\n\n sumValue = 0\n for element in range(0,len(array)): \n sumValue += array[element]\n return sumValue\n\ndef calculateTax(income):\n \"\"\"\n This function calculate tax value based on income bracket.\n Those income between 0 and 10,000 will pay 0%\n 10,001 - 40,000 will pay 5%\n 40,001 - 50,000 will pay 10%\n 50,001 - 70,000 will pay 15%\n above 70,000 will pay 20%\n\n Input: income (int) \n Output: return the amount of tax needs to be paid\n \"\"\"\n\n if (income > 0 and income <= 10000) or (income <= 0):\n return 0\n elif income > 10000 and income <= 40000:\n return 0.05 * income\n elif income > 40000 and income <= 50000:\n return 0.1 * income\n elif income > 50000 and income <= 70000:\n return 0.15 * income\n else:\n return 0.2 * income\n\nclass Test(unittest.TestCase): # This can be any name!!\n def testcase1(self): # This can be any name!!\n self.assertEquals(sum([1,3,5,7]), 16)\n # self.assertTrue(sum([1,3,5,7]) == 16)\n # self.assertFalse(sum([1,3,5,7]) != 16)\n\n self.assertEquals(sum([2,4,6,8]), 20)\n self.assertEquals(sum([10,50,30,10]), 100)\n\n\n\n\n\n\n\n self.assertTrue(calculateTax(5000) == 0)\n self.assertEquals(calculateTax(10000), 0)\n self.assertTrue(calculateTax(17000) == 850)\n self.assertEquals(calculateTax(40000), 2000)\n self.assertEquals(calculateTax(-1000), 0)\n self.assertTrue(calculateTax(0) == 0)\n self.assertTrue(calculateTax(-40000) == 0)\n\n\nunittest.main()\n\n\n\n\n\n\n\n\n\n\n\n# with open(\"output.txt\", \"w\") as file:\n# for i in range(0,100000):\n# if i == 24857 or i == 9848 or i == 12345:\n# continue\n# file.write(str(i))\n# file.write(\"\\n\")\n\n# import random\n\n# with open(\"output2.txt\", \"w\") as file:\n# for i in range(0,99999):\n# out = []\n# if i == 48563 or i == 28493:\n# for i in range(0,6):\n# out.append(random.randint(0,100))\n# elif i == 193:\n# for i in range(0,3):\n# out.append(random.randint(101,200))\n# elif i == 1928 or i == 93855:\n# for i in range(0, 5):\n# out.append(random.randint(101,200))\n# else:\n# for i in range(0,5):\n# out.append(random.randint(0,100))\n \n# for i in range(0,len(out)):\n# file.write(str(out[i]))\n# if i == len(out) - 1:\n# file.write(\"\\n\")\n# else:\n# file.write(\",\")\n \n \n# print(count)\n","repo_name":"CNK-THA/InTech","sub_path":"Automated Unit Testing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15142907362","text":"data = open('input1.txt','r').read().split('\\n')\n# echo data = data[0]\nprint(data[0])\n\ndata = list(map(list, zip(*data)))\n# print(len(data))\n\nimport collections\ndata = [collections.Counter(x) for x in data]\nprint(data)\n\nepsilon = [c.most_common(2)[0][0] for c in data]\nepsilon = ''.join(epsilon)\nepsilon = int(epsilon, 2)\n\n\ngamma = [c.most_common(2)[1][0] for c in data]\ngamma = ''.join(gamma)\ngamma = int(gamma, 2)\nprint(epsilon, gamma)\nprint(gamma * epsilon)","repo_name":"cmollgaard/AdventOfCode","sub_path":"2021/3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33044359649","text":"import os\nimport re\n\nfrom setuptools import setup\n\nimport shutil\n\nlong_description = \"\"\"\nThe script attempts to detect thrashing situations and temporary stop rogue processes, \nhopefully before things get too much out of control, hopefully giving a sysadm enough time \nto investigate and handle the situation if there is a sysadm around, and if not - hopefully \nallowing boxes to become just slightly degraded instead of completely thrashed, all until the offending \nprocesses ends or the oom killer kicks in.\n\nAs of 2014-09, the development seems to have stagnated - for the very simple reason that \nit seems to work well enough for me.\n\"\"\"\n\nmodule = 'thrash_protect'\nbuild = '_build'\n\nbasedir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(basedir)\n\nif not os.path.exists(build):\n os.mkdir(build)\n\nshutil.copy('thrash-protect.py', '%s/%s.py' % (build, module))\n\nwith open(os.path.join(basedir, 'thrash-protect.py')) as f:\n _moduletext = f.read()\n\ndef readmeta(fieldname):\n return re.search(r'__%s__\\s*=\\s*\"(.*)\"' % re.escape(fieldname), _moduletext).group(1).strip()\n\nsetup(\n name='thrash-protect',\n version=readmeta('version'),\n description='Simple-Stupid user-space program doing \"kill -STOP\" and \"kill -CONT\" to protect from thrashing',\n long_description=long_description.strip(),\n license='GPLv3+',\n url='https://github.com/tobixen/thrash-protect',\n\n author=readmeta('author'),\n author_email=readmeta('email'),\n\n package_dir={'': build},\n\n py_modules=[module],\n zip_safe=False,\n include_package_data=True,\n\n extras_require=dict(\n build=['twine', 'wheel', 'setuptools-git'],\n # test=['pytest', 'testfixtures', 'pytest-cov'],\n ),\n\n entry_points={\n \"console_scripts\": ['thrash-protect=%s:main' % module]\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n \"Topic :: Utilities\",\n \"Topic :: System :: Software Distribution\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.0\",\n \"Programming Language :: Python :: 3.1\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n ],\n # setup_requires=['pytest-runner'],\n # tests_require=['pytest-cov', 'pytest', 'testfixtures'],\n\n)\nos.chdir(basedir)\nshutil.rmtree(build, ignore_errors=True)\n","repo_name":"tobixen/thrash-protect","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"61"}
+{"seq_id":"1639570102","text":"# python3\n\nclass Buffer:\n\n def __init__(self, size):\n self.size = size\n self.end_time = []\n\n def delete_processed(self, request):\n \"\"\"deletes processed elements of the buffer by the request's arrival time.\"\"\"\n while self.end_time:\n if self.end_time[0] <= request.arrival_time:\n self.end_time.pop(0)\n else:\n break\n\n def process(self, request):\n \"\"\"Processes incoming request.\"\"\"\n self.delete_processed(request)\n\n if len(self.end_time) == self.size: #if buffer is full\n return Response(True, -1)\n\n if len(self.end_time) == 0: #if buffer is empty\n self.end_time = [request.arrival_time + request.process_time]\n return Response(False, request.arrival_time)\n\n response = Response(False, self.end_time[-1])\n self.end_time.append(self.end_time[-1] + request.process_time)\n return response\n\n\nclass Request:\n \"\"\"Incoming network packet.\"\"\"\n\n def __init__(self, arrival_time, process_time):\n self.arrival_time = arrival_time\n self.process_time = process_time\n\n\nclass Response:\n \"\"\"Response of the network buffer.\"\"\"\n\n def __init__(self, dropped, start_time):\n self.dropped = dropped\n self.start_time = start_time\n\n\ndef read_requests(count):\n requests = []\n for i in range(count):\n arrival_time, process_time = map(int, input().strip().split())\n requests.append(Request(arrival_time, process_time))\n return requests\n\n\ndef process_requests(requests, buffer):\n return [buffer.process(r) for r in requests]\n\n\ndef print_responses(responses):\n for response in responses:\n print(response.start_time if not response.dropped else -1)\n\n\nif __name__ == \"__main__\":\n size, count = map(int, input().strip().split())\n requests = read_requests(count)\n buffer = Buffer(size)\n responses = process_requests(requests, buffer)\n print_responses(responses)\n","repo_name":"amogchandrashekar/Data-Structures-and-Algorithms-Specialization","sub_path":"Data Structures/week1_Basic_Data_Structures/process_packages.py","file_name":"process_packages.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"43032176442","text":"from turtle import Turtle \n\nfile = open('hi.txt', 'r')\ncont = file.read()\nif cont.isnumeric():\n p = int(cont) \nfile.close()\n\nclass Score(Turtle):\n \n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.hideturtle()\n self.clear()\n self.penup()\n self.pencolor('white')\n self.score = 0\n self.hscore = p\n self.goto(0, 170)\n self.write(f'Score: {self.score}. Highest score: {self.hscore}', move = False, align = 'center', font=(\"Arial\",15,\"normal\"))\n \n \n def up(self):\n self.clear()\n self.score += 1\n self.write(f'Score: {self.score}. Highest score: {self.hscore}', move = False, align = 'center', font=(\"Arial\",15,\"normal\"))\n \n \n def resset(self):\n if self.score > self.hscore:\n self.hscore = self.score\n file = open('hi.txt', 'w')\n file.write(str(self.hscore))\n file.close()\n self.score = 0 \n self.clear()\n self.write(f'Score: {self.score}. Highest score: {self.hscore}', move = False, align = 'center', font=(\"Arial\",15,\"normal\"))\n\n ","repo_name":"OmarMahmoud2/snakeGame","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12641457527","text":"from framework.base import BasePage\nfrom selenium.webdriver.common.by import By\nfrom framework.logger import Logger\n\nligger=Logger(logger=\"FourDiscuz\").getlog()\n\nclass FourDiscuz(BasePage):\n fatie_button_newtie_loc=(By.ID,'newspecial')\n toupiao_button_newtie_loc=(By.CSS_SELECTOR,'#newspecial_menu > li.poll > a')\n title_input_newtie_loc=(By.NAME,'subject')\n body1_input_newtie_loc=(By.CSS_SELECTOR,'#pollm_c_1 > p:nth-child(1) > input')\n body2_input_newtie_loc=(By.CSS_SELECTOR,'#pollm_c_1 > p:nth-child(2) > input')\n body3_inout_newtie_loc=(By.CSS_SELECTOR,'#pollm_c_1 > p:nth-child(3) > input')\n submit_button_newtie_loc=(By.ID,'postsubmit')\n\n toupiao_button_toupiao_loc=(By.ID,'option_1')\n submit_button_toupiao_loc=(By.NAME,'pollsubmit')\n\n title_text_toupiaodata_loc=(By.ID,'thread_subject')\n data1_join_loc=(By.CSS_SELECTOR,'td.pvt label')\n data2_join_loc=(By.CSS_SELECTOR,'div.pcbs form tr > td:nth-child(2)')\n\n\n # 发表投票帖子\n def newtie(self,title,body1,body2,body3):\n self.move_to_element(*self.fatie_button_newtie_loc)\n self.click(*self.toupiao_button_newtie_loc)\n self.current_window_handle()\n self.sendkeys(title,*self.title_input_newtie_loc)\n self.sendkeys(body1,*self.body1_input_newtie_loc)\n self.sendkeys(body2,*self.body2_input_newtie_loc)\n self.sendkeys(body3,*self.body3_inout_newtie_loc)\n self.click(*self.submit_button_newtie_loc)\n\n # 进行投票\n def toupiao(self):\n self.click(*self.toupiao_button_toupiao_loc)\n self.click(*self.submit_button_toupiao_loc)\n\n # 取出投票各个选项的名称以及投票比例,取出投票的主题名称\n def toupiaodata(self):\n self.current_window_handle()\n loc_title=self.find_element(*self.title_text_toupiaodata_loc)\n title=self.text(loc_title)\n print(\"投票的主题名称:\",title)\n data1_list=self.find_elements(*self.data1_join_loc)\n data2_list=self.find_elements(*self.data2_join_loc)\n for i in range(0,len(data1_list)):\n print(\"选项的名称:\",self.text(data1_list[i]),\"投票比例\",self.text(data2_list[i*2+1]))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"huizi1998/webUI_autoTest","sub_path":"pageobjects/discuz04.py","file_name":"discuz04.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28340623083","text":"def lists(list1):\n sublist=[[]]\n for i in range(len(list1) + 1):\n for j in range(i+1,len(list1) + 1):\n s=list1[i:j]\n sublist.append(s)\n\n return sublist\nl=[1, 2, 3,4,5]\nprint(lists(l))","repo_name":"Padmajaya123/simple-python-programs","sub_path":"sublist.py","file_name":"sublist.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34006007874","text":"import tkinter as tk\r\nimport tkinter.ttk as ttk\r\nimport open3d as o3d\r\nimport numpy as np\r\nfrom tkinter import filedialog,IntVar,messagebox\r\nimport os\r\nimport simplejson as json\r\nimport xlsxwriter\r\nimport time\r\n\r\n\r\n\r\nclass NewprojectApp:\r\n def __init__(self, master=None):\r\n # build ui\r\n self.PointCloudApp = ttk.Frame(master)\r\n self.label_1 = ttk.Label(self.PointCloudApp)\r\n self.label_1.configure(anchor='w', font='{Helvetica} 14 {}', text='Welcome to the Point Cloud Application.')\r\n self.label_1.grid(columnspan='5')\r\n self.entry_1 = ttk.Entry(self.PointCloudApp)\r\n self.entry_1.grid(ipadx='30', row='2')\r\n self.entry_11 = ttk.Entry(self.PointCloudApp)\r\n self.button_1 = ttk.Button(self.PointCloudApp, command=lambda:self.fileopen(self.entry_1,self.entry_11))\r\n self.button_1.configure(text='Select File')\r\n self.button_1.grid(column='1', padx='2', row='2')\r\n self.button_2 = ttk.Button(self.PointCloudApp, command=lambda:self.clear_text(self.entry_1,self.entry_11))\r\n self.button_2.configure(text='Remove')\r\n self.button_2.grid(column='2', padx='2', row='2')\r\n self.label_2 = ttk.Label(self.PointCloudApp)\r\n self.label_2.configure(font='{Times} 11 {}', text='Please Select the Point Cloud file.')\r\n self.label_2.grid(row='1', sticky='w')\r\n self.button_3 = ttk.Button(self.PointCloudApp, command =self.view_point_cloud)\r\n self.button_3.configure(state='disabled', text='View Point Cloud')\r\n self.button_3.grid(column='1', columnspan='2', ipadx='28', pady='5', row='3')\r\n self.separator_1 = ttk.Separator(self.PointCloudApp)\r\n self.separator_1.configure(orient='horizontal')\r\n self.separator_1.grid(column='0', columnspan='3', ipadx='180', pady='5', row='4')\r\n self.label_3 = ttk.Label(self.PointCloudApp)\r\n self.label_3.configure(font='{Times} 11 {}', text='Compute distance between two points')\r\n self.label_3.grid(column='0', row='5', sticky='w')\r\n self.entry_2 = ttk.Entry(self.PointCloudApp)\r\n self.var2 = tk.IntVar()\r\n self.entry_2.configure(textvariable=self.var2)\r\n self.entry_2.grid(column='0', ipadx='30', row='6')\r\n self.entry_2.rowconfigure('6', minsize='0')\r\n self.label_4 = ttk.Label(self.PointCloudApp)\r\n self.label_4.configure(font='{Times} 10 {}', text='and')\r\n self.label_4.grid(column='0', row='7')\r\n self.entry_3 = ttk.Entry(self.PointCloudApp)\r\n self.var3 = tk.IntVar()\r\n self.entry_3.configure(textvariable=self.var3)\r\n self.entry_3.grid(column='0', ipadx='30', row='8')\r\n self.button_4 = ttk.Button(self.PointCloudApp)\r\n self.button_4.configure(text='Measure', command=self.measure_distance)\r\n self.button_4.grid(column='1', columnspan='2', ipadx='10', ipady='10', row='6', rowspan='2')\r\n self.text_1 = tk.Text(self.PointCloudApp)\r\n self.var4 = tk.DoubleVar()\r\n self.text_1.configure( height='0', width='10')\r\n self.text_1.grid(column='0', pady='5', row='9')\r\n self.label_5 = ttk.Label(self.PointCloudApp)\r\n self.label_5.configure(text='Distance: ')\r\n self.label_5.grid(column='0', padx='5', pady='5', row='9', sticky='w')\r\n self.label_6 = ttk.Label(self.PointCloudApp)\r\n self.label_6.configure(text='mm')\r\n self.label_6.grid(column='0', padx='30', pady='5', row='9', sticky='e')\r\n self.separator_2 = ttk.Separator(self.PointCloudApp)\r\n self.separator_2.configure(orient='horizontal')\r\n self.separator_2.grid(column='0', columnspan='3', ipadx='180', pady='5', row='10')\r\n self.label_7 = ttk.Label(self.PointCloudApp)\r\n self.label_7.configure(font='{Times} 
11 {}', text='Calculate Coordinates of the holes.')\r\n self.label_7.grid(column='0', row='11', sticky='w')\r\n self.text_2 = tk.Text(self.PointCloudApp)\r\n self.text_2.configure(height='5', width='30')\r\n self.text_2.grid(column='0', columnspan='2', ipadx='10', row='12')\r\n self.button_5 = ttk.Button(self.PointCloudApp, command=self.find_centroid)\r\n self.button_5.configure(text='Calculate')\r\n self.button_5.grid(column='2', ipady='10', row='12')\r\n self.separator_3 = ttk.Separator(self.PointCloudApp)\r\n self.separator_3.configure(orient='horizontal')\r\n self.separator_3.grid(column='0', columnspan='3', ipadx='180', pady='5', row='13')\r\n self.label_8 = ttk.Label(self.PointCloudApp)\r\n self.label_8.configure(font='{Times} 11 {}', text='Export Data')\r\n self.label_8.grid(column='0', row='14', sticky='w')\r\n self.entry_5 = ttk.Entry(self.PointCloudApp)\r\n self.entry_5.grid(column='0', ipadx='30', row='15')\r\n self.button_6 = ttk.Button(self.PointCloudApp, command=lambda:self.folderopen(self.entry_5))\r\n self.button_6.configure(text='Select Folder')\r\n self.button_6.grid(column='1', padx='2', row='15')\r\n self.button_7 = ttk.Button(self.PointCloudApp, command= self.export_data)\r\n self.button_7.configure(text='Export')\r\n self.button_7.grid(column='2', padx='2', row='15')\r\n self.progressbar_1 = ttk.Progressbar(self.PointCloudApp)\r\n self.progressbar_1.configure(length='370', orient='horizontal')\r\n self.progressbar_1.grid(column='0', columnspan='3', pady='5', row='16')\r\n self.separator_4 = ttk.Separator(self.PointCloudApp)\r\n self.separator_4.configure(orient='vertical')\r\n self.separator_4.grid(column='3', ipady='200', padx='10', row='0', rowspan='17')\r\n self.text_3 = tk.Text(self.PointCloudApp)\r\n self.text_3.configure(font='{calibri} 10 {}', height='25', insertwidth='50', width='50', state='disabled')\r\n _text_ = '''Help Guide!\\n\r\n--Load a point cloud file by clicking on \\'Select File\\'. \\n\r\n--You can view the loaded file by clicking on \\'View Point Cloud\\'. \\n \r\n--To measure the distance between two points click on\\n \\'Measure\\'. \\n\r\n--To calculate the holes coordinates click on \\'Calculate\\'. \\n\r\n--After doing the calculations and/or measurements, click on\\n \\'Export\\' to export generated data. \\n\r\n--You can also select a specific folder to store exported data.\\n Click on \\'Select Folder\\'. \\n\r\n--The exported file will be in .xlsx format with the time stamp of\\n file generation. 
'''\r\n self.text_3.configure(state='normal')\r\n self.text_3.insert('0.0', _text_)\r\n self.text_3.configure(state='disabled')\r\n self.text_3.grid(column='4', row='1', rowspan='17')\r\n self.scrollbar_2 = ttk.Scrollbar(self.PointCloudApp, command=self.text_3.yview)\r\n self.scrollbar_2.configure(orient='vertical')\r\n self.scrollbar_2.grid(column='5', ipady='164', row='1', rowspan='17', sticky='e')\r\n self.text_3['yscrollcommand'] = self.scrollbar_2.set\r\n self.message_2 = tk.Message(self.PointCloudApp)\r\n self.var = tk.StringVar('')\r\n self.message_2.configure(font='{calibri} 8 {}',textvariable=self.var, width='200')\r\n self.message_2.grid(column='0', row='3')\r\n self.PointCloudApp.configure(cursor='arrow', height='500', takefocus=False, width='400')\r\n self.PointCloudApp.pack(side='top')\r\n\r\n # Main widget\r\n self.mainwindow = self.PointCloudApp\r\n self.measure_dist_lst = []\r\n self.centroid_lst = []\r\n self.out_dir = None\r\n\r\n\r\n def run(self):\r\n self.mainwindow.mainloop()\r\n \r\n def view_point_cloud(self):\r\n text = '''-- Mouse view control --\r\n Left button + drag : Rotate.\r\n Ctrl + left button + drag : Translate.\r\n Wheel button + drag : Translate.\r\n Shift + left button + drag : Roll.\r\n Wheel : Zoom in/out.\r\n\r\n-- Keyboard view control --\r\n [/] : Increase/decrease field of view.\r\n R : Reset view point.\r\n Ctrl/Cmd + C : Copy current view status into the clipboard.\r\n Ctrl/Cmd + V : Paste view status from clipboard.\r\n\r\n-- General control --\r\n Q, Esc : Exit window.\r\n H : Print help message.\r\n P, PrtScn : Take a screen capture.\r\n D : Take a depth capture.\r\n O : Take a capture of current rendering settings.\r\n Alt + Enter : Toggle between full screen and windowed mode.\r\n\r\n-- Render mode control --\r\n L : Turn on/off lighting.\r\n +/- : Increase/decrease point size.\r\n Ctrl + +/- : Increase/decrease width of geometry::LineSet.\r\n N : Turn on/off point cloud normal rendering.\r\n S : Toggle between mesh flat shading and smooth shading.\r\n W : Turn on/off mesh wireframe.\r\n B : Turn on/off back face rendering.\r\n I : Turn on/off image zoom in interpolation.\r\n T : Toggle among image render:\r\n no stretch / keep ratio / freely stretch.'''\r\n self.text_3.configure(state='normal')\r\n self.text_3.delete(3.0,tk.END)\r\n self.text_3.insert('3.0', text)\r\n self.text_3.configure(state='disabled')\r\n self.PointCloudApp.update_idletasks()\r\n o3d.visualization.draw_geometries([self.pcd], width=1080, height=700)\r\n \r\n def fileopen(self, x,y):\r\n x.delete(0, 'end')\r\n y.delete(0, 'end')\r\n self.excel1=filedialog.askopenfilename()\r\n y.insert(tk.END,self.excel1)\r\n self.excel2 = os.path.basename(self.excel1)\r\n x.insert(tk.END,self.excel2)\r\n self.pcd = o3d.io.read_point_cloud(self.excel1)\r\n self.var.set(f'{self.excel2} loaded successfully.' 
)\r\n self.button_3['state']=tk.NORMAL\r\n self.iteration = 1\r\n self.pcd_asarray = np.asarray(self.pcd.points)\r\n self.measure_dist_lst = []\r\n self.centroid_lst = []\r\n \r\n def folderopen(self,x):\r\n x.delete(0, 'end')\r\n self.out_dir = filedialog.askdirectory()\r\n x.insert(tk.END,self.out_dir)\r\n \r\n def clear_text(self,x,y):\r\n x.delete(0, 'end')\r\n y.delete(0, 'end')\r\n self.var.set('Point Cloud file removed.')\r\n self.button_3['state']=tk.DISABLED\r\n self.var2.set('')\r\n self.var3.set('')\r\n self.text_1.delete(1.0,tk.END)\r\n self.text_2.delete(1.0,tk.END)\r\n self.entry_5.delete(0,'end')\r\n \r\n \r\n def measure_distance(self):\r\n print(type(self.entry_2.get()),self.entry_2.get())\r\n if self.entry_2.get() == '0' or self.entry_3.get() == '0':\r\n self.eucd_distance()\r\n else:\r\n self.distance_bw_holes()\r\n \r\n def distance_bw_holes(self):\r\n self.point1 = np.asarray(json.loads(self.entry_2.get()))\r\n self.point2 = np.asarray(json.loads(self.entry_3.get()))\r\n self.distance = np.linalg.norm(self.point1-self.point2)\r\n self.text_1.delete(1.0,2.0)\r\n self.text_1.insert(1.0,self.distance)\r\n self.measure_dist_lst.append([self.point1, self.point2, self.distance])\r\n \r\n def eucd_distance(self):\r\n text = '''\\n1) Please pick two correspondences using [shift + left click]\r\n Press [shift + right click] to undo point picking\r\n2) After picking points, press 'Q' to close the window'''\r\n self.text_3.configure(state='normal')\r\n self.text_3.delete(2.0,tk.END)\r\n self.text_3.insert('2.0', text)\r\n self.text_3.configure(state='disabled')\r\n self.PointCloudApp.update_idletasks()\r\n self.vis = o3d.visualization.VisualizerWithEditing()\r\n self.vis.create_window(width=1080, height=700)\r\n self.vis.add_geometry(self.pcd)\r\n self.vis.run() # user picks points\r\n self.vis.destroy_window()\r\n # print(\"\")\r\n self.points = self.vis.get_picked_points()\r\n self.point1 = self.pcd_asarray[self.points[0]]\r\n self.var2.set(self.point1)\r\n self.point2 = self.pcd_asarray[self.points[1]]\r\n self.var3.set(self.point2)\r\n self.distance = np.linalg.norm(self.point1-self.point2)\r\n self.text_1.delete(1.0,2.0)\r\n self.text_1.insert(1.0,self.distance)\r\n self.measure_dist_lst.append([self.point1, self.point2, self.distance])\r\n \r\n # return print(f'Distance between point {a} and point {b} is {distance}')\r\n \r\n def find_centroid(self):\r\n text = '''\\n1) Please pick at least three correspondences using [shift + left click]\r\n Press [shift + right click] to undo point picking\r\n2) After picking points, press 'Q' to close the window'''\r\n self.text_3.configure(state='normal')\r\n self.text_3.delete(2.0,tk.END)\r\n self.text_3.insert('2.0', text)\r\n self.text_3.configure(state='disabled')\r\n self.PointCloudApp.update_idletasks()\r\n self.vis = o3d.visualization.VisualizerWithEditing()\r\n self.vis.create_window(width=1080, height=700)\r\n self.vis.add_geometry(self.pcd)\r\n self.vis.run() # user picks points\r\n self.vis.destroy_window()\r\n # print(\"\")\r\n self.points = self.pcd_asarray[self.vis.get_picked_points()]\r\n self.centroid = np.mean(self.points, axis=0)\r\n self.holes_cood = f'Hole {self.iteration} co-ordinaters are: {self.centroid}'\r\n self.text_2.insert(tk.END, self.holes_cood+'\\n')\r\n self.centroid_lst.append(['Hole '+str(self.iteration), self.centroid])\r\n self.iteration +=1\r\n \r\n # return print(f'The hole coordinate is {self.centroid}')\r\n \r\n def export_data(self):\r\n time_str=time.strftime(\"%y_%m_%d__%H_%M_%S\")\r\n if 
self.out_dir:\r\n            file = self.out_dir+'/report_'+time_str+'.xlsx'\r\n        else:\r\n            file = os.path.dirname(self.excel1)+'/report_'+time_str+'.xlsx'\r\n        # one workbook either way, named with the generation time stamp\r\n        workbook = xlsxwriter.Workbook(file)\r\n\r\n        if self.measure_dist_lst:\r\n            self.progressbar_1['value']=30\r\n            self.PointCloudApp.update_idletasks()\r\n            worksheet1 = workbook.add_worksheet(name='distance measurement')\r\n            worksheet1.write(0,0,'Sr. No.')\r\n            worksheet1.write(0,1,'Point1')\r\n            worksheet1.write(0,2,'Point2')\r\n            worksheet1.write(0,3,'Distance Point1 to Point2')\r\n            row = 1\r\n            for i, j in enumerate(self.measure_dist_lst):\r\n                worksheet1.write(row,0,i+1)\r\n                worksheet1.write(row,1,str(j[0]))\r\n                worksheet1.write(row,2,str(j[1]))\r\n                worksheet1.write(row,3,str(j[2]))\r\n                row +=1\r\n        if self.centroid_lst:\r\n            self.progressbar_1['value']=60\r\n            self.PointCloudApp.update_idletasks()\r\n            worksheet2 = workbook.add_worksheet(name='Holes Centroid')\r\n            worksheet2.write(0,0,'Sr. No.')\r\n            worksheet2.write(0,1,'Holes')\r\n            worksheet2.write(0,2,'Co-ordinates')\r\n            row=1\r\n            for i, j in enumerate(self.centroid_lst):\r\n                worksheet2.write(row,0,i+1)\r\n                worksheet2.write(row,1,str(j[0]))\r\n                worksheet2.write(row,2,str(j[1]))\r\n                row+=1\r\n\r\n        if not (self.centroid_lst or self.measure_dist_lst):\r\n            messagebox.showerror(\"Error Exporting Data\",\"There is no data to export. \\n\\nPlease make sure to generate some data before exporting it.\")\r\n        else:\r\n            workbook.close()\r\n            self.progressbar_1['value']=100\r\n            messagebox.showinfo(\"Export Successful\",f'Your data is exported to the file at the following location. \\n\\n {file}')\r\n        self.progressbar_1['value']=0\r\n\r\n\r\nif __name__ == '__main__':\r\n    import tkinter as tk\r\n    root = tk.Tk()\r\n    root.resizable(width=False, height=False)\r\n    root.title(\"Point Cloud Application\")\r\n    #root.iconbitmap('logo_small.ico')\r\n    app = NewprojectApp(root)\r\n    app.run()\r\n\r\n","repo_name":"kaustubh-shrotri/point-cloud-app","sub_path":"PointCloudApp.py","file_name":"PointCloudApp.py","file_ext":"py","file_size_in_byte":16054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"19219540491","text":"'''Current repo vars.\n\n'''\nload(\n '//rules:rctx_util.bzl',\n 'rctx_util',\n)\n\ndef _repo_vars_impl(rctx):\n # Read track file content but doing nothing, in order to follow the track\n # file's change.\n rctx.read(rctx.attr.track_file)\n\n rctx_util.create_root_workspace(rctx)\n rctx_util.create_root_build(rctx)\n REPO_DIR = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'cd $(dirname %s) && git rev-parse --show-toplevel' %\n rctx.path(rctx.attr.src),\n ]).strip('\\n')\n\n # Create vars.bzl.\n timestamp = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'echo $(date +%F-%T:%N)',\n ]).strip('\\n')\n timestamp_seconds = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'echo $(date +%s)',\n ]).strip('\\n')\n head = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'cd %s && git rev-parse HEAD' % REPO_DIR,\n ]).strip('\\n')\n head_time = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'cd %s && git show -s --format=%%ci HEAD' % REPO_DIR,\n ]).strip('\\n')\n branch = rctx_util.execute(rctx, [\n 'sh',\n '-c',\n 'cd %s && git rev-parse --abbrev-ref HEAD' % REPO_DIR,\n ]).strip('\\n')\n if branch == 'HEAD':\n branch = ''\n\n tag_result = rctx.execute([\n 'sh',\n '-c',\n 'cd %s && ' % REPO_DIR +\n \"git describe --exact-match --tags $(git log -n1 --pretty='%h')\",\n ])\n tag = ''\n if tag_result.return_code == 0:\n tag = tag_result.stdout.strip('\\n')\n vars_dict = {\n 'TIME_STAMP': timestamp,\n 'TIME_STAMP_SECONDS': timestamp_seconds,\n 'GIT_HEAD': head,\n 'GIT_HEAD_TIME': head_time,\n 'GIT_BRANCH': branch,\n 'GIT_TAG': tag,\n }\n _create_dict_bzl(rctx, vars_dict, 'vars.bzl')\n\n\ndef _create_dict_bzl(rctx, dict_obj, out):\n content = ''\n for key, value in dict_obj.items():\n if type(value) == 'string':\n content = content + \"%s = '%s'\\n\" % (key, value)\n else:\n content = content + '%s = %s\\n' % (key, str(value))\n rctx.file(\n out,\n content,\n )\n\n\nrepo_vars = repository_rule(\n implementation = _repo_vars_impl,\n attrs = {\n 'src': attr.label(\n mandatory = True,\n doc = 'A file in repo root dir',\n ),\n 'track_file': attr.label(\n mandatory = True,\n doc = 'A file would changed after each build',\n ),\n },\n)\n","repo_name":"mirandaprivate/smartreport","sub_path":"rules/repo_vars.bzl","file_name":"repo_vars.bzl","file_ext":"bzl","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74071613634","text":"import pytest\nimport shakedown\n\nimport sdk_cmd as cmd\nimport sdk_install as install\nimport sdk_marathon as marathon\nimport sdk_utils\n\nfrom tests.config import (\n PACKAGE_NAME,\n check_running\n)\n\n\ndef setup_module(module):\n install.uninstall(PACKAGE_NAME)\n options = install.get_package_options({ \"service\": { \"spec_file\": \"examples/taskcfg.yml\" } })\n # don't wait for install to complete successfully:\n shakedown.install_package(PACKAGE_NAME, options_json=options)\n\n\ndef teardown_module(module):\n install.uninstall(PACKAGE_NAME)\n\n\n@pytest.mark.sanity\ndef test_deploy():\n wait_time = 30\n # taskcfg.yml will initially fail to deploy because several options are missing in the default\n # marathon.json.mustache. verify that tasks are failing for 30s before continuing.\n sdk_utils.out('Checking that tasks are failing to launch for at least {}s'.format(wait_time))\n\n # we can get brief blips of TASK_RUNNING but they shouldnt last more than 2-3s:\n consecutive_task_running = 0\n def fn():\n nonlocal consecutive_task_running\n svc_tasks = shakedown.get_service_tasks(PACKAGE_NAME)\n states = [t['state'] for t in svc_tasks]\n sdk_utils.out('Task states: {}'.format(states))\n if 'TASK_RUNNING' in states:\n consecutive_task_running += 1\n assert consecutive_task_running <= 3\n else:\n consecutive_task_running = 0\n return False\n\n try:\n shakedown.wait_for(lambda: fn(), timeout_seconds=wait_time)\n except shakedown.TimeoutExpired:\n sdk_utils.out('Timeout reached as expected')\n\n # add the needed envvars in marathon and confirm that the deployment succeeds:\n config = marathon.get_config(PACKAGE_NAME)\n env = config['env']\n del env['SLEEP_DURATION']\n env['TASKCFG_ALL_OUTPUT_FILENAME'] = 'output'\n env['TASKCFG_ALL_SLEEP_DURATION'] = '1000'\n marathon.update_app(PACKAGE_NAME, config)\n\n check_running()\n","repo_name":"akshitjain/dcos-commons_edited","sub_path":"frameworks/helloworld/tests/test_taskcfg.py","file_name":"test_taskcfg.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29859768175","text":"import logging\n\nfrom fastapi import FastAPI, BackgroundTasks, Request\nfrom fastapi.responses import JSONResponse\n\nfrom models import CreateOrderResponseModel, CreateOrderModel\nfrom orders_dao import dao\nfrom orders_service import create_and_place_order\n\nlogger = logging.getLogger(__name__)\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\ndef on_startup():\n logger.info(\"Creating tables\")\n dao.create_db_and_tables()\n\n\n@app.post(\n \"/orders\",\n status_code=201,\n response_model=CreateOrderResponseModel,\n response_model_by_alias=True,\n)\nasync def create_order(create_order_request: CreateOrderModel, background_tasks: BackgroundTasks):\n return create_and_place_order(create_order_request, background_tasks)\n\n\n@app.exception_handler(Exception)\nasync def exception_handler(request: Request, exc: Exception):\n return JSONResponse(\n status_code=500,\n content={\"message\": \"Internal server error while placing the order\"}\n )\n","repo_name":"vinaynikhil313/exercise-backend","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23637529301","text":"'''\nCreated on 14.04.2012\n\n@author: max\n'''\nfi = open('B-large.in', 'r')\nfi.readline()\nfo = open('output.txt', 'w+')\n\nfor i, line in enumerate(fi):\n if not line: continue\n params = [int(p) for p in line.split()]\n surp = params[1]; p = params[2]; scores = params[3:]\n \n counter = 0\n for score in scores:\n if score < p: continue\n rest = score - p; half = rest / 2\n if half >= p:\n counter += 1\n elif half < p:\n if p - half == 1:\n counter += 1\n elif p - half == 2 and surp > 0:\n counter += 1; surp -= 1\n \n print >>fo, 'Case #%i: %i' % (i+1, counter)\n \n \n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1144.py","file_name":"1144.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71100704513","text":"__doc__ = \"\"\"Configuration of Muon Spectrometer Standalone muon reconstruction\"\"\"\n\n###############################################################\n#\n# Configuration for Standalone\n#\n#==============================================================\nfrom AthenaCommon import CfgMgr\n\nfrom .MuonStandaloneFlags import muonStandaloneFlags,MoorelikeStrategy\nfrom .MuonRecFlags import muonRecFlags\nfrom .ConfiguredMuonRec import ConfiguredMuonRec\n\nfrom .MuonRecUtils import ExtraFlags\n\nfrom AthenaCommon.CfgGetter import getPublicTool,getPrivateTool,getPublicToolClone\nfrom RecExConfig.ObjKeyStore import cfgKeyStore\n\nfrom AtlasGeoModel.MuonGMJobProperties import MuonGeometryFlags\n#==============================================================\n\n# call setDefaults to update default flags\nmuonRecFlags.setDefaults()\nmuonStandaloneFlags.setDefaults()\n\n#\n# Tools for MuPat track building\n#\n\ndef MuonTrackSteering(name=\"MuonTrackSteering\", extraFlags=None, **kwargs):\n if extraFlags is None:\n extraFlags = ExtraFlags()\n \n extraFlags.setFlagDefault(\"namePrefix\", \"MuSt_\")\n extraFlags.setFlagDefault(\"doSegmentPhiMatching\", True)\n extraFlags.setFlagDefault(muonStandaloneFlags.optimiseMomentumResolutionUsingChi2) # take name & value from JobProperty\n extraFlags.setFlagDefault(muonStandaloneFlags.strategy)\n extraFlags.setFlagDefault(muonStandaloneFlags.trackBuilder)\n extraFlags.setFlagDefault(muonStandaloneFlags.printSummary)\n extraFlags.setFlagDefault(muonStandaloneFlags.refinementTool)\n \n if extraFlags.strategy:\n kwargs.setdefault(\"StrategyList\", extraFlags.strategy)\n else:\n kwargs.setdefault(\"StrategyList\", MoorelikeStrategy) \n\n kwargs.setdefault(\"DoSummary\", extraFlags.printSummary)\n kwargs.setdefault(\"HoleRecoveryTool\",getPublicTool(\"MuonEORecoveryTool\"))\n if \"TrackBuilderTool\" not in kwargs:\n extraFlags.setFlagDefault('UseTrackingHistory',True)\n kwargs[\"TrackBuilderTool\"] = getPublicToolClone(\"MooMuonTrackBuilder\", \"MooTrackBuilderTemplate\",\n extraFlags=extraFlags)\n if \"TrackRefinementTool\" not in kwargs:\n kwargs[\"TrackRefinementTool\"] = getPublicTool(\"MooTrackBuilderTemplate\")\n\n kwargs.setdefault(\"SegSeedQCut\", 2)\n kwargs.setdefault(\"Seg2ndQCut\", 1)\n return CfgMgr.Muon__MuonTrackSteering(name,**kwargs)\n\ndef MuonSegmentFilterAlg(name=\"MuonSegmentFilterAlg\", **kwargs):\n kwargs.setdefault(\"SegmentCollectionName\", \"TrackMuonSegments\")\n return CfgMgr.MuonSegmentFilterAlg(name, **kwargs)\n\ndef MuonSegmentFinderNCBAlg(name=\"MuonSegmentMaker_NCB\", **kwargs):\n ### Only use the TGC measurements from the current bunch crossing\n kwargs.setdefault(\"TGC_PRDs\", \"TGC_Measurements\")\n reco_cscs = muonRecFlags.doCSCs() and MuonGeometryFlags.hasCSC()\n reco_mircomegas = muonRecFlags.doMMs() and MuonGeometryFlags.hasMM()\n reco_stgc = muonRecFlags.dosTGCs() and MuonGeometryFlags.hasSTGC()\n kwargs.setdefault(\"doStgcSegments\", reco_stgc)\n kwargs.setdefault(\"doMMSegments\", reco_mircomegas)\n kwargs.setdefault(\"doMdtSegments\", False)\n kwargs.setdefault(\"CSC_clusterkey\", \"CSC_Clusters\" if reco_cscs else \"\")\n ## Define the output container\n kwargs.setdefault(\"SegmentCollectionName\", \"NCB_TrackMuonSegments\")\n ### Do not rebuild the NSW alignment segments\n kwargs.setdefault(\"NSWSegmentCollectionName\", \"\")\n kwargs.setdefault(\"SegmentQuality\", 1)\n\n ### Do not recombine the segments\n kwargs.setdefault(\"SegmentCombiner\", 
getPublicTool(\"MuonCurvedSegmentCombiner\"))\n kwargs.setdefault(\"RunSegmentCombiner\", False)\n ### Setup the CSC segment maker\n if reco_cscs:\n cscSegmentUtilTool = getPublicToolClone(\"CscSegmentUtilTool_NCB\",\n \"CscSegmentUtilTool\",\n TightenChi2 = False, \n IPconstraint=False) \n kwargs.setdefault(\"Csc2dSegmentMaker\", getPublicToolClone(\"Csc2dSegmentMaker_NCB\",\"Csc2dSegmentMaker\",\n segmentTool = cscSegmentUtilTool))\n kwargs.setdefault(\"Csc4dSegmentMaker\",getPublicToolClone(\"Csc4dSegmentMaker_NCB\",\"Csc4dSegmentMaker\",\n segmentTool = getPublicTool(\"CscSegmentUtilTool_NCB\")))\n ### Setup the NSW segment maker\n if reco_mircomegas or reco_stgc:\n kwargs.setdefault(\"MuonClusterCreator\", getPublicTool(\"MuonClusterOnTrackCreator\"))\n Cleaner = getPublicToolClone(\"MuonTrackCleaner_seg\",\"MuonTrackCleaner\")\n Cleaner.Extrapolator = getPublicTool(\"MuonStraightLineExtrapolator\")\n Cleaner.Fitter = getPublicTool(\"MCTBSLFitterMaterialFromTrack\")\n Cleaner.PullCut = 3\n Cleaner.PullCutPhi = 3\n Cleaner.UseSLFit = True\n kwargs.setdefault(\"NSWSegmentMaker\", getPublicToolClone(\"MuonNSWSegmentFinderTool_NCB\",\n \"MuonNSWSegmentFinderTool\",\n TrackCleaner = Cleaner,\n SeedMMStereos = False,\n IPConstraint = False) )\n\n \n return CfgMgr.MuonSegmentFinderAlg(name, **kwargs)\n\n\ndef MuonSegmentFinderAlg( name=\"MuonSegmentMaker\", **kwargs):\n from AthenaCommon.BeamFlags import jobproperties\n beamFlags = jobproperties.Beam\n \n SegmentFinder = getPublicTool(\"MuonNSWSegmentFinderTool\")\n Cleaner = getPublicToolClone(\"MuonTrackCleaner_seg\",\"MuonTrackCleaner\")\n Cleaner.Extrapolator = getPublicTool(\"MuonStraightLineExtrapolator\")\n Cleaner.Fitter = getPublicTool(\"MCTBSLFitterMaterialFromTrack\")\n Cleaner.PullCut = 3\n Cleaner.PullCutPhi = 3\n Cleaner.UseSLFit = True\n SegmentFinder.TrackCleaner = Cleaner\n # for test purposes allow parallel running of truth segment finding and new segment finder\n SegmentLocation = \"TrackMuonSegments\"\n if muonStandaloneFlags.segmentOrigin == 'TruthTracking':\n SegmentLocation = \"ThirdChainSegments\"\n reco_cscs = muonRecFlags.doCSCs() and MuonGeometryFlags.hasCSC()\n reco_mircomegas = muonRecFlags.doMMs() and MuonGeometryFlags.hasMM()\n reco_stgc = muonRecFlags.dosTGCs() and MuonGeometryFlags.hasSTGC() \n kwargs.setdefault(\"CSC_clusterkey\", \"CSC_Clusters\" if reco_cscs else \"\")\n kwargs.setdefault(\"doStgcSegments\", reco_stgc)\n kwargs.setdefault(\"doMMSegments\", reco_mircomegas)\n kwargs.setdefault(\"Csc2dSegmentMaker\", getPublicTool(\"Csc2dSegmentMaker\") if reco_cscs else \"\")\n kwargs.setdefault(\"Csc4dSegmentMaker\", getPublicTool(\"Csc4dSegmentMaker\") if reco_cscs else \"\")\n kwargs.setdefault(\"MuonClusterCreator\", getPublicTool(\"MuonClusterOnTrackCreator\") if reco_mircomegas or reco_stgc else \"\" )\n kwargs.setdefault(\"NSWSegmentMaker\", getPublicTool(\"MuonNSWSegmentFinderTool\") if reco_mircomegas or reco_stgc else \"\" )\n kwargs.setdefault(\"SegmentCollectionName\", SegmentLocation)\n kwargs.setdefault(\"MuonPatternCalibration\", getPublicTool(\"MuonPatternCalibration\"))\n kwargs.setdefault(\"PrintSummary\", muonStandaloneFlags.printSummary())\n kwargs.setdefault(\"MuonClusterSegmentFinder\", getPublicTool(\"MuonClusterSegmentFinder\"))\n kwargs.setdefault(\"TGC_PRDs\", 'TGC_MeasurementsAllBCs' if not muonRecFlags.useTGCPriorNextBC else 'TGC_Measurements')\n \n \n kwargs.setdefault(\"SegmentCombiner\", getPublicTool(\"MuonCurvedSegmentCombiner\"))\n kwargs.setdefault(\"RunSegmentCombiner\", 
beamFlags.beamType() == 'cosmics')\n MuonSegmentFinderAlg = CfgMgr.MuonSegmentFinderAlg( name, **kwargs ) \n \n # we check whether the layout contains any CSC chamber and if yes, we check that the user also wants to use the CSCs in reconstruction\n if reco_cscs:\n getPublicTool(\"CscSegmentUtilTool\") \n else: \n MuonSegmentFinderAlg.CSC_clusterkey = \"\"\n return MuonSegmentFinderAlg\n\n\ndef MuonStandaloneTrackParticleCnvAlg( name=\"MuonStandaloneTrackParticleCnvAlg\",**kwargs):\n from AthenaCommon.Include import include\n include(\"BeamSpotConditions/BeamCondAlgSetup.py\" )\n from xAODTrackingCnv.xAODTrackingCnvConf import xAODMaker__TrackParticleCnvAlg, xAODMaker__TrackCollectionCnvTool, xAODMaker__RecTrackParticleContainerCnvTool\n\n muonParticleCreatorTool = getPublicTool(\"MuonParticleCreatorTool\")\n muonTrackCollectionCnvTool = xAODMaker__TrackCollectionCnvTool( name = \"MuonTrackCollectionCnvTool\", TrackParticleCreator = muonParticleCreatorTool )\n muonRecTrackParticleContainerCnvTool = xAODMaker__RecTrackParticleContainerCnvTool(name = \"MuonRecTrackParticleContainerCnvTool\", TrackParticleCreator = muonParticleCreatorTool )\n\n kwargs.setdefault(\"TrackParticleCreator\", muonParticleCreatorTool)\n kwargs.setdefault(\"RecTrackParticleContainerCnvTool\", muonRecTrackParticleContainerCnvTool)\n kwargs.setdefault(\"TrackCollectionCnvTool\", muonTrackCollectionCnvTool)\n kwargs.setdefault(\"RecTrackParticleContainerCnvTool\", muonRecTrackParticleContainerCnvTool)\n kwargs.setdefault(\"TrackContainerName\", \"MuonSpectrometerTracks\")\n kwargs.setdefault(\"xAODTrackParticlesFromTracksContainerName\", \"MuonSpectrometerTrackParticles\")\n kwargs.setdefault(\"AODContainerName\", \"\")\n kwargs.setdefault(\"AODTruthContainerName\", \"\")\n kwargs.setdefault(\"xAODTruthLinkVector\", \"\")\n kwargs.setdefault(\"ConvertTrackParticles\", False)\n kwargs.setdefault(\"ConvertTracks\", True)\n\n return xAODMaker__TrackParticleCnvAlg(name,**kwargs)\n\n\n\ndef MuonStationsInterSectAlg(**kwargs):\n from MuonStationIntersectCond.MuonStationIntersectCondConf import MuonStationIntersectCondAlg\n from AthenaCommon.AlgSequence import AthSequencer\n condSequence = AthSequencer(\"AthCondSeq\")\n if hasattr(condSequence, \"MuonStationIntersectCondAlg\"): return\n from AthenaCommon.AthenaCommonFlags import athenaCommonFlags\n if athenaCommonFlags.isOnline:\n kwargs.setdefault(\"MdtCondKey\", \"\")\n condSequence += MuonStationIntersectCondAlg(\"MuonStationIntersectCondAlg\",**kwargs)\n\ndef MuonLayerHoughAlg(name=\"MuonLayerHoughAlg\", **kwargs):\n from AthenaCommon.BeamFlags import jobproperties\n beamFlags = jobproperties.Beam\n \n reco_stgc = muonRecFlags.dosTGCs() and MuonGeometryFlags.hasSTGC()\n reco_mircomegas = muonRecFlags.doMMs() and MuonGeometryFlags.hasMM()\n reco_cscs = muonRecFlags.doCSCs() and MuonGeometryFlags.hasCSC()\n \n if ( beamFlags.beamType() == 'collisions' ): \n kwargs.setdefault( \"MuonLayerScanTool\", getPublicTool(\"MuonLayerHoughTool\") )\n else:\n kwargs.setdefault( \"MuonLayerScanTool\", getPublicTool(\"MuonHoughPatternFinderTool\" ))\n \n kwargs.setdefault(\"PrintSummary\", muonStandaloneFlags.printSummary())\n kwargs.setdefault(\"CscPrepDataContainer\" ,\"CSC_Clusters\" if reco_cscs else \"\")\n kwargs.setdefault(\"sTgcPrepDataContainer\" , \"STGC_Measurements\" if reco_stgc else \"\")\n kwargs.setdefault(\"MMPrepDataContainer\" , \"MM_Measurements\" if reco_mircomegas else \"\")\n kwargs.setdefault(\"TgcPrepDataContainer\" , 'TGC_MeasurementsAllBCs' if not 
muonRecFlags.useTGCPriorNextBC else 'TGC_Measurements')\n return CfgMgr.MuonLayerHoughAlg(name, **kwargs)\n#\n# The top level configurator\n#\nclass MuonStandalone(ConfiguredMuonRec):\n def __init__(self,**kwargs):\n ConfiguredMuonRec.__init__(self,\"MuonStandalone\",**kwargs)\n # setup minimum config needed to get Storegate keys\n # full setup is done in configure()\n\n # keys for output of segment building stage\n self.addOutputKey(\"MuonSegments\", \"MuonSegments\")\n\n def configure(self,keys=None):\n super(MuonStandalone,self).configure(keys)\n if not self.isEnabled(): return \n # do the following in case of (at least one) NSW\n reco_stgc = muonRecFlags.dosTGCs() and MuonGeometryFlags.hasSTGC()\n reco_mircomegas = muonRecFlags.doMMs() and MuonGeometryFlags.hasMM() \n \n MuonStationsInterSectAlg()\n\n self.addAlg( MuonLayerHoughAlg(\"MuonLayerHoughAlg\"))\n if not muonStandaloneFlags.patternsOnly():\n self.addAlg(MuonSegmentFinderAlg(\"MuonSegmentMaker\"))\n \n self.addAlg(MuonSegmentFinderNCBAlg(\"MuonSegmentMaker_NCB\"))\n if not cfgKeyStore.isInInput ('xAOD::MuonSegmentContainer', 'MuonSegments_NCB'):\n self.addAlg( CfgMgr.xAODMaker__MuonSegmentCnvAlg(\"MuonSegmentCnvAlg_NCB\",\n SegmentContainerName=\"NCB_TrackMuonSegments\",\n xAODContainerName=\"NCB_MuonSegments\") )\n\n if reco_stgc or reco_mircomegas:\n self.addAlg( CfgMgr.xAODMaker__MuonSegmentCnvAlg(\"QuadNSW_MuonSegmentCnvAlg\",\n SegmentContainerName=\"TrackMuonNSWSegments\",\n xAODContainerName=\"xAODNSWSegments\") )\n\n if muonStandaloneFlags.doSegmentsOnly():\n return\t \n # Tracks builder\n #\n # add the algorithm (which uses the MuonTrackSteering)\n # \n TrackBuilder = CfgMgr.MuPatTrackBuilder(\"MuPatTrackBuilder\", \n TrackSteering=getPrivateTool(\"MuonTrackSteering\"), \n SpectrometerTrackOutputLocation=\"MuonSpectrometerTracks\", \n MuonSegmentCollection=\"TrackMuonSegments\")\n\n self.addAlg( TrackBuilder )\n #### Add a segment collection only containing only EM and EO hits\n if muonRecFlags.runCommissioningChain():\n self.addAlg(MuonSegmentFilterAlg(FilteredCollectionName=\"TrackMuonSegmentsEMEO\"))\n \n chamberRecovery_EMEO = getPublicToolClone(\"MuonChamberRecovery_EMEO\", \"MuonChamberHoleRecoveryTool\", \n sTgcPrepDataContainer=\"\",\n MMPrepDataContainer=\"\")\n\n MooTrackBuilder_EMEO = getPublicToolClone(\"MooMuonTrackBuilder_EMEO\", \n \"MooTrackBuilderTemplate\",\n ChamberHoleRecoveryTool = chamberRecovery_EMEO)\n TrackSteeringTool_EMEO = getPublicToolClone(\"MuonTrackSteering_EMEO\", \"MuonTrackSteering\", TrackBuilderTool = MooTrackBuilder_EMEO)\n \n \n TrackBuilder_EMEO = CfgMgr.MuPatTrackBuilder(\"MuPatTrackBuilder_EMEO\", \n TrackSteering=TrackSteeringTool_EMEO, \n SpectrometerTrackOutputLocation=\"EMEO_MuonSpectrometerTracks\", \n MuonSegmentCollection=\"TrackMuonSegmentsEMEO\")\n self.addAlg(TrackBuilder_EMEO)\n if muonStandaloneFlags.createTrackParticles():\n xAODTrackParticleCnvAlg_EMEO = MuonStandaloneTrackParticleCnvAlg(\"MuonStandaloneTrackParticleCnvAlg_EMEO\",\n TrackContainerName = \"EMEO_MuonSpectrometerTracks\",\n xAODTrackParticlesFromTracksContainerName=\"EMEO_MuonSpectrometerTrackParticles\")\n self.addAlg( xAODTrackParticleCnvAlg_EMEO )\n \n\n if muonStandaloneFlags.createTrackParticles():\n xAODTrackParticleCnvAlg = MuonStandaloneTrackParticleCnvAlg(\"MuonStandaloneTrackParticleCnvAlg\")\n self.addAlg( xAODTrackParticleCnvAlg )\n\n\n\n def getCalibConfig(self):\n \"\"\"Get the configuration to configure Calibration Ntuple output\"\"\"\n doTracks = not 
muonStandaloneFlags.doSegmentsOnly()\n if doTracks:\n tracksKey = \"MuonSpectrometerTracks\"\n else:\n tracksKey = \"\"\n \n return { 'eventTag' : muonStandaloneFlags.segmentOrigin(), \n 'patternsKey' : '',#self.dataKey(\"MuonPatterns\"), \n 'segmentsKey' : self.dataKey(\"MuonSegments\"), \n 'tracksKey' : tracksKey,\n 'doPhi' : muonStandaloneFlags.trackBuilder == 'Moore',\n 'segmentAuthor' : 5,\n 'trackAuthor' : 200\n }\n \n","repo_name":"Yusuf-Manjra/athena","sub_path":"MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonStandalone.py","file_name":"MuonStandalone.py","file_ext":"py","file_size_in_byte":16636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23374210520","text":"__all__ = (\n 'PostDocumentSerializer',\n)\n\nfrom django_elasticsearch_dsl_drf.serializers import DocumentSerializer\nfrom ..documents.post import PostDocument\n\n\nclass PostDocumentSerializer(DocumentSerializer):\n \"\"\"Serializer for post document.\"\"\"\n\n class Meta:\n \"\"\"Meta options.\"\"\"\n\n document = PostDocument\n fields = (\n 'id',\n 'title',\n 'slug',\n 'updated_on',\n 'created_on',\n 'excerpt',\n 'content',\n 'status',\n 'tags',\n 'views',\n 'reading_time',\n 'author',\n 'featured_image'\n )","repo_name":"416rehman/easy_blog","sub_path":"search/serializers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70451759236","text":"\"\"\"\n354. Russian Doll Envelopes\n\"\"\"\n\nfrom typing import List\nimport bisect\n\nclass Solution:\n def maxEnvelopes(self, envelopes: List[List[int]]) -> int:\n l_e = sorted(envelopes, key=lambda x:(x[0], -x[1]))\n l_e = [ x[1] for x in l_e ]\n dp = []\n\n for i in range(len(l_e)):\n idx = bisect.bisect_left(dp, l_e[i])\n if idx == len(dp):\n dp.append(l_e[i])\n else:\n dp[idx] = l_e[i]\n\n return len(dp)\n\n\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0354_russian_doll_evelopes.py","file_name":"0354_russian_doll_evelopes.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40871103008","text":"import os\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom ..biodivsim.EmpiricalBioDivEnv import *\nfrom ..biodivsim.EmpiricalGrid import *\nfrom ..biodivsim.StateInitializer import PickleInitializer\nfrom ..biodivsim.ClimateGenerator import get_climate\n\nnp.set_printoptions(suppress=1, precision=3)\n\n# Required (initial) files\n\"\"\"\nInputs/puvsp.dat <- species,pu,amount\nInputs/pu.dat <- cost per unit and status (in case already protected)\nPlanning_Units.txt <- unit ID and coordinates\n\n\"\"\"\nfrom captain import *\nimport numpy as np\nimport pandas as pd\n\n\ndef build_empirical_env(\n wd=\"\",\n puvsp_file=None,\n pu_file=None,\n pu_info_file=None,\n # fast loading files\n hist_file=None,\n puid_file=None,\n spid_file=None,\n budget=1,\n protect_fraction=0.1,\n max_disturbance=0.95,\n observePolicy=2,\n seed=1234,\n species_sensitivities=None,\n hist_out_file=None,\n pu_id_out_file=None,\n sp_id_out_file=None\n):\n\n emp = EmpiricalGrid(species_sensitivities=species_sensitivities)\n if wd != \"\":\n f_list = [puvsp_file, hist_file, puid_file, pu_info_file, pu_file, spid_file]\n for i in range(len(f_list)):\n try:\n f_list[i] = os.path.join(wd, f_list[i])\n except:\n pass\n [puvsp_file, hist_file, puid_file, pu_info_file, pu_file, spid_file] = f_list\n\n emp.initGrid(\n puvsp_file=puvsp_file,\n hist_file=hist_file,\n pu_id_file=puid_file,\n pu_info_file=pu_info_file,\n sp_id_file=spid_file,\n hist_out_file=hist_out_file,\n pu_id_out_file=pu_id_out_file,\n sp_id_out_file=sp_id_out_file\n )\n\n cost_tbl = pd.read_csv(pu_file)\n # subset cost table to PUs included in the species histogram\n cost_array = np.array(cost_tbl[\"cost\"])[cost_tbl[\"id\"].isin(emp._pus_id)] #KD!##np.array(cost_tbl[\"cost\"])[emp._pus_id - 1]\n\n # add a minimum cost per unit and rescale\n if np.min(cost_array) == 0:\n if np.max(cost_array) == 0:\n min_cost = 1\n else:\n min_cost = 0.01 * np.min(\n cost_array[cost_array > 0]\n ) # 1% of the cheapest cell with a cost\n cost_array[cost_array == 0] = min_cost\n\n # rescale cost\n cost_array = cost_array / np.mean(cost_array)\n total_cost = np.sum(cost_array)\n # set a budget sufficient to protect 10% of cheapest PUs\n budget = budget * (np.min(cost_array) * emp._n_pus)\n disturbance_matrix = max_disturbance * cost_array / np.max(cost_array)\n emp.set_disturbance_matrix(disturbance_matrix)\n runMode = [RunMode.NOUPDATEOBS, RunMode.ORACLE, RunMode.PROTECTATONCE][\n observePolicy\n ]\n\n env = BioDivEnvEmpirical(\n emp,\n budget,\n runMode=runMode,\n cost_pu=cost_array,\n stop_at_end_budget=True,\n verbose=0,\n iterations=None,\n protect_fraction=protect_fraction,\n h_seed=seed,\n )\n return env\n\n\n# LOAD POLICY\ndef load_policy_empirical(\n obsMode=1,\n trained_model=None,\n n_NN_nodes=[8, 0],\n temperature=1,\n observe_error=0,\n sp_threshold_feature_extraction=1,\n num_output=None,\n):\n\n # load trained model\n head = next(open(trained_model)).split()\n loaded_ws = np.loadtxt(trained_model, skiprows=1)\n selected_epoch = -1\n loadedW = loaded_ws[selected_epoch]\n\n num_features = len(get_feature_indx(mode=obsMode))\n [\n num_output,\n num_meta_features,\n nodes_layer_1,\n nodes_layer_2,\n nodes_layer_3,\n _,\n ] = get_NN_model_prm(num_features, n_NN_nodes, num_output)\n coeff_meta_features = get_thresholds_reverse(loadedW[-num_meta_features:])\n ind = [head.index(s) for s in head if \"coeff_\" in s]\n coeff_features = loadedW[np.min(ind) :] # remove first columns\n\n num_features = 
len(get_feature_indx(mode=obsMode))\n # model_prm = [coeff_features, coeff_meta_features]\n\n policy = PolicyNN(\n num_features,\n num_meta_features,\n num_output,\n coeff_features,\n coeff_meta_features,\n temperature=temperature,\n mode=obsMode,\n observe_error=observe_error,\n nodes_l1=nodes_layer_1,\n nodes_l2=nodes_layer_2,\n nodes_l3=nodes_layer_3,\n sp_threshold=sp_threshold_feature_extraction,\n flattened=True,\n verbose=0,\n )\n\n return policy\n","repo_name":"captain-project/captain-project","sub_path":"captain/algorithms/empirical_env_setup.py","file_name":"empirical_env_setup.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"}
+{"seq_id":"73287236035","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_commah\n----------------------------------\n\nTests for `commah` module.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport commah\n\n\nclass TestCommah(object):\n def test_cosmologies(self):\n cosmolist = ['WMAP1', 'WMAP3', 'WMAP5',\n 'WMAP7', 'WMAP9',\n 'Planck13', 'Planck15']\n conclist = [8.84952005507872,\n 6.570929526400634,\n 7.663081978810533,\n 7.906741464320479,\n 8.883912548303279,\n 9.250262507139139,\n 9.044999285760362]\n for ival, cosmo in enumerate(cosmolist):\n output = commah.run(cosmo, Mi=[1e12], verbose=True)\n assert(np.allclose(output['c'].flatten()[0],\n conclist[ival], rtol=1e-3))\n\n def test_evolution(self):\n zlist = np.array([0, 1, 2])\n conclist = np.array([7.66308, 5.70009, 4.55295])\n output = commah.run('WMAP5', zi=[0], Mi=[1e12], z=zlist)\n assert(np.allclose(output['c'].flatten(), conclist, rtol=1e-3))\n\n def test_startingz(self):\n zlist = np.array([0, 1, 2])\n conclist = np.array([4.55295, 4.43175, 4.26342])\n output = commah.run('WMAP5', zi=zlist, Mi=[1e12], z=2)\n assert(np.allclose(output['c'].flatten(), conclist, rtol=1e-3))\n","repo_name":"astroduff/commah","sub_path":"commah/tests/test_commah.py","file_name":"test_commah.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"25095084282","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def hasPathSum(self, root, targetSum):\n \"\"\"\n :type root: TreeNode\n :type targetSum: int\n :rtype: bool\n \"\"\"\n \n temp = 0 # stores cur sum at cur node\n \n def dfs(root, temp, targetSum): # recursive function (DFS); this function will boil down to a leaf node, which we will return True or False, if the temp_sum == targetSum\n \n if not root: # if not root, we return False\n return False\n \n temp += root.val # increase temp_sum\n \n \n if not root.left and not root.right: # if we are at a leaf node, check condition (return True or False)\n return targetSum == temp\n \n return dfs(root.left, temp, targetSum) or dfs(root.right, temp, targetSum) # if not leaf, keep searching till we get to leaf, here we return True or False, it will return True if at least one path has temp_sum == targetSum\n \n return dfs(root, temp, targetSum) # return the function to call it\n ","repo_name":"camillegandotra/leetcode","sub_path":"easy/112. Path Sum.py","file_name":"112. Path Sum.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4761306974","text":"'''utiitiy routines for embers-like programs'''\nimport datetime\nimport random\nimport warnings\nimport itertools as it\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom verbose import verbose as v\nimport wrapgen\nimport covid\n\ndef embers_args(ap):\n '''\n call this in the getargs() function\n so these options will be added in\n '''\n paa = ap.add_argument\n\n paa(\"--daily\",type=int,default=7,\n help=\"daily=1 for daily, daily=7 (default) for weekly, daily=0 for cumulative\")\n paa(\"--weekly\",action=\"store_const\",const=7,dest='daily',\n help=\"Make weekly average plots instead of cumulative (set daily=7)\")\n paa(\"--cumulative\",action=\"store_const\",const=0,dest='daily',\n help=\"Make cumulative plots, instead of daily or weekly averaged\")\n ap.set_defaults(daily=7)\n\n g = ap.add_argument_group('Graphing options')\n gaa = g.add_argument\n gaa(\"--title\",\n help=\"Use this TITLE in plots\")\n gaa(\"--lineplot\",action=\"store_true\",\n help=\"Make log-linear line plot instead of linear stacked bar plot\")\n gaa(\"--onsets\",action=\"store_true\",\n help=\"plot onset dates for each mutant\")\n gaa(\"--legend\",type=int,default=1,choices=(0,1,2),\n help=\"0: no legend, 1: legend, 2: big legend (with seq patterns)\")\n gaa(\"--output\",\"-o\",help=\"write plot to file\")\n\ndef days_in_month(yyyy,mm):\n '''return number of days in month yyyy-mm'''\n # https://stackoverflow.com/questions/42950/how-to-get-the-last-day-of-the-month\n next_month = datetime.date(int(yyyy),int(mm),28) + datetime.timedelta(days=4)\n return (next_month - datetime.timedelta(days=next_month.day)).day\n\ndef date_fromiso(s):\n '''return datetime.date object from iso string yyyy-mm-dd'''\n ## over-rides covid.date_fromiso by giving yyyy-mm a random day\n if isinstance(s,datetime.date):\n return s\n try:\n yyyy,mm,dd = s.split(\"-\")\n dt = datetime.date(int(yyyy),int(mm),int(dd))\n return dt\n except ValueError:\n if s == \".\":\n return None\n try:\n yyyy,mm = s.split(\"-\")\n day = random.randint(1,days_in_month(yyyy,mm))\n dt = datetime.date(int(yyyy),int(mm),day)\n return dt\n except ValueError:\n return None\n warnings.warn(f'Invalid Date {s}')\n return None\n\ndef date_from_seqname(sname):\n '''parses date (as a string) from sequence name'''\n ## faster, but less robust, than covid.date_from_seqname\n tokens = sname.split('.')\n try:\n date = date_fromiso(tokens[4])\n except IndexError:\n warnings.warn(f\"bad date: {sname}\")\n date = None\n return date\n\ndef get_ord_daterange(datecounter,argsdates):\n '''\n find range of dates, in ordinal numbers, based on:\n datecounter[mutant][date] = count of mutants in date, and\n argsdates is basically args.dates;\n returns a tuple of two two-element tuples: (ord_range,ord_plot_range)\n with each range containing a (ord_min,ord_max) pair\n '''\n all_dateset=set()\n for p in datecounter:\n all_dateset.update(datecounter[p])\n\n v.vprint(\"Range of dates:\",min(all_dateset),max(all_dateset))\n ordmin = min(all_dateset).toordinal()\n ordmax = max(all_dateset).toordinal()\n\n ordplotmin = ordmin\n ordplotmax = ordmax\n if argsdates and argsdates[0] and argsdates[0] != \".\":\n ordplotmin = date_fromiso(argsdates[0]).toordinal()\n if argsdates and argsdates[1] and argsdates[1] != \".\":\n ordplotmax = date_fromiso(argsdates[1]).toordinal()\n\n ordmin = min([ordmin,ordplotmin])\n ordmax = max([ordmax,ordplotmax])\n\n return (ordmin, ordmax), (ordplotmin, ordplotmax)\n\ndef 
filter_seqs_by_padded_dates(seqs,args,keepfirst=False):\n '''\n filter input data to keep only data in date range specified...except\n pad the date range so that weekly or cumulative plots are still correct\n side effect: args.dates potentially gets modified\n '''\n start_date,stop_date = covid.date_range_from_args(args)\n args.dates = [start_date,stop_date]\n start_date,stop_date = covid.expand_date_range([start_date,stop_date],args.daily)\n seqs = covid.filter_by_date(seqs,start_date,stop_date,keepfirst=keepfirst)\n\n if args.nseq:\n seqs = it.islice(seqs,args.nseq+1)\n if args.verbose:\n seqs = wrapgen.keepcount(seqs,\"Sequences filtered:\")\n\n return seqs\n\ndef get_running_weekly(cumulative_counts,num_days,daysperweek=7):\n '''convert cumulative counts into running weekly counts'''\n if daysperweek==0:\n return cumulative_counts\n\n running_weekly=dict()\n for m in cumulative_counts:\n running_weekly[m] = cumulative_counts[m][:]\n for n in range(daysperweek,num_days):\n running_weekly[m][n] = cumulative_counts[m][n] \\\n - cumulative_counts[m][n-daysperweek]\n return running_weekly\n\ndef get_cumulative_counts(date_counter,ord_range,daysperweek=7):\n '''convert date_counter into list of cumulative counts'''\n ordmin, ordmax = ord_range\n num_days = ordmax+1-ordmin\n v.vprint(\"Days:\",num_days,ordmin,ordmax)\n\n cumulative_counts={m: list() for m in date_counter}\n for ord_date in range(ordmin,ordmax+1):\n day = datetime.date.fromordinal(ord_date)\n for m in date_counter:\n cumulative_counts[m].append( sum(date_counter[m][dt]\n for dt in date_counter[m] if dt <= day ) )\n\n running_weekly = get_running_weekly(cumulative_counts,num_days,\n daysperweek=daysperweek)\n return running_weekly\n\ndef mk_counts_table(date_counter,names):\n '''useful diagnostic; a table of names, counts, onsets, and patterns'''\n ## Uses 'yield' to return lines one at a time\n ## Usage:\n ## for line in mk_counts_table(...):\n ## print(line)\n ##\n maxnamelen = max(len(names[p]) for p in date_counter)\n fmt = f'%{maxnamelen}s %7s %10s %s'\n yield fmt % ('Name','Count','Onset','Pattern')\n for p in date_counter:\n name = names[p]\n count = sum(date_counter[p].values())\n onset = min(date_counter[p]) if date_counter[p] else ''\n yield fmt % (name,str(count),onset,p)\n\n\ndef get_plot_filename(args,fraction,case_is_none=True,xtra=None):\n '''return name of file in which to save plot'''\n ## eg, something like wk-f-bar-xtra-out.pdf\n if not args.output:\n return ''\n\n linbar = \"line\" if args.lineplot else \"bar\"\n if fraction:\n fc = \"f\" if case_is_none else \"t\"\n else:\n fc = \"c\"\n wk = \"wk\" if args.daily==7 \\\n else \"dy\" if args.daily == 1 \\\n else \"cm\"\n\n pplist = [fc,wk,linbar,\"\"]\n if xtra:\n pplist.insert(3,xtra)\n prepend = \"-\".join(pplist)\n outfile = covid.filename_prepend(prepend,args.output)\n return outfile\n\ndef date_friendly(dt):\n '''simple string format for datetime.date object used on x-axis of plots'''\n mmm = dt.strftime(\"%b\")\n dd = dt.strftime(\"%-d\")\n return \"%3s %2d\" % (mmm, int(dd))\n\ndef half_labels(labels,n=2):\n '''replace list of labels [\"one\",\"two\",\"three\",\"four\",\"five\"]\n with [\"one\", \"\", \"three\", \"\", \"five\"]\n '''\n if n<2:\n return labels\n return [label if i%n==0 else \"\"\n for i,label in enumerate(labels)]\n\ndef skip_elements(elist,n=1):\n '''only include every n'th element of a list of elements'''\n if n<1:\n return elist\n return [elt for i,elt in enumerate(elist) if i%n==0]\n\ndef embersplot(counts,\n fullnames,\n mcolors,\n 
ordmin,\n ordplotrange=None,\n num_cases=None,\n title=None,legendtitle=None,legend=0,onsets=False,\n fraction=False,lineplot=False,show=False,daily=7):\n '''\n embersplot is an embers-style plot of counts vs date for multiple variants\n counts: dictionary of arrays; each array is counts vs date;\n keys of dictionary correspond to different variants\n fullnames: dictionary of names associated with dictionary keys (=None if keys are names)\n mcolors: dictionary of colors\n ordmin: first day in array, as ordinal number\n ordplotrange: tuple (ordplotmin, ordplotmax) indicates range of days to plot\n '''\n\n patterns = list(counts)\n fullnames = fullnames if fullnames else {m:m for m in patterns}\n maxnamelen = max(len(n) for n in fullnames.values())\n namefmt = \"%%-%ds\" % maxnamelen\n mnames = {m : namefmt % fullnames[m] for m in patterns}\n\n num_days = len(counts[patterns[0]])\n for m in patterns: ## should make sure len is same for all patterns\n assert num_days == len(counts[m])\n\n if legend == 0:\n plt.figure(figsize=(6,3))\n elif legend == 1:\n F = 4.7 if lineplot else 5.0\n plt.figure(figsize=(12,1+len(patterns)/F))\n elif legend == 2:\n F = 4.7 if lineplot else 4.5\n plt.figure(figsize=(12,1+len(patterns)/F))\n #plt.figure(figsize=(12,0.5+0.5*len(patterns)/F))\n else:\n raise RuntimeError(f\"legend should be 0, 1, or 2: not {legend}\")\n\n if title:\n #title = title + \": %d sequences\" % (Nsequences,)\n plt.title(title,fontsize='x-large')\n\n counts_bottom = dict()\n counts_total = [0] * len(counts[patterns[0]])\n for m in counts:\n counts_bottom[m] = counts_total\n counts_total = [x+y for x,y in zip(counts_total,counts[m])]\n\n ## at this point counts_total is array (vs date) of total counts over all patterns\n\n if legendtitle and legend>1 and not lineplot:\n plt.bar(range(num_days),[0]*num_days,width=1,\n label=legendtitle,color=\"white\")\n\n Y = []\n Ylabels = []\n Ycolors = []\n\n name_color_sofar = set()\n for m in patterns: #patterns[::-1]:\n\n name = mnames[m] ## mnames\n #if legend>1: ### take this outside\n # name += \" \" + mrelpatt[m]\n name = \" \" + name ## hack! 
leading underscore doesn't make it to legend??\n\n if lineplot:\n kwargs = dict(color=mcolors[m],label=name)\n else:\n ## For bar plot, repeated name,color tuples only appear once in legend\n name_color = (name,mcolors[m])\n kwargs = dict(color=mcolors[m])\n if name_color not in name_color_sofar:\n name_color_sofar.add(name_color)\n kwargs['label']=name\n\n if fraction:\n fm = [a/(b+0.001) for a,b in zip(counts[m],counts_total)]\n bm = [a/(b+0.001) for a,b in zip(counts_bottom[m],counts_total)]\n if num_cases is not None:\n fm = [c*f for c,f in zip(fm,num_cases)]\n bm = [c*f for c,f in zip(bm,num_cases)]\n\n else:\n fm = counts[m]\n bm = counts_bottom[m]\n\n if lineplot:\n dy = np.array(range(num_days))\n fm = np.array(fm)\n dy = dy[fm>0]\n fm = fm[fm>0]\n plt.semilogy(dy,fm,lw=2, **kwargs)\n else:\n #plt.bar(range(num_days),fm,bottom=bm,width=1,**kwargs)\n Y.append(fm)\n Ylabels.append(kwargs.get('label',None))\n Ycolors.append(kwargs['color'])\n\n if not lineplot:\n ## instead of multiple calls to plt.bar, single call to plt.stackplot\n plt.stackplot(range(num_days),Y,\n labels=Ylabels,colors=Ycolors)\n\n if fraction and not lineplot and num_cases is None:\n plt.ylim([0,1.05])\n\n #if fraction:\n # plt.yticks([0.0001,0.001,0.01,0.1,1])\n # plt.yticks([0.0001,0.01,1])\n\n if legend:\n ## reverse the order of the handles/labels in the legend\n ## deleting the empty labels (corresponding to repeated colors)\n handles, labels = plt.gca().get_legend_handles_labels()\n hl_reverse_list = list(zip(handles,labels))[::-1]\n handles = []\n labels = []\n for h,l in hl_reverse_list:\n if l is not None:\n handles.append(h)\n labels.append(l)\n\n plt.legend(handles,labels,\n bbox_to_anchor=(1.02, 1),\n #handlelength=3,\n #markerfirst=False,\n frameon=False,\n handletextpad=0,\n labelspacing=0.45,\n loc='upper left', borderaxespad=0.,\n prop={'family' : 'monospace'})\n\n if not ordplotrange:\n ordplotmin = ordmin+1\n ordplotmax = ordmin+num_days\n else:\n ordplotmin, ordplotmax = ordplotrange\n\n plt.xlim(ordplotmin-ordmin-1,ordplotmax-ordmin+1)\n xticks = list(range(ordplotmin-ordmin,ordplotmax-ordmin+1,7)) ## was n+6\n xlabels = [datetime.date.fromordinal(int(ord+ordmin)) for ord in xticks]\n min_date = datetime.date.fromordinal(ordplotmin)\n max_date = datetime.date.fromordinal(ordplotmax)\n xlabels = [date_friendly(dt) for dt in xlabels]\n nhalf = 1 + len(xlabels)//16\n if nhalf<4:\n xlabels = half_labels(xlabels,nhalf)\n else:\n xlabels = skip_elements(xlabels,nhalf)\n xticks = skip_elements(xticks, nhalf)\n plt.xticks(xticks,xlabels,fontsize='medium', ## was small\n rotation=45,ha='right',position=(0,0.01))\n\n plt.xlabel(f'{min_date} to {max_date}')\n\n if onsets:\n ylo,yhi = plt.gca().get_ylim()\n for m in patterns:\n if m not in onsets:\n continue\n x = onsets[m].toordinal() - ordmin\n kwargs=dict(lw=1,color=mcolors[m])\n if not fraction:\n kwargs['zorder']=0\n plt.plot([x,x],[ylo,yhi],**kwargs)\n\n if lineplot:\n if legend==0:\n plt.subplots_adjust(bottom=0.15,right=0.95) ## hardcoded hack!\n else:\n #plt.subplots_adjust(bottom=0.15,right=0.8) ## still hardcoded!\n #plt.subplots_adjust(bottom=0.15,right=0.7) ## still hardcoded!\n pass\n\n def ytickformat(x,pos):\n if x>1:\n return \"\" if (fraction and num_cases is None) else \"%g\" % x\n else:\n return \"%.1g\" % x\n\n plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(ytickformat))\n\n plt.yticks(fontsize=12)\n\n if daily==7:\n ylabel_prefix=\"Weekly\"\n elif daily==1:\n ylabel_prefix=\"Daily\"\n elif daily==0:\n 
ylabel_prefix=\"Cumulative\"\n else:\n ylabel_prefix=f'{daily}-day'\n\n\n\n plt.ylabel(\"Fraction\" if fraction else ylabel_prefix+\" Sequence Counts\")\n if fraction and num_cases is not None:\n plt.ylabel(ylabel_prefix+\" Cases\")\n plt.tight_layout()\n if show:\n plt.show()\n\n\ndef make_emberstyle_plots(args,extra_id,cum_counts,\n names,colors,ordmin,ordplotrange,\n num_cases=None,\n title=None,nmatches=0,ncases=0,\n daily=7,\n onsets=None):\n '''\n plot fraction, sequence counts, and cases\n against time for variants of interest\n '''\n\n ## restrict data to within the plot range\n ## then, auto scaling on the y-axis will be based on available data\n ordplotmin,ordplotmax = ordplotrange\n assert ordmin <= ordplotmin\n for m in cum_counts:\n cum_counts[m] = cum_counts[m][ordplotmin-ordmin: ordplotmax-ordmin+1]\n ordmin = ordplotmin\n\n ## Now make three plots, one w/ seq-counts, one w/ fractions,...\n ## and one w/ case counts (if num_counts is available)\n fractions = (False,True)\n cases = (None,None)\n totals = (f'{nmatches} sequences',\n f'{nmatches} sequences')\n if num_cases is not None:\n fractions = (False,True,True)\n cases = (None,None,num_cases)\n totals = (f'{nmatches} sequences',\n f'{nmatches} sequences',\n #f'{nmatches} sequences',\n f'{ncases} cases',\n )\n\n for fraction,case,total in zip(fractions,cases,totals):\n the_title = title + f': {total}'\n embersplot(cum_counts,names,colors,ordmin,\n ordplotrange = ordplotrange,\n num_cases=case,\n legend=args.legend,lineplot=args.lineplot,\n title = the_title, onsets=onsets,\n fraction=fraction, daily=daily)\n\n if args.output:\n outfile = get_plot_filename(args,fraction,\n case is None,xtra=extra_id)\n plt.savefig(outfile)\n\n if not args.output:\n plt.show()\n","repo_name":"jt-lanl/cov-voc","sub_path":"embersutil.py","file_name":"embersutil.py","file_ext":"py","file_size_in_byte":16465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"4996717009","text":"import mysql.connector\nfrom classes.sejour import Sejour\nfrom datetime import datetime\nclass Manage_sejour:\n def __init__(self):\n self.conn = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\", database=\"gestion_Patients\")\n # connexion à la base de donnée\n self.curseurBDD = self.conn.cursor()\n\n def ajouter_sejour(self, sejour : Sejour):\n print(\"test\")\n # methode pour ajouter un séjour associé à un patient + lit\n instructionBDD = f\"INSERT INTO Sejour (date_entree_sejour, date_sortie_sejour, probleme, num_patient, num_lit) VALUES ('{sejour.dateEntree}', '{sejour.dateSortie}', '{sejour.probleme}', {sejour.patient}, {sejour.idLit});\"\n print(instructionBDD)\n self.curseurBDD.execute(instructionBDD)\n self.conn.commit()\n print(\"test1\")\n #id du sejour généré\n idsejour = self.curseurBDD.lastrowid\n\n #chambre\n instructionBDD2 = f\"INSERT INTO sejour_chambre (num_sejour, num_chambre, date_entree_chambre) VALUES ({idsejour},{sejour.idChambre},'{sejour.dateEntree}');\"\n self.curseurBDD.execute(instructionBDD2)\n self.conn.commit()\n print(\"test2\")\n #service\n instructionBDD3 = f\"INSERT INTO service_sejour (date_entree_service, num_sejour, num_service) VALUES ('{sejour.dateEntree}', {idsejour}, {sejour.service});\"\n self.curseurBDD.execute(instructionBDD3)\n self.conn.commit()\n print(\"test3\")\n \n\n def afficher_liste_sejour(self):\n # methode pour afficher tous les sejour\n instructionBDD = f\"SELECT sejour.id_sejour, nom, prenom, date_entree_sejour, date_sortie_sejour, nom_service, numero_chambre, numero_lit, probleme FROM ((Patient INNER JOIN Sejour ON patient.id_patient = sejour.num_patient INNER JOIN lit ON sejour.num_lit=lit.id_lit)INNER JOIN sejour_chambre ON sejour.id_sejour=sejour_chambre.num_sejour INNER JOIN chambre ON sejour_chambre.num_chambre=chambre.id_chambre) INNER JOIN service_sejour ON sejour.id_sejour=service_sejour.num_sejour INNER JOIN service ON service_sejour.num_service=service.id_service\"\n self.curseurBDD.execute(instructionBDD)\n resultat = self.curseurBDD.fetchall()\n \n print(resultat)\n \n retour = []\n for sejour in resultat:\n retour.append({\"id_sejour\":sejour[0], \"nom\": sejour[1], \"prenom\": sejour[2], \"date_entree_sejour\": sejour[3], \"date_sortie_sejour\": sejour[4], \"nom_service\": sejour[5], \"num_chambre\":sejour[6], \"num_lit\":sejour[7], \"probleme\": sejour[8]})\n return retour\n \n def infoSejour(self, id):\n instructionBDD = f\"SELECT sejour.id_sejour, nom, prenom, date_entree_sejour, date_sortie_sejour, service.id_service, chambre.id_chambre, lit.id_lit, probleme FROM ((Patient INNER JOIN Sejour ON patient.id_patient = sejour.num_patient INNER JOIN lit ON sejour.num_lit=lit.id_lit)INNER JOIN sejour_chambre ON sejour.id_sejour=sejour_chambre.num_sejour INNER JOIN chambre ON sejour_chambre.num_chambre=chambre.id_chambre) INNER JOIN service_sejour ON sejour.id_sejour=service_sejour.num_sejour INNER JOIN service ON service_sejour.num_service=service.id_service WHERE sejour.id_sejour={id}\"\n self.curseurBDD.execute(instructionBDD)\n sejour = self.curseurBDD.fetchone()\n return {\"id_sejour\":sejour[0], \"prenom\": sejour[1], \"nom\": sejour[2], \"date_entree_sejour\": sejour[3].strftime(\"%Y-%m-%d %H:%M\"), \"date_sortie_sejour\": sejour[4].strftime(\"%Y-%m-%d %H:%M\"), \"service\": sejour[5], \"chambre\":sejour[6], \"lit\":sejour[7], \"probleme\": sejour[8]}\n\n def modif_sejour(self, sejour, id):\n instructionBDD = f\"UPDATE Sejour SET num_lit={sejour.idLit}, 
date_entree_sejour='{sejour.dateEntree}', date_sortie_sejour='{sejour.dateSortie}', probleme='{sejour.probleme}' WHERE id_sejour={id}\"\n self.curseurBDD.execute(instructionBDD)\n self.conn.commit()\n \n instructionBDD2 = f\"UPDATE sejour_chambre SET num_chambre={sejour.idChambre} WHERE num_sejour={id}\"\n self.curseurBDD.execute(instructionBDD2)\n self.conn.commit()\n \n instructionBDD3 = f\"UPDATE service_sejour SET num_service={sejour.service} WHERE num_sejour={id}\"\n self.curseurBDD.execute(instructionBDD3)\n self.conn.commit()","repo_name":"DenisJongmanee/API_projet_hopital","sub_path":"manage/manage_sejour.py","file_name":"manage_sejour.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29240710473","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('asa18', '0062_auto_20170315_1552'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='lineitem',\n name='event_rego',\n field=models.OneToOneField(null=True, to='asa18.EventRegistration'),\n ),\n migrations.AlterField(\n model_name='lineitem',\n name='meeting_rego',\n field=models.OneToOneField(null=True, to='asa18.Registration'),\n ),\n ]\n","repo_name":"Samreay/ASA2018_django","sub_path":"asa18/migrations/0063_auto_20170315_1557.py","file_name":"0063_auto_20170315_1557.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40174301551","text":"from tkinter import *\n\nbase = Tk()\nbase.title(\"Calculator\")\nbase.geometry(\"300x400+300+300\")\n\nf = (\"Arial Black\",20)\n\nvar=\"\"\n\ndef back():\n global var\n var=var[0:-1]\n lbl1.config(text=var)\n return var\n\ndef Perform_addition():\n global var\n var=var + \"+\"\n lbl1.config(text=var)\n return var\n # a=int( txt1.get())\n # b=int ( txt2.get())\n # c=a+b\n # result.configure(text=\"addition is \"+str(c))\n\n\ndef Perform_substraction():\n global var\n var = var + \"-\"\n lbl1.config(text=var)\n return var\n\n # a=int( txt1.get())\n # b=int ( txt2.get())\n # c=a-b\n # result.configure(text=\"Substraction is\"+str(c))\n\n\ndef Perform_multiplication():\n global var\n var = var + \"*\"\n lbl1.config(text=var)\n return var\n # a=int( txt1.get())\n # b=int ( txt2.get())\n # c=a*b\n # result.configure(text=\"Multipication is \"+str(c))\n\ndef Perform_Divide():\n global var\n var = var + \"/\"\n lbl1.config(text=var)\n return var\n # a=int( txt1.get())\n # b=int ( txt2.get())\n # c=a/b\n # result.configure(text=\"Division is\"+str(c))\n\n\ndef clr_scr():\n global var\n var=\"\"\n lbl1.config(text=var)\n return var\n # data.get()\n # data.delete(0,END)\n # b= txt2.get()\n # txt2.delete(0,END)\n # lbl1.configure(text=\"Result is\")\n#\ndef Ans():\n global var\n lbl1.config(text=\"Syntax Error\")\n a=eval(var)\n var=str(a)\n lbl1.config(text=var)\n return var\n # if operator == \"+\":\n # x=int(var2.split(\"+\")[1])\n # c=A+x\n # data.set(c)\n # var=str(c)\n #\n # elif operator == \"-\":\n # x = int(var2.split(\"-\")[1])\n # c = A - x\n # data.set(c)\n # var = str(c)\n # elif operator == \"*\":\n # x=int(var2.split(\"*\")[1])\n # c=A*x\n # data.set(c)\n # var=str(c)\n # elif operator == \"/\":\n # x=int((var2.split(\"/\")[1]))\n # if x==0:\n # messagebox.showerror(\"Error\",\"invalid\")\n # A=\"\"\n # var=\"\"\n # data.set(var)\n # else:\n # c=int(A/x)\n # data.set(c)\n # x=int(var2.split(\"+\")[1])\n # c=A*x\n # data.set(c)\n # var=str(c)\ndef button1():\n global var\n var=var + \"1\"\n lbl1.config(text=var)\n return var\n\ndef button2():\n global var\n var = var + \"2\"\n lbl1.config(text=var)\n return var\n\n\ndef button3():\n global var\n var = var + \"3\"\n lbl1.config(text=var)\n return var\n\ndef button4():\n global var\n var = var + \"4\"\n lbl1.config(text=var)\n return var\n\ndef button5():\n global var\n var = var + \"5\"\n lbl1.config(text=var)\n return var\n\ndef button6():\n global var\n var = var + \"6\"\n lbl1.config(text=var)\n return var\n\ndef button7():\n global var\n var = var + \"7\"\n lbl1.config(text=var)\n return var\n\ndef button8():\n global var\n var = var + \"8\"\n lbl1.config(text=var)\n return var\n\ndef button9():\n global var\n var = var + \"9\"\n lbl1.config(text=var)\n return var\n\ndef button0():\n global var\n var = var + \"0\"\n lbl1.config(text=var)\n return var\n\n\n\n\n\n\n# screen= Text(base,width=\"45\",height=\"5\")\n# screen.pack()\n# txt1=Entry(base,width=\"50\")\n# txt1.focus()\n# txt1.pack()\n# txt2= Entry(base,width=\"50\")\n# txt2.pack()\n\n\nadd= Button(base,text=\"+\",command=Perform_addition,font=f)\nadd.place(x=170,y=100)\n\n\nsub= Button(base,text=\" -\",command=Perform_substraction,font=f)\nsub.place(x=170,y=180)\n\n\nmul= Button(base,text=\"*\",command=Perform_multiplication,font=f)\nmul.place(x=170,y=260)\n\ndiv= Button(base,text=\"/ \",command=Perform_Divide,font=f)\ndiv.place(x=170,y=330)\n\n\nclr= Button(base,text=\"C\",command=clr_scr,font=f)\nclr.place(x=67,y=330)\n\n\nAns= 
Button(base,text=\"=\",command=Ans,font=f)\nAns.place(x=119,y=330)\n\n\n\nlbl1=Label(base,text=\"\",width=\"45\",height=\"3\",font= (\"Arial Black\",16),bg=\"white\")\nlbl1.pack()\n\n\nbt1=Button(base,text=\"1\",font=f,command=button1)\nbt1.place(x=10,y=100)\n\nbt2=Button(base,text=\"2\",font=f,command=button2)\nbt2.place(x=67,y=100)\n\nbt3=Button(base,text=\"3\",font=f,command=button3)\nbt3.place(x=119,y=100)\n\nbt4=Button(base,text=\"4\",font=f,command=button4)\nbt4.place(x=10,y=180)\n\nbt5=Button(base,text=\"5\",font=f,command=button5)\nbt5.place(x=67,y=180)\n\nbt6=Button(base,text=\"6\",font=f,command=button6)\nbt6.place(x=119,y=180)\n\nbt7=Button(base,text=\"7\",font=f,command=button7)\nbt7.place(x=10,y=260)\n\nbt8=Button(base,text=\"8\",font=f,command=button8)\nbt8.place(x=67,y=260)\n\nbt9=Button(base,text=\"9\",font=f,command=button9)\nbt9.place(x=119,y=260)\n\nbt0=Button(base,text=\"0\",font=f,command=button0)\nbt0.place(x=10,y=330)\n\nbt10=Button(base,text=\"DEL\",font=(\"Times new roman\",14),command=back)\nbt10.place(x=230,y=100)\n\nbase.mainloop()","repo_name":"SaurabhJagdambe/Calculator-Gui-","sub_path":"GUI_Calci.py","file_name":"GUI_Calci.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39614437294","text":"import re\n\nfrom django import template\n\nregister = template.Library()\n\nhtml_tag = re.compile(r\"<.*?>\")\n\n\ndef format_highlight(highlighted_fragments):\n \"\"\"Strip HTML, then transform back into html with highlighted fragments only.\n\n :param highlighted_fragments: list of fragments from elasticsearch\n :type highlighted_fragments: list\n :return: the formatted string\n :rtype: str\n \"\"\"\n\n fragments = []\n for fragment in highlighted_fragments:\n if fragment:\n fragments.append(\n html_tag.sub(\"\", fragment).replace(\"[hl]\", '').replace(\"[/hl]\", \" \")\n )\n\n return \" … \".join(fragments)\n\n\nclass HighlightNode(template.Node):\n \"\"\"For a elasticsearch result, looks into ``.meta.highlight`` if something has been highlighted. If so, use that\n information. Otherwise, just give back the text.\n\n See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-highlighting.html\n\n Note that the class expects ``\"pre_tags\" : [\"[hl]\"], \"post_tags\" : [\"[/hl]\"]``, since all HTML is stripped.\n \"\"\"\n\n def __init__(self, search_result, field):\n self.search_result = search_result\n self.field = field\n\n def render(self, context):\n search_result = context[self.search_result]\n\n if self.field[0] in ['\"', \"'\"]:\n field = self.field[1:-1]\n else:\n field = template.Variable(self.field).resolve(context)\n\n if field not in search_result:\n raise template.VariableDoesNotExist(f\"field {field} is not a member of the search result\")\n\n text = \"\"\n\n if search_result[field]:\n text = html_tag.sub(\"\", search_result[field])\n\n if \"highlight\" in search_result.meta:\n if field in search_result.meta.highlight:\n text = format_highlight(search_result.meta.highlight[field])\n\n return text\n\n\n@register.tag\ndef highlight(parser, token):\n part = token.split_contents()\n\n if len(part) != 3:\n raise template.TemplateSyntaxError(\n \"'highlight' tag must be of the form: {% highlight %}\"\n )\n\n return HighlightNode(part[1], part[2])\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/utils/templatetags/elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"}
+{"seq_id":"27653814976","text":"class Binary:\n def __init__(self, arr):\n self.arr = arr\n def search(self, val, low, high):\n mid = round((low+high)/2)\n if low == high and self.arr[mid] != val:\n return -1\n if self.arr[mid] == val:\n return mid\n elif self.arr[mid] < val:\n return self.search(val, mid, high)\n elif self.arr[mid] > val:\n return self.search(val, low, mid)\n\nobj = Binary([4, 8, 12, 16, 20])\nsearchValue = 20\nresult = obj.search(searchValue, 0, len(obj.arr)-1)\nif result == -1:\n print(\"Element not found.\")\nelse:\n print(\"Element found at {} position.\".format(result+1))","repo_name":"mecharan14/Data-Structures","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42149568487","text":"import argparse\nimport random\nimport urllib.request\nfrom lxml import html\n\n\n\nASCII_ART_HOME=\"https://asciiart.website/index.php\"\n\ndef get_ascii(art_path):\n url = '{0}?art={1}'.format(ASCII_ART_HOME, art_path)\n with urllib.request.urlopen(url) as response:\n tree = html.fromstring(response.read())\n asciis = tree.xpath('//pre[contains(@id, \"p\")]')\n ascii = random.choice(asciis)\n return ascii.text\n\ndef print_ascii(text, header = '', wrap_before = '', wrap_after = ''):\n print('{0}\\n{1}{2}{3}'.format(\n header, wrap_before, text, wrap_after\n ))\n\ndef run(args):\n text = get_ascii(args.artpath[0])\n print_ascii(\n text,\n args.header[0],\n args.before[0],\n args.after[0],\n )\n\n\n\ndef getargs():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--artpath', type=str, nargs=1, help='The ascii art path to use. For example, transportation/nautical')\n parser.add_argument('--header', type=str, nargs=1, help='Header to print on a single line before the ascii art. For example, Ship It!', default = [''])\n parser.add_argument('--before', type=str, nargs=1, help='Prefix ascii in this. For example, ```', default = [''])\n parser.add_argument('--after', type=str, nargs=1, help='Postfix ascii in this. For example, ```', default = [''])\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n run(getargs())\n\n","repo_name":"nmiodice/asciiart-cli","sub_path":"ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24807117030","text":"from selenium import webdriver\nfrom chromedriver_autoinstaller import install\nimport time\n\nbrowser = webdriver.Chrome(install())\nbrowser.get(\"https://www.hanteochart.com/chart/world/weekly\")\ntime.sleep(2)\n\nfor i in range(4):\n see_more_btn = browser.find_element_by_css_selector(\"div.see-more-btn\")\n see_more_btn.click()\n time.sleep(0.3)\n\nitems = browser.find_elements_by_css_selector(\"div.chart-item-b.rank-data.single-col.single-stat.long\")\nnum = 1\nfor item in items:\n title = item.find_element_by_css_selector(\"div.center div.top\").text\n try:\n team = item.find_element_by_css_selector(\"pre.left\").text\n except:\n team = \"\"\n worldindex = item.find_element_by_css_selector(\"div.stat-container > div.left\").text\n print(f\"<{num}>\")\n print(f\"가수 : {title}\")\n print(f\"소속사 : {team}\")\n print(f\"wordIndex : {worldindex}\")\n print(\"-----------------------------------------\")\n num += 1","repo_name":"Oseojin/KoreaCodePair_Hackathon","sub_path":"10-웹크롤링-헌터차트.py","file_name":"10-웹크롤링-헌터차트.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1951042518","text":"import numpy as np\n\n# przykładowe\nNNZ = 10 # liczba niezerowych\nM = N = 5 # wymiary macierzy N x M\n\nA_OG = np.matrix ([[1, 0, 0],\n [0, 4, 1],\n [0, 0, 3],\n [0, 0, 2]])\n\n### Współrzedne\n\nA1 = [{'x': 0, 'y': 0, 'v': 0}]\nA2 = {(0, 0): 0}\nA3 = np.zeros((3, NNZ))\n# A3[0] - x\n# A3[1] - y\n# A3[2] - v\n\ndef to_coords(A) -> np.ndarray:\n A = np.asarray(A)\n N, M = A.shape\n A3 = []\n for y in range(M):\n for x in range(N):\n if A[x][y] !=0:\n A3.append([x, y, A[x][y]])\n A3 = np.asarray(A3)\n return A3\n\nA_C = to_coords(A_OG)\nprint(A_C)\n\n### CSC\nAV = np.zeros(NNZ) # wartości wpisywane kolumnami\nAI = np.zeros(NNZ) # numery wierszy odpowiadające wartościom w AV\nAJ = np.zeros(M) # w którym miejscu kończy się kolumna\n\ndef to_csc(A):\n A = np.asarray(A)\n N, M = A.shape\n\n AV = [] # wartości wpisywane kolumnami\n AI = [] # numery wierszy odpowiadające wartościom w AV\n AJ = np.zeros(M) # w którym miejscu kończy się kolumna\n i = 0\n\n for y in range(M):\n for x in range(N):\n if A[x][y] != 0:\n AV.append(A[x][y])\n AI.append(x)\n i += 1\n AJ[y] = i-1\n AV = np.asarray(AV)\n AI = np.asarray(AI)\n return (AV, AI, AJ)\n\nA_CSC = to_csc(A_OG)\nprint(A_CSC[0])\nprint(A_CSC[1])\nprint(A_CSC[2])\n\nclass SparseVector:\n def __init__(self, A=((),)):\n A = np.asarray(A)\n if len(A.shape) > 2:\n raise Exception(\"To many dimensions\")\n elif len(A.shape) == 2:\n if A.shape[0] != 1 and A.shape[1] != 1:\n raise Exception(\"To many dimensions\")\n else:\n A = A.flatten()\n\n self.v = []\n self.x = []\n for x in range(A.shape[0]):\n if A[x] != 0:\n self.x.append(x)\n self.v.append(A[x])\n\n def __len__(self):\n return len(self.v)\n\n def __iter__(self):\n for x in range(len(self)):\n yield (self.x[x], self.v[x])\n\n def __getitem__(self, item):\n return self.x[item], self.v[item]\n\n def get_value(self, key):\n for i, x in enumerate(self.x):\n if x == key:\n return self.v[i]\n raise KeyError(key)\n\n def __truediv__(self, other):\n nv = SparseVector()\n nv.v = [v/other for v in self.v]\n nv.x = [x for x in self.x]\n return nv\n\n def __mul__(self, other):\n nv = SparseVector()\n nv.v = [v * other for v in self.v]\n nv.x = [x for x in self.x]\n\n return nv\n\n def __add__(self, other):\n nv = SparseVector()\n for x1 in range(len(self)):\n found = False\n for x2 in range(len(other)):\n if self.x[x1] == other.x[x2]:\n nv.v.append(self.v[x1] + other.v[x2])\n nv.x.append(self.x[x1])\n found = True\n break\n if not found:\n nv.v.append(self.v[x1])\n nv.x.append(self.x[x1])\n for i, x in enumerate(other.x):\n if x not in nv.x:\n nv.v.append(other.v[i])\n nv.x.append(x)\n return nv\n\n def dot(self, other):\n nv = SparseVector()\n for x1 in range(len(self)):\n for x2 in range(len(other)):\n if self.x[x1] == other.x[x2]:\n nv.v.append(self.v[x1] * other.v[x2])\n nv.x.append(self.x[x1])\n break\n return nv\n\nclass SparseMatrix:\n def __init__(self, A=((),)):\n A = np.asarray(A)\n self.shape = A.shape\n if len(A.shape) > 2:\n raise Exception(\"To many dimensions\")\n\n self.v = []\n self.x = []\n self.y = []\n for x in range(A.shape[0]):\n for y in range(A.shape[1]):\n if A[x][y] != 0:\n self.x.append(x)\n self.v.append(A[x][y])\n self.y.append(y)\n\n def __len__(self):\n return len(self.v)\n\n def __iter__(self):\n for x in range(len(self)):\n yield self.x[x], self.y[x], self.v[x]\n\n def __getitem__(self, item):\n return self.x[item], self.y[item], self.v[item]\n\n def get_value(self, key):\n for i in range(len(self.x)):\n if self.x[i] == key[0] and self.y[i] == 
key[1]:\n return self.v[i]\n raise KeyError(key)\n\n def __truediv__(self, other):\n nv = SparseMatrix()\n nv.v = [v/other for v in self.v]\n nv.x = [x for x in self.x]\n nv.y = [y for y in self.y]\n return nv\n\n def __mul__(self, other):\n nv = SparseMatrix()\n nv.v = [v * other for v in self.v]\n nv.x = [x for x in self.x]\n nv.y = [y for y in self.y]\n return nv\n\n def __add__(self, other):\n if self.shape != other.shape:\n raise Exception(\"Wrong matrix dimensions\")\n nv = SparseMatrix()\n nv.shape = self.shape\n for x1 in range(len(self)):\n found = False\n for x2 in range(len(other)):\n if self.x[x1] == other.x[x2] and self.y[x1] == other.y[x2]:\n nv.v.append(self.v[x1] + other.v[x2])\n nv.x.append(self.x[x1])\n nv.y.append(self.y[x1])\n found = True\n break\n if not found:\n nv.v.append(self.v[x1])\n nv.x.append(self.x[x1])\n nv.y.append(self.y[x1])\n\n for i, x in enumerate(other.x):\n exist = False\n for j in range(len(self)):\n\n if x == self.x[j] and other.y[i] == self.y[j]:\n exist = True\n break\n if not exist:\n nv.v.append(other.v[i])\n nv.x.append(x)\n nv.y.append(other.y[i])\n return nv\n\n def dot(self, other):\n if self.shape[1] != other.shape[0]:\n raise Exception(\"Wrong matrix dimensions\")\n\n nv = SparseMatrix()\n nv.shape = (self.shape[1], other.shape[0])\n for k1, v1 in enumerate(self.v):\n for k2, v2 in enumerate(other.v):\n if self.y[k1] == other.x[k2]:\n exist = False\n for i in range(len(nv)):\n if nv.x[i] == self.x[k1] and nv.y[i] == other.y[k2]:\n exist = True\n nv.v[i] += v1 * v2\n break\n if not exist:\n nv.v.append(v1 * v2)\n nv.x.append(self.x[k1])\n nv.y.append(other.y[k2])\n\n return nv\n\n def __str__(self):\n res = \"\"\n for i in range(len(self)):\n res += str((self.x[i], self.y[i], self.v[i])) + '\\n'\n return res\n\nA = np.matrix([[1], [2], [3]])\nprint(A)\nA = np.asarray(A)\nA = A.flatten()\nprint(A)\n\nSM = SparseMatrix(A_OG)\nprint(SM)\n\nA_OG2 = np.matrix ([[1, 0, 0],\n [0, 4, 1],\n [0, 0, 3],\n [0, 10, 2]])\nA_OG3 = np.matrix ([[1, 0, 0],\n [0, 4, 1],\n [0, 10, 2]])\n\nSM3 = SparseMatrix(A_OG3)\nSM2 = SparseMatrix(A_OG2)\nSM_Add = SM + SM2\nprint(SM_Add)\nSM_Dot = SM.dot(SM3)\nprint(SM_Dot)","repo_name":"Roshoy/Mownit","sub_path":"Iteracyjne/rzedkie_macierze.py","file_name":"rzedkie_macierze.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34945702821","text":"\nfrom typing import List, Sequence, Union, Optional\n\nimport pytest\nimport ulist as ul\nfrom ulist.utils import check_test_result\n\n\n@pytest.mark.parametrize(\n \"test_method, nums, expected_value\",\n [\n ('__repr__', [1, 2], 'IndexList([1, 2])'),\n ('__repr__', range(0, 100), 'IndexList([0, 1, 2, ..., 97, 98, 99])'),\n\n ('__str__', [1, 2], '[1, 2]'),\n ('__str__', range(0, 100), '[0, 1, 2, ..., 97, 98, 99]'),\n\n ('back', [1, 2, 5], 5),\n\n ('to_list', [1, 2], [1, 2]),\n ],\n)\ndef test_methods(\n test_method: str,\n nums: Sequence[int],\n expected_value: Union[Optional[int], List[Optional[int]]],\n) -> None:\n dtype = 'IndexList'\n arr = ul.IndexList(nums)\n result = getattr(arr, test_method)()\n if hasattr(result, 'to_list'):\n result = result.to_list()\n check_test_result(dtype, test_method, result, expected_value)\n","repo_name":"Rust-Data-Science/ulist","sub_path":"tests/test_index.py","file_name":"test_index.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"}
+{"seq_id":"39182095048","text":"\"\"\"Given a list of numbers, return True if first and \nlast number of a list is same\"\"\"\n\nn = int(input(\"number of elements in a list= \"))\nl = []\nfor i in range(0,n):\n x = int(input(\"Enter elements : \"))\n l.append(x)\nif l[0]==l[-1]:\n print(\"True\")\nelse:\n print(\"False\")","repo_name":"prasoonsoni/Python-Questions-1st-Semester","sub_path":"EXERCISES/Given a list of numbers, return True if first and last number of a list is same.py","file_name":"Given a list of numbers, return True if first and last number of a list is same.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"24752397050","text":"import random\nfrom player_base import PlayerBase\n\n\nclass Player(PlayerBase):\n \"\"\"\n A model for stock player in the market.\n \"\"\"\n\n #############################\n # Overrided Object property #\n #############################\n\n @property\n def target_sell_price(self):\n \"\"\"\n Return the target price for sell.\n Returning a None value means no price adjustment in next period.\n \"\"\"\n return super(Player, self).target_sell_price\n\n @property\n def target_buy_price(self):\n \"\"\"\n Return the target price for buy.\n Returning a None value means no price adjustment in next period.\n \"\"\"\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None\n","repo_name":"DON1101/Simulators","sub_path":"Stock/player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28896142992","text":"import json \nimport pandas as pd\nimport os\n\n# 读取 json 返回 df\ndef load_json(filepath):\n with open(filepath) as json_file:\n data = json.load(json_file)\n\n words_result = data['words_result']\n\n list = []\n for word in words_result:\n list.append([word['words'], \n word['location']['top'],\n word['location']['left'],\n word['location']['width'],\n word['location']['height'],\n ])\n\n df = pd.DataFrame(list, columns=['words', 'top', 'left', 'width', 'height'])\n return df\n df.to_csv(os.path.splitext(filepath)[0] + '.csv', index=False)","repo_name":"zman2013/extract_table_by_ocr","sub_path":"src/load_json.py","file_name":"load_json.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7250845421","text":"import sys\nfrom colorama import Fore,Style\nimport ai\nfrom utils import get_coord_count\nimport minimax\n\nAVAILABLE_GAME_MODES = {\n \"RANDOM_CHOICE_AI\": \"RANDOM_CHOICE_AI\",\n \"WINNING_MOVE_AI\": \"WINNING_MOVE_AI\",\n \"BLOCK_WINNING_MOVE_AI\": \"BLOCK_WINNING_MOVE_AI\",\n \"MINIMAX_AI\": \"MINIMAX_AI\",\n \"TWO_PLAYER\": \"TWO_PLAYER\"\n}\n\nallowed_states = [\n \"X\",\n \"O\"\n]\n\ndef new_board():\n #return a new board state\n return [[None for i in range(3)] for i in range(3)]\n\ndef render(board):\n display_states = {\n None: \" \",\n \"X\": Fore.GREEN + \"X\",\n \"O\": Fore.YELLOW + \"O\"\n }\n\n lines = []\n print(Fore.LIGHTWHITE_EX + \" 0 1 2 \")\n print(Fore.LIGHTWHITE_EX + \" ------- \")\n for k,row in enumerate(board):\n line = Fore.LIGHTWHITE_EX + str(k)+\"|\"\n for col in row:\n line += \" \" + display_states[col]\n line += Fore.LIGHTWHITE_EX + \" |\"\n lines.append(line)\n print(\"\\n\".join(lines))\n print(Fore.LIGHTWHITE_EX + \" ------- \")\n print(Style.RESET_ALL)\n\n\ndef get_human_moves():\n x_coord = input(\"What is your move's X co-ordinate?: \")\n y_coord = input(\"What is your move's Y co-ordinate?: \")\n\n if int(x_coord) < 0 or int(x_coord) > 2 or int(y_coord) < 0 or int(y_coord) > 2:\n print(\"Error: Invalid input! Co-ordinates must have a value between 0 and 2.\")\n sys.exit(0)\n\n return (x_coord, y_coord)\n\ndef get_move(board, mode, player_id):\n\n if player_id % 2 != 0:\n if mode == AVAILABLE_GAME_MODES[\"RANDOM_CHOICE_AI\"]:\n return ai.random_move(board)\n elif mode == AVAILABLE_GAME_MODES[\"WINNING_MOVE_AI\"]:\n return ai.finds_winning_moves_ai(board, allowed_states[player_id % 2])\n elif mode == AVAILABLE_GAME_MODES[\"BLOCK_WINNING_MOVE_AI\"]:\n my_move = allowed_states[player_id % 2]\n block_move = allowed_states[0]\n \n if my_move == allowed_states[0]:\n block_move = allowed_states[1]\n \n return ai.blocks_winning_moves_ai(board, my_move, block_move)\n elif mode == AVAILABLE_GAME_MODES[\"MINIMAX_AI\"]:\n return minimax.minimax_ai(board, allowed_states[player_id % 2])\n elif mode == AVAILABLE_GAME_MODES[\"TWO_PLAYER\"] or player_id % 2 == 0:\n return get_human_moves()\n\n\ndef is_valid_move(board, coordinates):\n x_coord = int(coordinates[0])\n y_coord = int(coordinates[1])\n \n if board[x_coord][y_coord] is not None:\n print(\"Error! Can't make move ({},{}), square already taken!\".format(coordinates[0], coordinates[1]))\n sys.exit(0)\n\n return True\n\ndef make_move(board, coordinates, user_move):\n\n is_valid_move(board ,coordinates)\n\n x_coord = int(coordinates[0])\n y_coord = int(coordinates[1])\n\n board[x_coord][y_coord] = user_move\n\n\ndef has_winner(board):\n grouped_coords = get_coord_count()\n\n for line in grouped_coords:\n board_values = []\n for (x,y) in line:\n board_values.append(board[x][y])\n if len(set(board_values)) == 1 and board_values[0] is not None:\n return board_values[0]\n\n return None\n\n\ndef is_board_full(board):\n for row in board:\n for col in row:\n if col is None:\n return False\n \n return True\n\ndef select_mode():\n user_inp = input(\"Enter a choice: \\n0 for Random choice AI,\\n1 for Winning Moves AI,\\n2 for Block winning moves AI,\\n3 for minimax AI,\\nAny other number for Two player mode: \\n\")\n\n if int(user_inp) > 3:\n print(\"Invalid input! 
Please enter a valid input!\")\n sys.exit(0)\n if int(user_inp) == 0:\n return AVAILABLE_GAME_MODES[\"RANDOM_CHOICE_AI\"]\n elif int(user_inp) == 1:\n return AVAILABLE_GAME_MODES[\"WINNING_MOVE_AI\"]\n elif int(user_inp) == 2:\n return AVAILABLE_GAME_MODES[\"BLOCK_WINNING_MOVE_AI\"]\n elif int(user_inp) == 3:\n return AVAILABLE_GAME_MODES[\"MINIMAX_AI\"]\n else:\n return AVAILABLE_GAME_MODES[\"TWO_PLAYER\"]\n\n\ndef main():\n\n selected_mode = select_mode()\n user_0 = input(\"Enter Player 1's name: \")\n user_1 = \"Zelda\"\n\n print(selected_mode)\n\n if selected_mode == AVAILABLE_GAME_MODES[\"TWO_PLAYER\"]:\n user_1 = input(\"Enter Player 2's name: \")\n\n print(\"Player 1 will use X\")\n print(\"Player 2 will use O\")\n\n user_player_map = {\n \"X\": user_0,\n \"O\": user_1\n }\n\n player_id = 0\n\n board = new_board()\n # board = [\n # [\"X\", \"O\", \"X\"],\n # [\"O\", None, None],\n # [\"O\", \"X\", None]\n # ]\n\n move_coords = None\n\n while True:\n\n render(board)\n user_move = allowed_states[player_id % 2]\n\n move_coords = get_move(board, selected_mode, player_id)\n\n make_move(board, move_coords, user_move)\n\n winner = has_winner(board)\n if winner:\n render(board)\n print(Fore.GREEN + \"The WINNER is {}!\".format(user_player_map[winner]))\n break\n\n if is_board_full(board):\n render(board)\n print(Fore.MAGENTA + \"It is a DRAW!\")\n break\n\n player_id += 1\n \n print(Style.RESET_ALL)\n\n\nif __name__ == \"__main__\":\n \n main()","repo_name":"aki21j/tic-tac-toe","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43047178450","text":"from datetime import datetime, timedelta\nfrom typing import Any, Dict\n\nfrom models import Event\n\nfrom core.config import COLORS\n\n\ndef ceil_dt(dt: datetime, delta: timedelta) -> str:\n \"\"\"round up function\"\"\"\n return (dt + (datetime.min - dt) % delta).strftime(\"%H:%M\")\n\ndef set_style_button(state: str, x: int, y: int, offset: int) -> str:\n \"\"\"function sets styles for buttons\"\"\"\n return f\"\"\"\n div:nth-child({offset}) > div:nth-child({y}) > div:nth-child(1) > div > div:nth-child({x}) > div > button {{\n border-color: {COLORS[state]};\n border-width: 2px;\n\n width:100px;\n border-radius: 10px;\n height:2em;\n color:#fffffff;\n }}\n div:nth-child({offset}) > div:nth-child({y}) > div:nth-child(1) > div > div:nth-child({x}) > div > button:hover {{\n background-color: #ff4c4c;\n border-width: 2px;\n width:100px;\n border-radius: 10px;\n height:2em;\n color:#ffffff;\n }} \n \"\"\"\n\n\ndef calculate_index(start: datetime, end: datetime) -> float:\n \"\"\"Function counts the number of time slots between two dates\"\"\"\n return (end - start) / timedelta(minutes=30)\n\n\ndef to_readable_format(date: datetime) -> str:\n \"\"\"the function converts the date to a human-readable format\"\"\"\n return date.strftime(r\"%d.%m.%Y %H:%M\")\n\n\ndef format_events(event: Event) -> str:\n \"\"\"event formatting function\"\"\"\n return f\"{to_readable_format(event.start_date)} — {to_readable_format(event.end_date)} Комната: {event.room} место: {event.place}\"\n\n\ndef format_dict_events(event: Dict[str, Any]) -> str:\n \"\"\"event formatting function\"\"\"\n return f\"{to_readable_format(event.get('start_date'))} — {to_readable_format(event.get('end_date'))} Комната: {event.get('room')} место: {event.get('place')}\"\n","repo_name":"selezGit/novatickets","sub_path":"src/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24459848790","text":"import bitarray\n\nspecial_round = [0, 1, 8, 15] # define special rounds where the block is shift one position on the left\n\npc1 = [57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36,\n 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4]\n\npc2 = [14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47,\n 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32] #hard code the compression p-box\n\n\ndef round_key_generator(cipherkey_64bit):\n cipherkey_56bit = parity_drop(cipherkey_64bit) # pass the cipherkey through a parity drop function\n left = cipherkey_56bit[:28] # divide the key into two\n right = cipherkey_56bit[28:]\n\n rkey = []\n # rkey = bitarray.bitarray(rkey)\n\n for i in range(16):\n new_left = \"\" # new left is the left block to be used for the next round\n new_right = \"\" # new right is the right block to be used for the next round\n\n if i in special_round:\n new_left = shift_one(left) # since it is a special round, shift left by one position\n new_right = shift_one(right)\n else:\n new_left = shift_two(left) # since it is a special round, shift left by two positions\n new_right = shift_two(right)\n\n new_56bit_key = new_left + new_right\n rkey.append(compression_pbox(new_56bit_key))\n left = new_left\n right = new_right\n\n return rkey\n\n\ndef shift_one(input_28bit):\n output_28bit = input_28bit[1:] + input_28bit[:1]\n return output_28bit\n\n\ndef shift_two(input_28bit):\n output_28bit = input_28bit[2:] + input_28bit[:2]\n return output_28bit\n\n\ndef parity_drop(input_64bit):\n input_64bit = bitarray.bitarray(input_64bit)\n output_56bit = []\n output_56bit = bitarray.bitarray(output_56bit)\n\n for i in pc1:\n output_56bit.append(input_64bit[i - 1])\n\n return output_56bit\n\n\ndef compression_pbox(input_56bit):\n output_48bit = []\n output_48bit = bitarray.bitarray(output_48bit)\n\n for i in pc2:\n output_48bit.append(input_56bit[i - 1]) # perform the permutation\n\n return output_48bit\n\n","repo_name":"dhruvparekh01/PySafe","sub_path":"key_scheduler.py","file_name":"key_scheduler.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"39510307329","text":"import MetaTrader5 as mt5\nfrom datetime import datetime\nimport pandas as pd\n\npd.set_option('display.max_columns', 500) # number of columns to be displayed\npd.set_option('display.width', 1500) # max table width to display\n# 显示有关MetaTrader 5程序包的数据\nprint(\"MetaTrader5 package author: \", mt5.__author__)\nprint(\"MetaTrader5 package version: \", mt5.__version__)\nprint()\n# 建立与MetaTrader 5程序端的连接\nif not mt5.initialize():\n print(\"initialize() failed, error code =\", mt5.last_error())\n quit()\n\n# 获取历史中的交易数量\nfrom_date = datetime(2020, 1, 1)\nto_date = datetime.now()\n# 获取指定时间间隔内且名称包含\"GBP\"的交易品种的交易\ndeals = mt5.history_deals_get(from_date, to_date, group=\"*GBP*\")\nif deals == None:\n print(\"No deals with group=\\\"*USD*\\\", error code={}\".format(mt5.last_error()))\nelif len(deals) > 0:\n print(\"history_deals_get({}, {}, group=\\\"*GBP*\\\")={}\".format(from_date, to_date, len(deals)))\n\n# 获取名称中既不包含\"EUR\"也不包含\"GBP\"的交易品种的交易\ndeals = mt5.history_deals_get(from_date, to_date, group=\"*,!*EUR*,!*GBP*\")\nif deals == None:\n print(\"No deals, error code={}\".format(mt5.last_error()))\nelif len(deals) > 0:\n print(\"history_deals_get(from_date, to_date, group=\\\"*,!*EUR*,!*GBP*\\\") =\", len(deals))\n # display all obtained deals 'as is'\n for deal in deals:\n print(\" \", deal)\n print()\n # display these deals as a table using pandas.DataFrame\n df = pd.DataFrame(list(deals), columns=deals[0]._asdict().keys())\n df['time'] = pd.to_datetime(df['time'], unit='s')\n print(df)\nprint(\"\")\n\n# 获取#530218319持仓相关的所有交易\nposition_id = 530218319\nposition_deals = mt5.history_deals_get(position=position_id)\nif position_deals == None:\n print(\"No deals with position #{}\".format(position_id))\n print(\"error code =\", mt5.last_error())\nelif len(position_deals) > 0:\n print(\"Deals with position id #{}: {}\".format(position_id, len(position_deals)))\n # display these deals as a table using pandas.DataFrame\n df = pd.DataFrame(list(position_deals), columns=position_deals[0]._asdict().keys())\n df['time'] = pd.to_datetime(df['time'], unit='s')\n print(df)\n\n# 断开与MetaTrader 5程序端的连接\nmt5.shutdown()","repo_name":"ThrallOtaku/stockAnalysis","sub_path":"MT5_demo/demos/history_deals_get.py","file_name":"history_deals_get.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"32571796648","text":"\n\nmy_file = open(\"fruits.txt\", \"r\") # open file in read mode\n\n# print(my_file.read()) # read the file\ncontent = my_file.read()\nprint(content)\n\n# move the cursor to the beginning of the file\nmy_file.seek(0)\n\nmy_file.close() # close the file\n\n\nwith open (\"fruits.txt\", \"r\") as my_file:\n content = my_file.read()\n \nprint(content)","repo_name":"mayfiete/python_mega_course","sub_path":"reading_text_from_file.py","file_name":"reading_text_from_file.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"98867226","text":"parties = []\nflag = True\nwith open('input.txt', 'r', encoding='utf-8') as in_file:\n for line in in_file:\n line = line.strip()\n if line == 'VOTES:':\n flag = False\n votes = [0]*len(parties)\n elif flag:\n if line == 'PARTIES:':\n continue\n parties.append(line)\n else:\n index = parties.index(line)\n votes[index] += 1\nresult = []\nc = len(parties)\nfor i in range(c):\n result.append((-votes[i], parties[i]))\nresult.sort()\nfor i in range(c):\n print(result[i][1])\n","repo_name":"virdgin/Lean-and-Traning","sub_path":"couersera/python/week 6/sort_win.py","file_name":"sort_win.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19052391960","text":"#coding:utf-8\n#Write a Python program to parse a given CSV string and get the list of lists of string values. Use csv.reader\nimport csv\ncsv_string = \"\"\"1,2,3\n4,5,6\n7,8,9\n\"\"\"\nprint(csv_string)\nlines = csv_string.splitlines()\nprint(lines)\nreader = csv.reader(lines)\nparsed_csv = list(reader)\nprint(parsed_csv)","repo_name":"DonaFidele/PythonExercices","sub_path":"Modules/exo_23.py","file_name":"exo_23.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}