diff --git "a/244.jsonl" "b/244.jsonl" new file mode 100644--- /dev/null +++ "b/244.jsonl" @@ -0,0 +1,442 @@ +{"seq_id":"41597244920","text":"_ts_filetype = FileType([\".ts\", \".js\"])\n_json_filetype = FileType([\".json\"])\n\nTSCONFIG = \"\"\"\n{\n \"compilerOptions\": {\n \"experimentalDecorators\": true,\n \"module\": \"commonjs\",\n \"declaration\": true,\n \"noImplicitAny\": true,\n \"noEmitOnError\": true,\n \"target\": \"es5\",\n \"lib\": [\"es5\", \"es6\", \"es2015.collection\", \"es2015.iterable\", \"dom\"],\n \"jsx\": \"react\",\n \"types\": [\"node\", \"mocha\"],\n \"strictNullChecks\": true\n },\n \"files\": [\n {files}\n ]\n}\n\"\"\"\n\ndef ts_library_impl(ctx):\n node = ctx.executable._node\n tsc = ctx.file._tsc\n tsickle = ctx.file._tsickle\n compiler = tsickle if ctx.attr.sick else tsc\n tsconfig = ctx.file.tsconfig\n\n srcs = ctx.files.srcs\n data = []\n\n for d in ctx.attr.data:\n for file in d.files:\n data.append(file)\n\n for dep in ctx.attr.deps:\n lib = dep.ts_library\n srcs += lib.srcs\n\n srcs_js = [ctx.new_file(\"%s.js\" % src.basename.rsplit('.', 1)[0]) for src in srcs]\n\n args = [\n node.path,\n compiler.path,\n \"--outdir\", \"%s/%s\" % (ctx.bin_dir.path, ctx.label.package)\n ]\n\n # if ctx.attr.sick:\n # args += [\"--externs=\" + extfile.path]\n # args += [\"--\"]\n\n #args += [\"--module\", \"amd\"]\n\n if (srcs):\n for file in srcs:\n args.append(file.path)\n else:\n args += [\"-p\", tsconfig.dirname]\n\n print(\"args: %r\" % args)\n\n ctx.action(\n mnemonic = \"TypesciptCompile\",\n inputs = [node, compiler, tsc, tsconfig] + srcs,\n outputs = srcs_js,\n command = \" \".join(args),\n env = {\n \"NODE_PATH\": tsc.dirname + \"/..\",\n },\n )\n\n return struct(\n files = set(srcs_js),\n ts_library = struct(\n srcs = srcs,\n ),\n )\n\nts_library = rule(\n ts_library_impl,\n attrs = {\n \"srcs\": attr.label_list(\n allow_files = _ts_filetype,\n ),\n \"data\": attr.label_list(\n allow_files = True,\n cfg = \"data\",\n ),\n \"deps\": attr.label_list(\n providers = [\"ts_library\"],\n ),\n \"tsconfig\": attr.label(\n single_file = True,\n allow_files = _json_filetype,\n default = Label(\"//ts:tsconfig.json\"),\n ),\n \"sick\": attr.bool(\n default = False,\n ),\n \"_node\": attr.label(\n default = Label(\"@org_pubref_rules_node_toolchain//:node_tool\"),\n single_file = True,\n allow_files = True,\n executable = True,\n cfg = \"host\",\n ),\n \"_tsc\": attr.label(\n default = Label(\"@typescript//:bin/tsc\"),\n single_file = True,\n allow_files = True,\n cfg = \"host\",\n ),\n \"_tsickle\": attr.label(\n default = Label(\"@typescript//:bin/tsickle\"),\n single_file = True,\n allow_files = True,\n cfg = \"host\",\n ),\n },\n)\n","repo_name":"pubref/rules_typescript","sub_path":"ts/internal/ts_library.bzl","file_name":"ts_library.bzl","file_ext":"bzl","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71495018833","text":"\nimport numpy as np\nimport math\n\n\n\ndef longitudinal_evolve(turns, phi_list_ini, dE_list_ini, sin_phi_s=0, E0_ini=100e9, mass=938e6, e_volt=5e6, alphac=0.002, harm=360.0, update_eta=True, energy_change=False, gamma_jump=[0, 0, 0.0], phase_jump=[-1,0.0]):\n '''\n The function of longitudinal map.\n \n Parameters:\n turns: number of turns used in the simulation\n phi_list_ini: list of values for initial phase\n dE_list_ini: list of values for initial energy deviation\n sin_phi_s: sine value of the phi_s, default 0\n E0_ini: Initial energy at tune zero, default 
100e9 eV\n mass: Rest energy of the particle, default 938e6 eV\n e_volt: The voltage of the cavity including transit time factor, default 5e6 V\n alphac: \\alpha_c of the ring, default 0.002\n harm: harmonic of the ring, default 1\n update_eta: Always update phase slip factor due to energy change, default True\n energy_change: The beam get acceleration/ deceleration due to non zero sin_phi_s, default False\n \n return: the tuple of stacked numpy arrays: (phi, de, delta)\n '''\n E0 = E0_ini\n p0_ini=np.sqrt(E0_ini*E0_ini-mass*mass)\n p0=p0_ini\n gamma0 = E0 / mass\n beta0 = math.sqrt(1 - 1.0 / gamma0 / gamma0)\n phi_list=[]\n phi_list.append(phi_list_ini)\n\n dE_list=[]\n dE_list.append(dE_list_ini)\n\n delta_list=[]\n e_temp=np.array(dE_list_ini)+E0\n\n dl_ini= np.sqrt(e_temp*e_temp-mass*mass)/p0_ini-1.0\n delta_list.append(dl_ini)\n\n #nus = math.sqrt(harm * abs(ita) * e_volt / 2 / np.pi / E0_ini / beta / beta)\n\n for ii in range(turns):\n #yield (phi_list, delta_list)\n pl=phi_list[-1]*1.0\n dEl=dE_list[-1]*1.0\n dEl += e_volt * (np.sin(pl) - sin_phi_s )\n\n if energy_change:\n E0 += e_volt * sin_phi_s\n p0 = np.sqrt(E0 * E0 - mass * mass)\n \n dl = np.sqrt((E0 + dEl) * (E0 + dEl) - mass * mass) / p0 - 1\n gamma0 = E0 / mass\n beta0 = math.sqrt(1 - 1.0 / gamma0 / gamma0)\n\n if update_eta:\n delta_gamma = dEl / mass\n else:\n delta_gamma =0\n \n \n eta = alphac - 1.0/(gamma0+delta_gamma)/(gamma0+delta_gamma)\n\n if ii > gamma_jump[0] and ii < gamma_jump[1]:\n eta += gamma_jump[2]\n \n \n\n pl += 2.0 * np.pi * harm * eta * dl\n \n if ii == phase_jump[0]:\n pl += phase_jump[1]\n phi_list.append(pl)\n dE_list.append(dEl)\n delta_list.append(dl)\n\n return np.vstack(phi_list), np.vstack(dE_list), np.vstack(delta_list)\n\n\n'''\n\nE00=100e9 \nmass=938e6\ngamma=E00/mass\nbeta=pylab.sqrt(1-1.0/gamma/gamma)\n\neV=5e6\nalphac=0.002\ngammat=pylab.sqrt(1/alphac)\n\nh=360\nita=alphac-1/(E00*E00/mass/mass)\nita0=ita\nprint(ita)\nnus=pylab.sqrt(h*abs(ita)*eV/2/pylab.pi/E00/beta/beta)\nprint(nus)\n\n\nturns=5000\n#inideltas=[0.001, 0.003, 0.008, 0.013, 0.017, 0.0175]\n#iniphis=[phi_s, phi_s, phi_s, phi_s, phi_s, phi_s]\ninideltas=[0.003]\niniphis=[phi_s, phi_s, phi_s,-pylab.pi*0.5,pylab.pi-phi_s]\n#iniphis=[phi_s, phi_s, phi_s,pylab.pi*1.5,pylab.pi-phi_s]\nfor id in range(len(inideltas)):\n ini_delta=inideltas[id]\n ini_phi=iniphis[id]\n deltalist=[ini_delta,]\n philist=[ini_phi,]\n plotlist=[0,]\n E0=E00\n mass=938e6\n gamma=E0/mass\n beta=pylab.sqrt(1-1.0/gamma/gamma)\n beta0=beta\n eV=5e6\n relative_omega=1\n alphac=0.002\n gammat=pylab.sqrt(1/alphac)\n\n h=360\n ita=alphac-1/(E0*E0/mass/mass)\n for i in range(turns):\n dE0=deltalist[-1]*E0*beta*beta\n #dE0=deltalist[-1]\n phi0=philist[-1]\n \n dE1=dE0+eV*(pylab.sin(phi0)-pylab.sin(phi_s))\n oldE0=E0\n E0+=eV*pylab.sin(phi_s)\n \n delta_omega=-ita*relative_omega*(E0-oldE0)/oldE0/beta/beta\n relative_omega+=delta_omega\n gamma=E0/mass\n beta=pylab.sqrt(1-1.0/gamma/gamma)\n delta1=dE1/(E0*beta*beta)\n \n ita=alphac-1/gamma/gamma\n phi1=phi0+2*pylab.pi*h*ita*delta1\n plotlist.append(dE1/relative_omega)\n deltalist.append(delta1)\n philist.append(phi1)\n\n pylab.plot(philist[1:],plotlist[1:])\n pylab.plot(philist[0:1],deltalist[0:1],'r+')\n\n\npylab.xlabel(\"phase\")\npylab.ylabel(\"energy deviation\")\npylab.title('Turn {}'.format(turns))\n#pylab.xlim([-pylab.pi/2,1.5*pylab.pi])\n#pylab.ylim([-0.02,0.02])\npylab.show()\n 
\n'''\n","repo_name":"yuehao/USPAS_AP_ComputerLab","sub_path":"longitudinal.py","file_name":"longitudinal.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"27531103029","text":"from rest_framework import serializers\nfrom core.models import Post\n\n\nclass PostSerializer(serializers.ModelSerializer):\n\n user = serializers.ReadOnlyField(source=\"user.email\")\n\n class Meta:\n model = Post\n fields = (\n \"id\",\n \"text\",\n \"user\",\n \"created\",\n \"likes_count\",\n )\n read_only_fields = (\"id\",)\n","repo_name":"tuipik/starnavi","sub_path":"app/post/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"422477552","text":"import sys\nfrom bisect import bisect_left, bisect_right\n\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**7)\n\nINF = 1<<60\nMOD = 10**9+7\n#MOD = 998244353\n\n\ndef main():\n n, m, q = map(int, input().split())\n nimotu = []\n for _ in range(n):\n w, v = map(int, input().split())\n nimotu.append((v, w))\n nimotu.sort(reverse=True)\n\n X = list(map(int, input().split()))\n\n\n\n for _ in range(q):\n l, r = map(int, input().split())\n l -= 1\n box = X[:l] + X[r:]\n if len(box) == 0:\n print(0)\n continue\n\n box.sort()\n seen_box = [False]*len(box)\n ans = 0\n for v, w in nimotu:\n flag = False\n for i in range(len(box)):\n if flag:\n break\n if box[i] >= w:\n if not seen_box[i]:\n seen_box[i] = True\n ans += v\n flag = True\n else:\n continue\n print(ans)\n\n\n \nif __name__ == '__main__':\n main()","repo_name":"isseii10/atcoder","sub_path":"abc/195/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"17972349503","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom profileApp.models import Industry\nfrom profileApp.serializers import IndustrySerializer\n\n\n@api_view(['GET', 'POST'])\ndef industry_without_id(request):\n \"\"\"\n Retrieve all industries or create new one\n \"\"\"\n if request.method == 'GET': # profileApp requesting data\n industry = Industry.objects.all()\n serializer = IndustrySerializer(industry, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST': # profileApp creating data\n serializer = IndustrySerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef industry_with_id(request, pk):\n \"\"\"\n Retrieve, update or delete a profileApp by id.\n \"\"\"\n try:\n industry = Industry.objects.get(pk=pk)\n except Industry.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = IndustrySerializer(industry)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = IndustrySerializer(industry, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n industry.delete()\n return 
Response(status=status.HTTP_200_OK)\n\n\n","repo_name":"dhritix1999/Covid19-Vaccine-Booking","sub_path":"Project/profileApp/api/industryApi.py","file_name":"industryApi.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"40661493373","text":"import optparse\nimport os\nimport sys\n\nfrom automaton.converters import pydot\n\nfrom ironic.common import states\n\ntop_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),\n os.pardir))\nsys.path.insert(0, top_dir)\n\n\ndef print_header(text):\n print(\"*\" * len(text))\n print(text)\n print(\"*\" * len(text))\n\n\ndef map_color(text, key='fontcolor'):\n \"\"\"Map the text to a color.\n\n The text is mapped to a color.\n\n :param text: string of text to be mapped to a color. 'error' and\n 'fail' in the text will map to 'red'.\n :param key: in returned dictionary, the key to use that corresponds to\n the color\n :returns: A dictionary with one entry, key = color. If no color is\n associated with the text, an empty dictionary.\n \"\"\"\n\n # If the text contains 'error'/'fail' then we'll return red...\n if 'error' in text or 'fail' in text:\n return {key: 'red'}\n else:\n return {}\n\n\ndef main():\n parser = optparse.OptionParser()\n parser.add_option(\"-f\", \"--file\", dest=\"filename\",\n help=\"write output to FILE\", metavar=\"FILE\")\n parser.add_option(\"-T\", \"--format\", dest=\"format\",\n help=\"output in given format (default: png)\",\n default='png')\n parser.add_option(\"--no-labels\", dest=\"labels\",\n help=\"do not include labels\",\n action='store_false', default=True)\n (options, args) = parser.parse_args()\n if options.filename is None:\n options.filename = 'states.%s' % options.format\n\n def node_attrs(state):\n \"\"\"Attributes used for drawing the nodes (states).\n\n The user can perform actions on stable states (and in a few other\n cases), so we distinguish the stable states from the other states by\n highlighting the node. 
Non-stable states are labelled with gray.\n\n This is a callback method used by pydot.convert().\n\n :param state: name of state\n :returns: A dictionary with graphic attributes used for displaying\n the state.\n \"\"\"\n attrs = map_color(state)\n if source.is_stable(state):\n attrs['penwidth'] = 1.7\n else:\n if 'fontcolor' not in attrs:\n attrs['fontcolor'] = 'gray'\n return attrs\n\n def edge_attrs(start_state, event, end_state):\n \"\"\"Attributes used for drawing the edges (transitions).\n\n There are two types of transitions; the ones that the user can\n initiate and the ones that are done internally by the conductor.\n The user-initiated ones are shown with '(via API'); the others are\n in gray.\n\n This is a callback method used by pydot.convert().\n\n :param start_state: name of the start state\n :param event: the event, a string\n :param end_state: name of the end state (unused)\n :returns: A dictionary with graphic attributes used for displaying\n the transition.\n \"\"\"\n if not options.labels:\n return {}\n\n translations = {'delete': 'deleted', 'deploy': 'active'}\n attrs = {}\n attrs['fontsize'] = 12\n attrs['label'] = translations.get(event, event)\n if (source.is_stable(start_state) or 'fail' in start_state\n or event in ('abort', 'delete')):\n attrs['label'] += \" (via API)\"\n else:\n attrs['fontcolor'] = 'gray'\n return attrs\n\n source = states.machine\n graph_name = '\"Ironic states\"'\n graph_attrs = {'size': 0}\n g = pydot.convert(source, graph_name, graph_attrs=graph_attrs,\n node_attrs_cb=node_attrs, edge_attrs_cb=edge_attrs)\n\n print_header(graph_name)\n print(g.to_string().strip())\n\n g.write(options.filename, format=options.format)\n print_header(\"Created %s at '%s'\" % (options.format, options.filename))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"openstack/ironic","sub_path":"tools/states_to_dot.py","file_name":"states_to_dot.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":436,"dataset":"github-code","pt":"84"} +{"seq_id":"39462285986","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\nnl = input().split()\r\nnd = {}\r\na = 0\r\nfor i in range(n):\r\n nd[nl[i]] = 0\r\nfor i in range(n):\r\n s = input().split()\r\n for i in s:\r\n for key, value in nd.items():\r\n if key == i:\r\n nd[i]+=1\r\nnd1 = dict(sorted(nd.items(), key = lambda x:x[1], reverse = True))\r\nfor i,v in nd1.items():\r\n print(i, v)","repo_name":"juns0720/baekjoon","sub_path":"백준/Silver/25325. 
학생 인기도 측정/학생 인기도 측정.py","file_name":"학생 인기도 측정.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"3265632541","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\n# Load positive and negative words from text files\r\ndef load_words(file_path):\r\n with open(file_path, 'r') as file:\r\n words = [line.strip() for line in file]\r\n return words\r\n\r\npositive_words = load_words('positive_words.txt')\r\nnegative_words = load_words('negative_words.txt')\r\n\r\n# Simple sentiment analysis function\r\ndef analyze_sentiment(text):\r\n words = text.lower().split()\r\n positive_count = sum(1 for word in words if word in positive_words)\r\n negative_count = sum(1 for word in words if word in negative_words)\r\n neutral_count = len(words) - positive_count - negative_count\r\n\r\n total_count = len(words)\r\n positive_percentage = (positive_count / total_count) * 100\r\n negative_percentage = (negative_count / total_count) * 100\r\n neutral_percentage = (neutral_count / total_count) * 100\r\n\r\n return positive_percentage, negative_percentage, neutral_percentage\r\n\r\n# Tkinter GUI\r\ndef analyze_button_click():\r\n input_text = input_textbox.get(\"1.0\", \"end-1c\")\r\n positive_percent, negative_percent, neutral_percent = analyze_sentiment(input_text)\r\n result_message = (\r\n f\"Positive: {positive_percent:.2f}%\\n\"\r\n f\"Negative: {negative_percent:.2f}%\\n\"\r\n f\"Neutral: {neutral_percent:.2f}%\"\r\n )\r\n messagebox.showinfo(\"Sentiment Analysis\", result_message)\r\n\r\n# Create the main window\r\nroot = tk.Tk()\r\nroot.title(\"Sentiment Detector\")\r\n\r\n# Create input textbox\r\ninput_textbox = tk.Text(root, height=10, width=40)\r\ninput_textbox.pack(padx=10, pady=10)\r\n\r\n# Create analyze button\r\nanalyze_button = tk.Button(root, text=\"Analyze Sentiment\", command=analyze_button_click)\r\nanalyze_button.pack()\r\n\r\n# Start the GUI event loop\r\nroot.mainloop()\r\n","repo_name":"monds1320/sentiment-analysis-project-","sub_path":"sentimentDetector.py","file_name":"sentimentDetector.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22133462088","text":"from random import choice\n\nn = int(input(\"n? 
\"))\nx, y, dx, dy = 0, 0, 0, 0\n\nfor _ in range(n):\n \n directions = [(0, -10), (10, 0), (0, 10), (-10, 0)]\n opposite = (-dx, -dy)\n if opposite in directions:\n directions.remove(opposite) # No turning back\n\n dx, dy = choice(directions)\n #assert((dx, dy) != opposite) # check, always true\n x += dx\n y += dy\n print(x, y, dx, dy, sep=\"\\t\")","repo_name":"gallons29/PythonUNI","sub_path":"PythonTest/rwalk.py","file_name":"rwalk.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"31558138015","text":"import numpy as np\n\ndef accel_wrapper(x, y, x_vel, y_vel, g):\n def accel(t):\n x_pos = x + x_vel * t\n y_pos = y + y_vel * t + 0.5 * g * t**2\n y_v = y_vel + g * t\n return [x_pos, y_pos, y_v]\n def get_hit_time_for_line(m, c):\n hit_time = [((m*x_vel-y_vel) + np.sqrt((y_vel-m*x_vel)**2 - 4*0.5*g*(y-m*x-c)))/g,\n ((m*x_vel-y_vel) - np.sqrt((y_vel-m*x_vel)**2 - 4*0.5*g*(y-m*x-c)))/g]\n x_p = [x + x_vel * hit_time[0], x + x_vel * hit_time[1]]\n return [x_p, hit_time]\n def get_y_t_for_x(x_pos):\n hit_time = np.array(x_pos - x)/x_vel\n y_p = y + y_vel * hit_time + 0.5 * g * hit_time**2\n return [y_p, hit_time]\n return [accel, get_y_t_for_x, get_hit_time_for_line]\n\nclass Wall():\n def __init__(self, x, y, width, angle, bounce):\n self.x = x\n self.y = y\n self.w = width\n self.angle = angle\n if self.angle == 90:\n self.is_v = True\n else:\n self.is_v = False\n self.bounce = bounce\n self.m = np.tan(self.angle * np.pi / 180)\n self.c = self.y - self.x * self.m\n \n \ndef get_movement(x, y, xv, yv, g, walls):\n movement_intervals = []\n movement_functions = []\n safety = -1\n while True:\n safety += 1\n cur_acc = accel_wrapper(x, y, xv, yv, g)\n hit_times = []\n for w in walls:\n hit_time = 9999999\n if w.is_v:\n hit = cur_acc[1](w.x)\n if hit[1] > 10e-6 and hit[0] >= w.y and hit[0] <= w.y + w.w:\n hit_time = hit[1]\n else:\n hit = cur_acc[2](w.m, w.c)\n for i, ht in enumerate(hit[1]):\n if w.angle < 90:\n if ht > 10e-6 and hit[0][i] >= w.x and hit[0][i] <= w.x + np.cos(w.angle * np.pi / 180) * w.w:\n hit_time = ht\n break\n else:\n if ht > 10e-6 and hit[0][i] <= w.x and hit[0][i] >= w.x + np.cos(w.angle * np.pi / 180) * w.w:\n hit_time = ht\n break\n hit_times.append(hit_time)\n next_hit = np.argmin(hit_times)\n if hit_times[next_hit] == 9999999:\n next_hit = -1\n break\n if len(movement_intervals) > 0:\n movement_intervals.append([movement_intervals[-1][1],\n movement_intervals[-1][1] + hit_times[next_hit]])\n else:\n movement_intervals.append([0, hit_times[next_hit]])\n movement_functions.append(cur_acc[0])\n x, y, yv = cur_acc[0](hit_times[next_hit])\n total_speed = np.sqrt(yv**2 + xv**2)\n in_angle = (180 * (np.arctan2(yv, xv) / np.pi)) % 360\n out_angle = (in_angle + (2 * (walls[next_hit].angle - in_angle))) % 360\n bounce_angle = np.minimum(180 - np.abs(out_angle - in_angle) / 2,\n np.abs(out_angle - in_angle) / 2) / 90\n bounce_fac = 1 - (1 - walls[next_hit].bounce) * bounce_angle\n #print(bounce_fac, in_angle, out_angle, bounce_angle)\n #bounce_fac = walls[next_hit].bounce\n xv = bounce_fac * total_speed * np.cos(out_angle * np.pi / 180)\n yv = bounce_fac * total_speed * np.sin(out_angle * np.pi / 180)\n if safety > 100 or (movement_intervals[-1][1] - movement_intervals[-1][0]) < 0.01:\n break\n return movement_intervals, 
movement_functions\n\n","repo_name":"Lucas-He/Bouncing","sub_path":"bouncing3.py","file_name":"bouncing3.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74290921233","text":"from tkinter import *\nfrom tkinter import ttk\n\n################# cores ###############\nco1 = \"#feffff\" # white/branca\nco2 = \"#6f9fbd\" # blue/azul\nco3 = \"#38576b\" # valor\n\nfundo = \"#3b3b3b\"\nco10 =\"#ECEFF1\"\n\ncor1='#FFAB40'\ncor2='#ff333a'\ncor3='#6bd66f'\ncor4=\"#ab8918\"\n\njanela = Tk()\njanela.title('')\njanela.geometry('235x318')\njanela.configure(bg=co1)\n\n\nstyle = ttk.Style(janela)\nstyle.theme_use(\"clam\")\n\n################# Frames ####################\n\nttk.Separator(janela, orient=HORIZONTAL).grid(row=0, columnspan=1, ipadx=280)\n\nframe_score = Frame(janela, width=300, height=56,bg=co3, pady=0, padx=0, relief=\"flat\",)\nframe_score.grid(row=1, column=0, sticky=NW)\n\nframe_quadros = Frame(janela, width=300, height=340,bg=fundo, pady=0, padx=0, relief=\"flat\",)\nframe_quadros.grid(row=2, column=0, sticky=NW)\n\n\n################# Funções ####################\n\ndef entering_values(event):\n\tglobal all_values\n\tall_values = all_values + str(event)\n\tvalue_text.set(all_values)\n\ndef calculate():\n\tglobal all_values\n\tresult = str(eval(all_values))\n\tvalue_text.set(result)\n\tall_values = \"\"\n\ndef scream_clear():\n global all_values\n all_values = \"\"\n value_text.set(\"\")\n\n#for storing all the expressions that will be evalueted\nall_values = \"\"\n# for single value entering\nvalue_text = StringVar()\n\n################# Label ####################\n\napp_scream = Label(frame_score,width=16,height=2,textvariable = value_text , padx=7, relief=\"flat\", anchor=\"e\",bd=0, justify=RIGHT, font=('Ivy 18 '), bg='#37474F', fg=co1)\napp_scream.place(x=0, y=0)\n\n################# Buttons ####################\n\nb_1 = Button(frame_quadros, text=\"C\", width=11, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: scream_clear())\nb_1.place(x=0, y=0)\nb_2 = Button(frame_quadros, text=\"%\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('%'))\nb_2.place(x=118, y=0)\nb_3 = Button(frame_quadros, text=\"/\", width=5, height=2, bg=cor1, fg=co1,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('/'))\nb_3.place(x=177, y=0)\n\nb_4 = Button(frame_quadros, text=\"7\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(7))\nb_4.place(x=0, y=52)\nb_5 = Button(frame_quadros, text=\"8\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(8))\nb_5.place(x=59, y=52)\nb_6 = Button(frame_quadros, text=\"9\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(9))\nb_6.place(x=118, y=52)\nb_7 = Button(frame_quadros, text=\"*\", width=5, height=2, bg=cor1, fg=co1,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('*'))\nb_7.place(x=177, y=52)\n\nb_8 = Button(frame_quadros, text=\"4\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(4))\nb_8.place(x=0, y=104)\nb_9 = Button(frame_quadros, text=\"5\", width=5, 
height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(5))\nb_9.place(x=59, y=104)\nb_10 = Button(frame_quadros, text=\"6\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(6))\nb_10.place(x=118, y=104)\nb_11 = Button(frame_quadros, text=\"-\", width=5, height=2, bg=cor1, fg=co1,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('-'))\nb_11.place(x=177, y=104)\n\nb_12 = Button(frame_quadros, text=\"1\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(1))\nb_12.place(x=0, y=156)\nb_13 = Button(frame_quadros, text=\"2\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(2))\nb_13.place(x=59, y=156)\nb_14 = Button(frame_quadros, text=\"3\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(3))\nb_14.place(x=118, y=156)\nb_15 = Button(frame_quadros, text=\"+\", width=5, height=2, bg=cor1, fg=co1,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('+'))\nb_15.place(x=177, y=156)\n\nb_16 = Button(frame_quadros, text=\"0\", width=11, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values(0))\nb_16.place(x=0, y=208)\nb_17 = Button(frame_quadros, text=\".\", width=5, height=2, bg=co10, fg=fundo,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: entering_values('.'))\nb_17.place(x=118, y=208)\nb_18 = Button(frame_quadros, text=\"=\", width=5, height=2, bg=cor1, fg=co1,font=('Ivy 13 bold'),relief=RAISED, overrelief=RIDGE,command = lambda: calculate())\nb_18.place(x=177, y=208)\n\njanela.mainloop()\n","repo_name":"GuilhermeFornaciari/Extras","sub_path":"Python/Calculadora_julia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19307202910","text":"import urllib3\r\nimport os\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nurl=\"https://www.imdb.com/chart/top?ref_=nv_mv_250\"\r\n# os.environ['NO_PROXY'] = 'imdb.com'\r\nreq = requests.get(url)\r\npage = req.text\r\n\r\nsoup = BeautifulSoup(page, 'html.parser')\r\n\r\nlinks=[]\r\nfor a in soup.find_all('a'): #, href=True):\r\n links.append(a.get('href'))\r\nlinks=['https://www.imdb.com'+a.strip() for a in links if a is not None and a.startswith('/title/tt') ]\r\n\r\n#---------------------------Remove duplicates in links\r\ntop_250_links=[]\r\nfor c in links:\r\n if c not in top_250_links:\r\n top_250_links.append(c)\r\n#top_250_links=top_250_links[2:]\r\n\r\n# print(len(top_250_links))\r\ntop_250_links[0:5]\r\n\r\n# column_list=['Rank','Movie_name' ,'URL' ,'Release_Year' ,'IMDB_Rating' ,\r\n# 'Reviewer_count' ,'Censor_Board_Rating' ,'Movie_Length' ,'Genre_1' ,\r\n# 'Genre_2' ,'Genre_3' ,'Genre_4' ,'Release_Date' ,'Story_Summary' ,\r\n# 'Director' ,'Writer_1' ,'Writer_2' ,'Writer_3' ,'Star_1' ,\r\n# 'Star_2' ,'Star_3' ,'Star_4' ,'Star_5' ,'Plot_Keywords' ,'Budget' ,\r\n# 'Gross_USA' ,'Cum_Worldwide_Gross' ,'Production_Company' \r\n# ]\r\n# df = pd.DataFrame(columns=column_list)#,index=t) \r\n\r\ncountryset = 
set()\r\n# genredict = {}\r\n# directordict = {}\r\n# actordict = {}\r\n\r\n\r\nfor x in np.arange(0, len(top_250_links)):\r\n\r\n \r\n #---------------------------Load html page for 1st movie in top 250 movies \r\n url=top_250_links[x]\r\n # print(url)\r\n req = requests.get(url)\r\n page = req.text\r\n soup = BeautifulSoup(page, 'html.parser')\r\n \r\n #---------------------------Retrieve Movie details from html page\r\n # Movie_name=(soup.find(\"div\",{\"class\":\"title_wrapper\"}).get_text(strip=True).split('|')[0]).split('(')[0]\r\n \r\n # year_released=((soup.find(\"div\",{\"class\":\"title_wrapper\"}).get_text(strip=True).split('|')[0]).split('(')[1]).split(')')[0]\r\n \r\n # imdb_rating=soup.find(\"span\",{\"itemprop\":\"ratingValue\"}).text\r\n \r\n # reviewer_count=soup.find(\"span\",{\"itemprop\":\"ratingCount\"}).text\r\n # box_office_details = []\r\n # box_office_dictionary = {'Country'}\r\n # for details in soup.find_all(\"div\",{\"class\":\"txt-block\"}):\r\n # detail = details.get_text(strip=True).split(':')\r\n # # print(detail)\r\n \r\n # if detail[0] == \"Country\":\r\n # # box_office_details.append(detail)\r\n # # print(detail[0])\r\n # # print(detail[1])\r\n \r\n # country = detail[1].split(\"|\")\r\n # # countrydict[x] = country\r\n # for c in country:\r\n # # print(\"(\"+str(x+1)+\", \\\"\"+c+\"\\\"),\")\r\n # countryset.add(c)\r\n # print(country)\r\n # print(countryset)\r\n # for detail in box_office_details:\r\n # if detail[0] in box_office_dictionary:\r\n # box_office_dictionary.update({detail[0] : detail[1]})\r\n \r\n # while len(country) < 4: \r\n # country.append(' ')\r\n # subtext= soup.find(\"div\",{\"class\":\"subtext\"}).get_text(strip=True).split('|') #Censor_rating\r\n # if len(subtext)<4:\r\n # censor_rating='Not Rated'\r\n # movie_len=subtext[0]\r\n # genre_list=subtext[1].split(',')\r\n # # while len(genre_list)<4: genre_list.append(\" \")\r\n # # genre_1,genre_2,genre_3,genre_4=genre_list\r\n # release_date=subtext[2]\r\n # else:\r\n # censor_rating=subtext[0]\r\n # movie_len=subtext[1]\r\n # genre_list=subtext[2].split(',')\r\n # # while len(genre_list)<4: genre_list.append(\" \")\r\n # # genre_1,genre_2,genre_3,genre_4=genre_list\r\n \r\n # # release_date=subtext[3]\r\n # # genredict[x] = genre_list\r\n # for i in range(len(genre_list)):\r\n # print('('+(str)(x+1)+', \"'+genre_list[i]+'\"),')\r\n # story_summary=soup.find(\"div\",{\"class\":\"summary_text\"}).get_text(strip=True).strip()\r\n \r\n #---------------------------Director,Writer and Actor details\r\n # summary = soup.find(\"div\", {\"class\":\"summary_text\"}).get_text( strip=True ).strip()\r\n # # Getting the credits for the director and writers\r\n # credit_summary = []\r\n # for summary_item in soup.find_all(\"div\",{ \"class\" : \"credit_summary_item\" }):\r\n # credit_summary.append(re.split( ',|:|\\|' ,summary_item.get_text( strip=True )))\r\n \r\n # stars = credit_summary.pop()[1:4]\r\n # writers = credit_summary.pop()[1:3]\r\n # director = credit_summary.pop()[1:]\r\n\r\n # print(\"(\" + str(x+1) + \",\\\"\" + director[0] + \"\\\", \\'M\\', \"+\"\\\"1970-1-1\"+ \"\\\"),\")\r\n\r\n castlist = soup.find(\"table\", {\"class\":\"cast_list\"})\r\n cast = castlist.find_all(\"tr\", {\"class\":\"odd\"})[0:3]\r\n for c in cast:\r\n t = c.get_text().split('...')\r\n actor = t[0].strip()\r\n role = c.find(\"td\", {\"class\":\"character\"}).find(\"a\").get_text()\r\n # role = t[1].strip()\r\n print(\"(\" + str(x+1) + \", \\\"\" + actor +\"\\\", \"+ '\\'F\\'' + \", \\\"1977-4-1\\\", \\\"\" + 
role + \"\\\"),\")\r\n\r\n\r\n\r\n\r\n\r\n #---------------------------Plot Keywords\r\n # b=[]\r\n # for a in soup.find_all(\"span\",{\"class\":\"itemprop\"}): b.append(a.get_text(strip=True)) \r\n \r\n # plot_keywords='|'.join(b)\r\n \r\n # #---------------------------Commercial details and Prod Company\r\n \r\n \r\n # b=[] #---------------------------Remove unwanted entries\r\n # d={'Budget':'', 'Opening Weekend USA':'','Gross USA':'','Cumulative Worldwide Gross':'','Production Co':''}\r\n # for a in soup.find_all(\"div\",{\"class\":\"txt-block\"}):\r\n # c=a.get_text(strip=True).split(':')\r\n # if c[0] in d:\r\n # b.append(c)\r\n \r\n # for i in b: #---------------------------Update default values if entries are found\r\n # if i[0] in d: \r\n # d.update({i[0]:i[1]}) \r\n #print(d)\r\n \r\n # production_company=d['Production Co'].split('See more')[0]\r\n # cum_world_gross=d['Cumulative Worldwide Gross'].split(' ')[0]\r\n # gross_usa=d['Gross USA'].split(' ')[0]\r\n # budget=d['Budget']\r\n \r\n # print(x,\":\",Movie_name)\r\n #---------------------------Dictionary to holds all details\r\n # movie_dict={\r\n # 'Rank':x+1,\r\n # 'Movie_name' : Movie_name,\r\n # 'URL' : url,\r\n # 'Release_Year' : year_released,\r\n # 'IMDB_Rating' : imdb_rating,\r\n # 'Reviewer_count' : reviewer_count,\r\n # 'Censor_Board_Rating' : censor_rating,\r\n # 'Movie_Length' : movie_len,\r\n # 'Genre_1' : genre_1,\r\n # 'Genre_2' : genre_2,\r\n # 'Genre_3' : genre_3,\r\n # 'Genre_4' : genre_4,\r\n # 'Release_Date' : release_date,\r\n # 'Story_Summary' : story_summary,\r\n # 'Director' : director,\r\n # 'Writer_1' : writer_1,\r\n # 'Writer_2' : writer_2,\r\n # 'Writer_3' : writer_3,\r\n # 'Star_1' : star_1,\r\n # 'Star_2' : star_2,\r\n # 'Star_3' : star_3,\r\n # 'Star_4' : star_4,\r\n # 'Star_5' : star_5,\r\n # 'Plot_Keywords' : plot_keywords,\r\n # 'Budget' : budget,\r\n # 'Gross_USA' : gross_usa,\r\n # 'Cum_Worldwide_Gross' : cum_world_gross,\r\n # 'Production_Company' : production_company\r\n # }\r\n # #print(movie_dict['Rank'],\":\",movie_dict['Movie_name'])\r\n \r\n # #---------------------------Append rows to dataframes using dictionary\r\nprint(countryset)\r\n # df = df.append(pd.DataFrame.from_records([movie_dict],columns=movie_dict.keys() ) )","repo_name":"rangwang/DatabaseSystem2021Spring","sub_path":"Lab1/crawl_imdb.py","file_name":"crawl_imdb.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"26724555022","text":"import math\n\n\ndef last_fragment_size(\n messageSize_bytes, overheadPerPacket_bytes, maximumNPacketSize_bytes\n):\n s = messageSize_bytes\n o = overheadPerPacket_bytes\n m = maximumNPacketSize_bytes\n return s % (m - o) + o\n\n\nprint(last_fragment_size(10_000, 20, 1_500))\n","repo_name":"Crispyfries345/COSC264","sub_path":"quiz_4/q17.py","file_name":"q17.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"21555983734","text":"from re import I\n\n\na = int(input(\"Enter the number of which multiplication is to be found\\n\"))\nprint(\"The table of\" , a , \"is given as\")\ni = 15\nwhile (i <= 10): \n b = (a*i)\n i = i + 1\n print(a, \"x\" , i-1 , \"=\" , b)\n","repo_name":"mahilreshi/Python-files-V1","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} 
+{"seq_id":"24231616360","text":"import yaml\nimport logging\nimport pytest\n\n\n\"\"\"\nRun as:\npytests -sxv ', r'', r'<[^>]+>']\n for filter_rerule in filter_rerule_list:\n html_labels = re.findall(filter_rerule, text)\n for h in html_labels:\n text = text.replace(h, ' ')\n filter_char_list = [\n u'\\x85', u'\\xa0', u'\\u1680', u'\\u180e', u'\\u2000', u'\\u200a', u'\\u2028', u'\\u2029', u'\\u202f', u'\\u205f',\n u'\\u3000', u'\\xA0', u'\\u180E', u'\\u200A', u'\\u202F', u'\\u205F', '\\t', '\\n', '\\r', '\\f', '\\v',\n ]\n for f_char in filter_char_list:\n text = text.replace(f_char, '')\n text = re.sub(' +', ' ', text).strip()\n return text\n\n\n\n def filter_text(self, input_text):\n filter_list = [u'\\x85', u'\\xa0', u'\\u1680', u'\\u180e', u'\\u2000-', u'\\u200a',\n u'\\u2028', u'\\u2029', u'\\u202f', u'\\u205f', u'\\u3000', u'\\xA0', u'\\u180E',\n u'\\u200A', u'\\u202F', u'\\u205F']\n for index in filter_list:\n input_text = input_text.replace(index, \"\").strip()\n return input_text\n\n def start_requests(self):\n url_list = [\n 'https://www.onano77.com/products',\n ]\n for url in url_list:\n print(url)\n yield scrapy.Request(\n url=url,\n callback=self.parse_list,\n )\n\n #url = \"http://onano77.com/\"\n #yield scrapy.Request(\n # url=url,\n #)\n\n # def parse(self, response):\n # url_list = response.xpath(\"\").getall()\n # url_list = [response.urljoin(url) for url in url_list]\n # for url in url_list:\n # print(url)\n # yield scrapy.Request(\n # url=url,\n # callback=self.parse_list,\n # )\n\n\n\n #这个网站不需要找分类页 所有商品都在一页\n\n def parse_list(self, response):\n \"\"\"列表页\"\"\"\n url_list = response.xpath(\"//div[@id='product_list']/article/a/@href\").getall()\n url_list = [response.urljoin(url) for url in url_list]\n for n in range(0,len(url_list)):\n url_list[n]=url_list[n]\n for url in url_list:\n print('商品详情url:')\n print(url)\n yield scrapy.Request(\n url=url,\n callback=self.parse_detail,\n )\n\n # response_url = parse.unquote(response.url)\n # split_str = ''\n # base_url = response_url.split(split_str)[0]\n # page_num = int(response_url.split(split_str)[1])+1\n # next_page_url = base_url + split_str + str(page_num)\n #next_page_url = response.xpath(\"\").get()\n #if next_page_url:\n # next_page_url = response.urljoin(next_page_url)\n # print(\"下一页:\"+next_page_url)\n # yield scrapy.Request(\n # url=next_page_url,\n # callback=self.parse_list,\n # )\n\n def parse_detail(self, response):\n print('进入解析')\n \"\"\"详情页\"\"\"\n items = ShopItem()\n items[\"url\"] = response.url\n # price = re.findall(\"\", response.text)[0]\n original_price = response.xpath(\"//section[@class='product_price']/h3//text()\").getall()\n original_price=''.join(original_price)\n current_price=''\n original_price=self.filter_text(self.filter_html_label(original_price)).split(\"$\")[-1].replace(',', '').strip()\n if(original_price[1].isdigit()):\n print('商品原价')\n print(original_price)\n current_price = original_price\n items[\"original_price\"] = \"\" + str(original_price) if original_price else \"\" + str(current_price)\n items[\"current_price\"] = \"\" + str(current_price) if current_price else \"\" + str(original_price)\n items['is_deleted'] = 0\n else:\n items[\"original_price\"] = '0'\n items[\"current_price\"] = '0'\n items['is_deleted'] = 1\n\n # items[\"original_price\"] = \"\" + str(original_price) if original_price else \"\" + str(current_price)\n # items[\"current_price\"] = \"\" + str(current_price) if current_price else \"\" + str(original_price)\n\n items[\"brand\"] = 'onano77'\n items[\"name\"] 
= response.xpath(\"//header[@id='content_header']/h1/text()\").get()\n\n attributes = list()\n items[\"attributes\"] = attributes\n\n #items[\"about\"] = response.xpath(\"\").get()\n #items[\"description\"] = response.xpath(\"\").get()\n #items[\"care\"] = response.xpath(\"\").get()\n #items[\"sales\"] = response.xpath(\"\").get()\n items[\"source\"] = website\n images_list = response.xpath(\"//section[@class='product_images galy']/ul/li/img/@src\").getall()\n items[\"images\"] = images_list\n\n # Breadcrumb_list = response.xpath(\"\").getall()\n items[\"cat\"] = ''\n items[\"detail_cat\"] = ''\n\n\n region_list=[]\n label_list = response.xpath(\"//select[@class='product_option_select']/option\")\n if label_list:\n label_list.pop(0)\n #print('lable_list')\n #print(label_list)\n #print('label表')\n #print(label_list)\n for label in label_list:\n reigion_name = label.xpath('./text()').get().strip().replace(':', '').lower()\n reigion_price=label.xpath('./@data-price').get().strip().replace(':', '').lower()\n region_list.append({'reigion': reigion_name,'price':reigion_price})\n\n sku_list = list()\n for region in region_list:\n sku_item = SkuItem()\n sku_item[\"original_price\"] = region[\"price\"]\n sku_item[\"current_price\"] = region[\"price\"]\n #sku_item[\"inventory\"] = sku[\"inventory\"]\n #sku_item[\"sku\"] = sku[\"sku\"]\n #imgs = list()\n #sku_item[\"imgs\"] = imgs\n sku_item[\"url\"] = response.url\n #sku_item[\"sku\"] = sku\n attributes = SkuAttributesItem()\n #attributes[\"colour\"] = sku[\"name\"]\n #attributes[\"size\"] = sku[\"size\"]\n other = dict()\n other.update({'reigion':region['reigion']})\n attributes[\"other\"] = other\n sku_item[\"attributes\"] = attributes\n sku_list.append(sku_item)\n\n items[\"sku_list\"] = sku_list\n items[\"measurements\"] = [\"Weight: None\", \"Height: None\", \"Length: None\", \"Depth: None\"]\n status_list = list()\n status_list.append(items[\"url\"])\n status_list.append(items[\"original_price\"])\n status_list.append(items[\"current_price\"])\n status_list = [i for i in status_list if i]\n status = \"-\".join(status_list)\n items[\"id\"] = md5(status.encode(\"utf8\")).hexdigest()\n\n items[\"lastCrawlTime\"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n items[\"created\"] = int(time.time())\n items[\"updated\"] = int(time.time())\n items['is_deleted'] = 0\n\n #print(items)\n # check_item(items)\n yield items\n\n # detection_main(\n # items=items,\n # website=website,\n # num=self.settings[\"CLOSESPIDER_ITEMCOUNT\"],\n # skulist=True,\n # skulist_attributes=True,\n # )\n #print(items)\n","repo_name":"Chenye115/weshop_scrapy","sub_path":"spiders/onano77.py","file_name":"onano77.py","file_ext":"py","file_size_in_byte":9697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"21174334597","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nwread.py\n\nVERSION AND LAST UPDATE:\n v1.0 04/04/2022\n v1.1 01/06/2023\n v1.2 10/12/2023\n\nPURPOSE:\n Group of python functions to Read Wave data: \n WAVEWATCHIII results, and NDBC and Copernicus buoys.\n Prefix meaning:\n tseriesnc = time series (table of integrated parameters versus time).\n spec = wave spectrum.\n Users can import as a standard python function, and use it accordingly:\n For example:\n import wread\n wread.tseriesnc_ww3(filename.nc,stationID)\n Users can help() each function to obtain information about inputs/outputs\n help(wread.tseriesnc_ww3)\n\nUSAGE:\n functions\n readconfig\n mask\n cyclonemap\n 
tseriesnc_ndbc\n tseriesnc_copernicus\n aodn_altimeter\n tseriesnc_ww3\n bull\n bull_tar\n ts\n station_tar\n spec_ndbc\n spec_ww3\n Explanation for each function is contained in the headers\n\nOUTPUT:\n Dictionary containing arrays and info.\n Description of variables is contained in the header of each function.\n\nDEPENDENCIES:\n See setup.py and the imports below.\n\nAUTHOR and DATE:\n 04/04/2022: Ricardo M. Campos, first version.\n 01/06/2023: Ricardo M. Campos, new file formats added.\n 10/12/2023: Ricardo M. Campos & Maryam Mohammadpour, new function readconfig\n to read the configuration file ww3tools.yaml. And a new function aodn_altimeter\n to read AODN altimeter data.\n\nPERSON OF CONTACT:\n Ricardo M Campos: ricardo.campos@noaa.gov\n\n\"\"\"\n\nimport matplotlib\nimport time\nimport timeit\nfrom time import strptime\nfrom calendar import timegm\nimport pandas as pd\nimport xarray as xr\nimport netCDF4 as nc\nimport numpy as np\nfrom pylab import *\nimport yaml\nimport re\nimport os\nimport sys\nfrom matplotlib import ticker\n# import pickle\nimport sys\nimport warnings; warnings.filterwarnings(\"ignore\")\n\n\ndef readconfig(fname):\n \"\"\"\n Reads the configuration file ww3tools.yaml and returns a dictionary\n containing all the information in the file.\n User can enter the file name 'ww3tools.yaml' or the name including\n the full path '/home/user/ww3tools.yaml'\n \"\"\"\n\n try: \n with open(fname, 'r') as file:\n\t wconfig = yaml.safe_load(file)\n except:\n raise ValueError(\"wproc.readconfig: ww3tools.yaml not found.\")\n else:\n\n # paths\n if \"path_out\" in wconfig:\n\t if str(wconfig['path_out']) != '/':\n\t\t wconfig['path_out']=str(wconfig['path_out'])+\"/\"\n else:\n wconfig['path_out']=str(os.getcwd())+\"/\"\n\n if \"path_alt\" in wconfig:\n\t if str(wconfig['path_alt']) != '/':\n\t\t wconfig['path_alt']=str(wconfig['path_alt'])+\"/\"\n else:\n wconfig['path_alt']=str(os.getcwd())+\"/\"\n print(\"Warning: path_alt not found, using local directory \"+wconfig['path_alt'])\n\n if \"path_ndbc\" in wconfig:\n\t if str(wconfig['path_ndbc']) != '/':\n\t\t wconfig['path_ndbc']=str(wconfig['path_ndbc'])+\"/\"\n else:\n wconfig['path_ndbc']=str(os.getcwd())+\"/\"\n print(\"Warning: path_ndbc not found, using local directory \"+wconfig['path_ndbc'])\n\n if \"path_copernicus\" in wconfig:\n\t if str(wconfig['path_copernicus']) != '/':\n\t\t wconfig['path_copernicus']=str(wconfig['path_copernicus'])+\"/\"\n else:\n wconfig['path_copernicus']=str(os.getcwd())+\"/\"\n print(\"Warning: path_copernicus not found, using local directory \"+wconfig['path_copernicus'])\n\n # returns a dictionary containing the information given by ww3tools.yaml\n return wconfig\n\n\ndef mask(*args):\n '''\n Read gridmask netcdf file generated with prepGridMask.py\n Input: file name (example: gridInfo_GEFSv12.nc)\n Output: dictionary containing the arrays and string names\n '''\n if len(args) == 1:\n fname=str(args[0])\n else:\n sys.exit(' Too many inputs')\n\n print(\" reading ww3_tools mask ...\")\n try:\n f=nc.Dataset(fname)\n # build dictionary\n result={'latitude':np.array(f.variables['latitude'][:]),'longitude':np.array(f.variables['longitude'][:]),'mask':np.array(f.variables['mask'][:,:])}\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n if 'distcoast' in f.variables.keys():\n result['distcoast'] = np.array(f.variables['distcoast'][:,:])\n if 'depth' in f.variables.keys():\n result['depth'] = np.array(f.variables['depth'][:,:]) \n if 'GlobalOceansSeas' in f.variables.keys():\n 
result['GlobalOceansSeas'] = np.array(f.variables['GlobalOceansSeas'][:,:]) \n if 'HighSeasMarineZones' in f.variables.keys():\n result['HighSeasMarineZones'] = np.array(f.variables['HighSeasMarineZones'][:,:]) \n if 'names_GlobalOceansSeas' in f.variables.keys():\n result['names_GlobalOceansSeas'] = f.variables['names_GlobalOceansSeas'][:]\n if 'names_HighSeasMarineZones' in f.variables.keys():\n result['names_HighSeasMarineZones'] = f.variables['names_HighSeasMarineZones'][:]\n\n f.close(); del f\n print(\" GridMask Read. \"+fname)\n return result\n del result\n\ndef cyclonemap(*args):\n '''\n Read cyclonemap netcdf file generated with procyclmap.py\n Input: file name (example: CycloneMap2020.nc)\n Output: dictionary containing the arrays and string names\n '''\n if len(args) == 1:\n fname=str(args[0])\n else:\n sys.exit(' Too many inputs')\n\n print(\" reading ww3_tools cyclonemap ...\")\n try:\n f=nc.MFDataset(fname, aggdim='time')\n at=f.variables['time'][:]; adate=[]\n for j in range(0,at.shape[0]):\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[j])[0],time.gmtime(at[j])[1],time.gmtime(at[j])[2],time.gmtime(at[j])[3],time.gmtime(at[j])[4]))) \n # --------\n # build dictionary\n result={'latitude':np.array(f.variables['lat'][:]),'longitude':np.array(f.variables['lon'][:]),\n 'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'cmap':f.variables['cmap'], 'info':str(f.info), 'netcdf':f}\n # it does not allocate the data of cmap using [:,:,:] yet as it can take a lot of data/memory and time.\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n print(\" CycloneInfo Read. \"+fname)\n return result\n del result\n\n\n# ================= OBSERVATIONS ================= \n# --- Buoys ---\n# Observations NDBC, netcdf format\ndef tseriesnc_ndbc(fname=None,anh=None):\n '''\n Observations NDBC, time series/table, netcdf format\n Input: file name (example: 46047h2016.nc), and anemometer height (optional)\n Output: dictionary containing the arrays: time(seconds since 1970),time(datetime64),lat,lon, \n and arrays sst,mslp,dwp,tmp,gst(10-m height),wsp(10-m height),wdir,hs,tm,tp,dm\n '''\n if fname==None:\n raise ValueError(\"NDBC file name must be informed.\")\n\n try:\n ds = xr.open_dataset(fname); f=nc.Dataset(fname)\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n btm = f.variables['average_wpd'][:,0,0]; btp = f.variables['dominant_wpd'][:,0,0]\n btime = np.array(f.variables['time'][:]).astype('double')\n f.close(); del f\n bsst = ds['sea_surface_temperature'].values[:,0,0]\n bmslp = ds['air_pressure'].values[:,0,0]\n bdwp = ds['dewpt_temperature'].values[:,0,0]\n btmp = ds['air_temperature'].values[:,0,0]\n bgst = ds['gust'].values[:,0,0] \n\n if 'wind_spd' in ds.keys():\n bwsp = ds['wind_spd'].values[:,0,0]\n\n if anh==None:\n try:\n from urllib.request import urlopen\n url = \"https://www.ndbc.noaa.gov/station_page.php?station=\"+str(fname).split('/')[-1].split('h')[0]\n page = urlopen(url)\n html_bytes = page.read()\n html = html_bytes.decode(\"utf-8\")\n except:\n anh=4.0 # assuming most of anemometer heights are between 3.7 to 4.1.\n print('Information about the Anemometer height, for wind speed conversion to 10m, could not be obtained.')\n else:\n if \"Anemometer height\" in html:\n anh=np.float(html.split('Anemometer height')[1][0:15].split(':')[1].split('m')[0])\n else:\n print('Information about the Anemometer height, for wind speed conversion to 10m, could not be found.')\n anh=4.0 # assuming most of anemometer heights are between 
3.7 to 4.1.\n\n del url,page,html_bytes,html\n\n # convert wind speed to 10 meters (DNVGL C-205 Table 2-1, confirmed by https://onlinelibrary.wiley.com/doi/pdf/10.1002/er.6382)\n bwsp = np.copy(((10./anh)**(0.12)) * bwsp)\n bgst = np.copy(((10./anh)**(0.12)) * bgst)\n\n bwdir = ds['wind_dir'].values[:,0,0]\n bhs = ds['wave_height'].values[:,0,0]\n bdm = ds['mean_wave_dir'].values[:,0,0]\n\n # Automatic and basic Quality Control\n bsst[np.abs(bsst)>70]=np.nan\n bmslp[(bmslp<500)|(bmslp>1500)]=np.nan\n bdwp[np.abs(bdwp)>80]=np.nan\n btmp[np.abs(btmp)>80]=np.nan\n bgst[(bgst<0)|(bgst>200)]=np.nan\n bwsp[(bwsp<0)|(bwsp>150)]=np.nan\n bwdir[(bwdir<-180)|(bwdir>360)]=np.nan\n bhs[(bhs<0)|(bhs>30)]=np.nan\n btm[(btm<0)|(btm>40)]=np.nan\n btp[(btp<0)|(btp>40)]=np.nan\n bdm[(bdm<-180)|(bdm>360)]=np.nan\n\n result={'latitude':np.array(ds['latitude'].values[:]),'longitude':np.array(ds['longitude'].values[:]),\n 'time':btime,'date':ds['time'].values[:],\n 'sst':bsst, 'mslp':bmslp, 'dewpt_temp':bdwp,\n 'air_temp':btmp, 'gust':bgst, 'wind_spd':bwsp, \n 'wind_dir':bwdir, 'hs':bhs, 'tm':btm, \n 'tp':btp, 'dm':bdm, 'tm':btm}\n\n return result\n ds.close()\n del ds,btime,bsst,bmslp,bdwp,btmp,bgst,bwsp,bwdir,bhs,btm,btp,bdm\n\n# Observations NDBC, text format\ndef tseriestxt_ndbc(fname=None,anh=None):\n '''\n Observations NDBC, time series/table, stdmet format\n Input: file name (example: NDBC_historical_stdmet_41004.txt), and anemometer height (optional)\n Output: dictionary containing the arrays: time(seconds since 1970),time(datetime64),lat,lon, \n and arrays sst,mslp,dwp,tmp,gst,wsp,wdir,hs,tm,tp,dm\n '''\n if fname==None:\n raise ValueError(\"NDBC file name must be informed.\")\n\n try:\n ds = pd.read_csv(fname,comment='#',delimiter=r\"\\s+\")\n btime=np.zeros(ds.shape[0],'d')\n if 'mm' in ds.keys():\n ds = pd.read_csv(fname,comment='#',delimiter=r\"\\s+\",parse_dates= {\"date\" : [\"YY\",\"MM\",\"DD\",\"hh\",\"mm\"]})\n ds['date']=pd.to_datetime(ds['date'],format='%Y %m %d %H %M')\n else:\n ds = pd.read_csv(fname,comment='#',delimiter=r\"\\s+\",parse_dates= {\"date\" : [\"YY\",\"MM\",\"DD\",\"hh\"]})\n ds['date']=pd.to_datetime(ds['date'],format='%Y %m %d %H')\n\n for i in range(0,btime.shape[0]):\n btime[i]=double(ds['date'][i].timestamp()) \n\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n\n bwdir=np.array(ds['WDIR'].values[:]).astype('float')\n bgst=np.array(ds['GST'].values[:]).astype('float')\n bhs=np.array(ds['WVHT'].values[:]).astype('float')\n btp=np.array(ds['DPD'].values[:]).astype('float')\n btm=np.array(ds['APD'].values[:]).astype('float')\n bdm=np.array(ds['MWD'].values[:]).astype('float')\n bmslp=np.array(ds['PRES'].values[:]).astype('float')\n btmp=np.array(ds['ATMP'].values[:]).astype('float')\n bsst=np.array(ds['WTMP'].values[:]).astype('float')\n bdwp=np.array(ds['DEWP'].values[:]).astype('float')\n\n if 'WSPD' in ds.keys():\n bwsp=np.array(ds['WSPD'].values[:]).astype('float')\n\n try:\n from urllib.request import urlopen\n url = \"https://www.ndbc.noaa.gov/station_page.php?station=\"+str(fname).split('/')[-1].split('h')[0].split('_')[-1]\n page = urlopen(url)\n html_bytes = page.read()\n html = html_bytes.decode(\"utf-8\")\n auxlatlon=html.split('payload')[1][16:33]\n if 'S' in auxlatlon:\n blat=-np.float(auxlatlon[0:6])\n else:\n blat=np.float(auxlatlon[0:6])\n\n if 'W' in auxlatlon:\n blon=-np.float(auxlatlon[8:16])\n else:\n blon=np.float(auxlatlon[8:16]) \n\n except:\n if anh==None:\n anh=4.0 # assuming most of anemometer heights are between 3.7 to 4.1.\n 
print('Information about the Anemometer height, for wind speed conversion to 10m, could not be found. Assuming 4.0 meters.')\n\n blat=np.nan; blon=np.nan\n print('Information of Lat and Lon could not be obtained.')\n else:\n if anh==None:\n if \"Anemometer height\" in html:\n anh=np.float(html.split('Anemometer height')[1][0:15].split(':')[1].split('m')[0])\n else:\n print('Information about the Anemometer height, for wind speed conversion to 10m, could not be found. Assuming 4.0 meters.')\n anh=4.0 # assuming most of anemometer heights are between 3.7 to 4.1.\n\n del url,page,html_bytes,html\n\n # convert wind speed to 10 meters (DNVGL C-205 Table 2-1, confirmed by https://onlinelibrary.wiley.com/doi/pdf/10.1002/er.6382)\n bwsp = np.copy(((10./anh)**(0.12)) * bwsp)\n bgst = np.copy(((10./anh)**(0.12)) * bgst)\n\n # Automatic and basic Quality Control\n bsst[np.abs(bsst)>70]=np.nan\n bmslp[(bmslp<500)|(bmslp>1500)]=np.nan\n bdwp[np.abs(bdwp)>80]=np.nan\n btmp[np.abs(btmp)>80]=np.nan\n bgst[(bgst<0)|(bgst>200)]=np.nan\n bwsp[(bwsp<0)|(bwsp>150)]=np.nan\n bwdir[(bwdir<-180)|(bwdir>360)]=np.nan\n bhs[(bhs<0)|(bhs>30)]=np.nan\n btm[(btm<0)|(btm>40)]=np.nan\n btp[(btp<0)|(btp>40)]=np.nan\n bdm[(bdm<-180)|(bdm>360)]=np.nan\n\n result={'latitude':blat,'longitude':blon,\n 'time':btime,'date':ds['date'].values[:],\n 'sst':bsst, 'mslp':bmslp, 'dewpt_temp':bdwp,\n 'air_temp':btmp, 'gust':bgst, 'wind_spd':bwsp, \n 'wind_dir':bwdir, 'hs':bhs, 'tm':btm, \n 'tp':btp, 'dm':bdm, 'tm':btm}\n\n return result\n del ds,btime,blat,blon,bsst,bmslp,bdwp,btmp,bgst,bwsp,bwdir,bhs,btm,btp,bdm\n\n# Observations Copernicus, netcdf format\ndef tseriesnc_copernicus(*args):\n '''\n Observations NDBC, time series/table, netcdf format\n Input: file name (example: 46047h2016.nc)\n Output: dictionary containing the arrays: time(seconds since 1970),time(datetime64),lat,lon, \n and arrays with the environmental variables available.\n '''\n if len(args) == 1:\n fname=str(args[0])\n elif len(args) > 1:\n sys.exit(' Too many inputs')\n\n try:\n ds = xr.open_dataset(fname); f=nc.Dataset(fname)\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n btime = np.array(f.variables['TIME'][:]*24*3600 + timegm( strptime('195001010000', '%Y%m%d%H%M') )).astype('double')\n f.close(); del f\n blat = np.nanmean(ds['LATITUDE'].values[:])\n blon = np.nanmean(ds['LONGITUDE'].values[:])\n # dictionary\n result={'latitude':np.array(blat),'longitude':np.array(blon),\n 'time':btime,'date':ds['TIME'].values[:]}\n\n if 'DEPH' in ds.keys(): \n bdepth = np.nanmean(ds['DEPH'].values[:,:],axis=1) # Depth \n result['depth']=np.array(bdepth)\n\n if 'VHM0' in ds.keys():\n bhs = np.nanmean(ds['VHM0'].values[:,:],axis=1) # Hs\n bhs[(bhs<0)|(bhs>30)]=np.nan\n result['hs']=np.array(bhs)\n elif 'VGHS' in ds.keys():\n bhs = np.nanmean(ds['VGHS'].values[:,:],axis=1) # Hs\n bhs[(bhs<0)|(bhs>30)]=np.nan\n result['hs']=np.array(bhs)\n\n if 'VAVH' in ds.keys(): \n bvavh = np.nanmean(ds['VAVH'].values[:,:],axis=1) # H 1/3 vavh\n bvavh[(bvavh<0)|(bvavh>30)]=np.nan\n result['hs_vavh']=np.array(bvavh)\n\n if 'VZMX' in ds.keys():\n bhmax = np.nanmean(ds['VZMX'].values[:,:],axis=1) # Hmax\n bhmax[(bhmax<0)|(bhmax>40)]=np.nan\n result['hmax']=np.array(bhmax)\n\n if 'VTM02' in ds.keys():\n btm = np.nanmean(ds['VTM02'].values[:,:],axis=1) # Tm\n btm[(btm<0)|(btm>40)]=np.nan\n result['tm']=np.array(btm)\n elif 'VGTA' in ds.keys():\n btm = np.nanmean(ds['VGTA'].values[:,:],axis=1) # Tm\n btm[(btm<0)|(btm>40)]=np.nan\n result['tm']=np.array(btm)\n\n if 'VTPK' in ds.keys():\n btp 
= np.nanmean(ds['VTPK'].values[:,:],axis=1) # Tp\n btp[(btp<0)|(btp>40)]=np.nan\n result['tp']=np.array(btp)\n\n if 'TEMP' in ds.keys(): \n bsst = np.nanmean(ds['TEMP'].values[:,:],axis=1) # SST \n bsst[np.abs(bsst)>70]=np.nan\n result['sst']=np.array(bsst)\n\n if 'ATMS' in ds.keys():\n bmslp = np.nanmean(ds['ATMS'].values[:,:],axis=1) # Pressure\n bmslp[(bmslp<500)|(bmslp>1500)]=np.nan\n result['mslp']=np.array(bmslp)\n\n if 'DEWT' in ds.keys():\n bdwp = np.nanmean(ds['DEWT'].values[:,:],axis=1) # Dewpoint\n bdwp[np.abs(bdwp)>80]=np.nan\n result['dewpt_temp']=np.array(bdwp)\n\n if 'DRYT' in ds.keys():\n btmp = np.nanmean(ds['DRYT'].values[:,:],axis=1) # air temperature\n btmp[np.abs(btmp)>80]=np.nan\n result['air_temp']=np.array(btmp)\n\n if 'GSPD' in ds.keys():\n bgst = np.nanmean(ds['GSPD'].values[:,:],axis=1) # gust\n bgst=np.copy(((10./4.0)**(0.12))*bgst) # conversion to 10m, approximation DNVGL C-205 Table 2-1\n bgst[(bgst<0)|(bgst>200)]=np.nan\n result['gust']=np.array(bgst)\n\n if 'WSPD' in ds.keys():\n bwsp = np.nanmean(ds['WSPD'].values[:,:],axis=1) # wind speed \n bwsp=np.copy(((10./4.0)**(0.12))*bwsp) # conversion to 10m, approximation DNVGL C-205 Table 2-1\n bwsp[(bwsp<0)|(bwsp>150)]=np.nan\n result['wind_spd']=np.array(bwsp)\n\n if 'WDIR' in ds.keys():\n bwdir = np.nanmean(ds['WDIR'].values[:,:],axis=1) # wind direction \n bwdir[(bwdir<-180)|(bwdir>360)]=np.nan\n result['wind_dir']=np.array(bwdir)\n\n if 'VCMX' in ds.keys():\n bhcmax = np.nanmean(ds['VCMX'].values[:,:],axis=1) # Hcrest max\n bhcmax[(bhcmax<0)|(bhcmax>40)]=np.nan\n result['hc_max']=np.array(bhcmax)\n\n if 'VMDR' in ds.keys(): \n bdm = np.nanmean(ds['VMDR'].values[:,:],axis=1) # Mean direction\n bdm[(bdm<-180)|(bdm>360)]=np.nan\n result['dm']=np.array(bdm)\n\n if 'VPED' in ds.keys():\n bdp = np.nanmean(ds['VPED'].values[:,:],axis=1) # Peak direction\n bdp[(bdp<-180)|(bdp>360)]=np.nan\n result['dp']=np.array(bdp)\n\n return result\n ds.close(); del ds\n\n# --- Altimeter ---\n# Satellite data from Integrated Marine Observing System (IMOS), Australian Ocean Data Network (AODN)\ndef aodn_altimeter(satname,wconfig,datemin,datemax):\n '''\n Read AODN altimeter data\n http://thredds.aodn.org.au/thredds/catalog/IMOS/SRS/Surface-Waves/Wave-Wind-Altimetry-DM00/catalog.html\n https://portal.aodn.org.au/\n Altimeter information: https://doi.org/10.1038/s41597-019-0083-9\n Inputs:\n (1) satellite mission name. Select only one:\n JASON3,JASON2,CRYOSAT2,JASON1,HY2,SARAL,SENTINEL3A,ENVISAT,ERS1,ERS2,GEOSAT,GFO,TOPEX,SENTINEL3B,CFOSAT\n (2) wconfig dictionary, from wread.readconfig('ww3tools.yaml')\n (3) initial date ('YYYYMMDDHH')\n (4) final date ('YYYYMMDDHH')\n Output: pandas dataframe containing: TIME (seconds since 1970), LATITUDE, LONGITUDE, WDEPTH, DISTCOAST,\n HS, HS_CAL, WSPD, WSPD_CAL\n Maryam Mohammadpour & Ricardo M. 
Campos\n '''\n\n # start time\n start = timeit.default_timer()\n\n # date interval in seconds since 1970, user selection\n adatemin= np.double(timegm( time.strptime(datemin, '%Y%m%d%H')))\n adatemax= np.double(timegm( time.strptime(datemax, '%Y%m%d%H')))\n\n # Satellite missions available at AODN dataset, select only one.\n sdname=np.array(['JASON3','JASON2','CRYOSAT2','JASON1','HY2','SARAL','SENTINEL3A','ENVISAT','ERS1','ERS2','GEOSAT','GFO','TOPEX','SENTINEL3B','CFOSAT'])\n # Individual mission-specific Quality Control parameters\n min_swh_numval = np.array([17,17,17,17,17,17,17,17,17,17,-inf,3,7,17,-inf])\n\n if satname in sdname:\n s=int(np.where(sdname==satname)[0])\n wconfig['min_swh_numval'] = min_swh_numval[s]\n else:\n raise ValueError(\"wread.aodn_altimeter; \"+satname+\" not included in the satellite missions available: \"+\", \".join(sdname) )\n\n # name format for AODN reading\n nsatname = re.sub(r'(\\D)(\\d)', r'\\1-\\2', satname)\n\n # Sat files (squares) considering the domain (lat lon from ww3tools.yaml) of interest, for the AODN file names\n auxlat=np.array(np.arange(wconfig['latmin'],wconfig['latmax']+1.,1)).astype('int')\n auxlon=np.array(np.arange(wconfig['lonmin'],wconfig['lonmax']+1.,1)).astype('int')\n\n # Read and allocate satellite data into arrays\n ast=np.double(np.zeros((10**wconfig['pia']),'d')); aslat=np.zeros((10**wconfig['pia']),'f'); aslon=np.zeros((10**wconfig['pia']),'f')\n ahsk=np.zeros((10**wconfig['pia']),'f'); ahskcal=np.zeros((10**wconfig['pia']),'f')\n awnd=np.zeros((10**wconfig['pia']),'f'); awndcal=np.zeros((10**wconfig['pia']),'f'); asig0knstd=np.zeros((10**wconfig['pia']),'f')\n aswhknobs=np.zeros((10**wconfig['pia']),'f'); aswhknstd=np.zeros((10**wconfig['pia']),'f'); aswhkqc=np.zeros((10**wconfig['pia']),'f')\n aswdepth=np.zeros((10**wconfig['pia']),'f'); asdistcoast=np.zeros((10**wconfig['pia']),'f')\n ii=0\n for j in auxlat:\n for k in auxlon:\n\n if j>=0:\n hem='N'\n else:\n hem='S'\n\n try: \n fu=nc.Dataset(wconfig['path_alt']+satname+'/IMOS_SRS-Surface-Waves_MW_'+nsatname+'_FV02_'+str(np.abs(j)).zfill(3)+hem+'-'+str(k).zfill(3)+'E-DM00.nc')\n except:\n print(' '+wconfig['path_alt']+satname+'/IMOS_SRS-Surface-Waves_MW_'+nsatname+'_FV02_'+str(np.abs(j)).zfill(3)+hem+'-'+str(k).zfill(3)+'E-DM00.nc does not exist')\n else:\n st=np.double(fu.variables['TIME'][:]*24.*3600.+float(timegm( time.strptime('1985010100', '%Y%m%d%H') )))\n indt=np.where((st>=adatemin-wconfig['maxti']) & (st<=adatemax+wconfig['maxti']))\n # check if there are valid records inside the time range of interest\n if np.size(indt)>10:\n indt=indt[0]\n # it does not read using the indexes because it is much slower\n slat=fu.variables['LATITUDE'][:]\n slon=fu.variables['LONGITUDE'][:]\n swdepth=fu.variables['BOT_DEPTH'][:]\n sdistcoast=fu.variables['DIST2COAST'][:]\n wnd=fu.variables['WSPD'][:]\n wndcal=fu.variables['WSPD_CAL'][:]\n try: \n hsk=fu.variables['SWH_KU'][:]\n hskcal=fu.variables['SWH_KU_CAL'][:]\n sig0knstd=fu.variables['SIG0_KU_std_dev'][:]\n swhknobs=fu.variables['SWH_KU_num_obs'][:]\n swhknstd=fu.variables['SWH_KU_std_dev'][:]\n swhkqc=fu.variables['SWH_KU_quality_control'][:]\n except:\n print(' error reading KU, picking KA instead')\n hsk=fu.variables['SWH_KA'][:]\n hskcal=fu.variables['SWH_KA_CAL'][:]\n sig0knstd=fu.variables['SIG0_KA_std_dev'][:]\n swhknobs=fu.variables['SWH_KA_num_obs'][:]\n swhknstd=fu.variables['SWH_KA_std_dev'][:]\n swhkqc=fu.variables['SWH_KA_quality_control'][:]\n\n if ii+len(indt) <= ast.shape[0] :\n # check the file is correct\n if 
(st.shape[0]==wnd.shape[0]) & (slat.shape[0]==slon.shape[0]) & (hsk.shape[0]==hskcal.shape[0]) : \n ast[ii:ii+len(indt)]=np.array(st[indt]).astype('double')\n aslat[ii:ii+len(indt)]=np.array(slat[indt]).astype('float')\n aslon[ii:ii+len(indt)]=np.array(slon[indt]).astype('float')\n aswdepth[ii:ii+len(indt)]=np.array(swdepth[indt]).astype('float')\n asdistcoast[ii:ii+len(indt)]=np.array(sdistcoast[indt]).astype('float')\n ahsk[ii:ii+len(indt)]=np.array(hsk[indt]).astype('float')\n ahskcal[ii:ii+len(indt)]=np.array(hskcal[indt]).astype('float')\n awnd[ii:ii+len(indt)]=np.array(wnd[indt]).astype('float')\n awndcal[ii:ii+len(indt)]=np.array(wndcal[indt]).astype('float')\n asig0knstd[ii:ii+len(indt)]=np.array(sig0knstd[indt]).astype('float')\n aswhknobs[ii:ii+len(indt)]=np.array(swhknobs[indt]).astype('float')\n aswhknstd[ii:ii+len(indt)]=np.array(swhknstd[indt]).astype('float')\n aswhkqc[ii:ii+len(indt)]=np.array(swhkqc[indt]).astype('float')\n ii=ii+len(indt)\n\n else:\n raise ValueError(\"gridSat_Altimeter.py; Array too small to allocate the satellite data! Increase the power of the initial array in ww3tools.yaml (pia)\")\n\n del indt,st,slat,slon,swdepth,sdistcoast,hsk,hskcal,wnd,wndcal,sig0knstd,swhknobs,swhknstd,swhkqc\n fu.close(); del fu\n\n # print(repr(j)+\" \"+repr(k))\n\n # print(' Done reading and allocating satellite data '+satname)\n del ii\n\n # water depth is positive by definition\n aswdepth=aswdepth*-1.\n\n # Quality Control Check (optional) ----\n if wconfig['qc']==0:\n indq = np.where( (ast>=adatemin) & (ast<=adatemax) )\n else:\n indq = np.where( (aswdepth>=wconfig['mindepth']) & (asdistcoast>=wconfig['mindfc']) & (aswhknstd<=wconfig['max_swh_rms']) &\n (asig0knstd<=wconfig['max_sig0_rms']) & (aswhknobs>=wconfig['min_swh_numval']) & (aswhkqc<=wconfig['max_swh_qc']) &\n (ahsk>0.1) & (ahsk<wconfig['hsmax']) & (awnd>0.2) & (awnd<wconfig['wspmax']) & (ahskcal>0.1) & (ahskcal<wconfig['hsmax']) & (awndcal>0.2) & (awndcal<wconfig['wspmax']) &\n (ast>=adatemin) & (ast<=adatemax) )\n\n del asig0knstd,aswhknobs,aswhknstd,aswhkqc,adatemin,adatemax\n\n if np.size(indq)>2:\n indq=indq[0]\n ast=np.double(np.copy(ast[indq]))\n aslat=np.copy(aslat[indq]); aslon=np.copy(aslon[indq])\n aswdepth=np.copy(aswdepth[indq]); asdistcoast=np.copy(asdistcoast[indq])\n ahsk=np.copy(ahsk[indq]); ahskcal=np.copy(ahskcal[indq])\n awnd=np.copy(awnd[indq]); awndcal=np.copy(awndcal[indq])\n # dictionary\n daodn = {'TIME': ast, 'LATITUDE': aslat, 'LONGITUDE': aslon,\n 'WDEPTH': aswdepth, 'DISTCOAST': asdistcoast,\n 'HS': ahsk, 'HS_CAL': ahskcal,\n 'WSPD': awnd, 'WSPD_CAL': awndcal}\n\n else:\n daodn = {'TIME': [], 'LATITUDE': [], 'LONGITUDE': [],\n 'WDEPTH': [], 'DISTCOAST': [],\n 'HS': [], 'HS_CAL': [],\n 'WSPD': [], 'WSPD_CAL': []}\n\n AODN = pd.DataFrame(daodn)\n stop = timeit.default_timer()\n print('wread.aodn_altimeter successfully completed in '+repr(int(round(stop - start,0)))+' seconds. '+satname)\n return AODN\n\n\n# ========== MODEL ======================\n# --- WAVEWATCH III ---\n\n# WAVEWATCH III point output, text tab format\ndef tseriestxt_ww3(*args):\n '''\n WAVEWATCH III, time series/table, text tab format\n This file format has all point outputs (results) in the same file (not divided by point/buoy).\n Input: file name (example: tab50.ww3), and number of point outputs (example: 4)\n Output: dictionary containing the arrays: time(seconds since 1970),time(datetime64),lat,lon, \n and arrays with the wave variables available. 
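# Standalone sketch of the AODN time conversion used above: the altimeter files store TIME as days since 1985-01-01, converted here to seconds since 1970 (unix time). The sample value is illustrative.\nfrom calendar import timegm\nfrom time import strptime\nEPOCH_1985 = timegm(strptime('1985010100', '%Y%m%d%H'))  # seconds from 1970-01-01 to 1985-01-01\ndef aodn_days_to_unix(days_since_1985):\n    return days_since_1985 * 24. * 3600. + EPOCH_1985\nprint(aodn_days_to_unix(0.0))  # 473385600.0, i.e. 1985-01-01T00:00:00Z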
Inside the dictionary, the arrays of wave variables\n have dimension (point_outputs, time).\n '''\n if len(args) == 2:\n fname=str(args[0]); tnb=int(args[1])\n elif len(args) < 2 :\n sys.exit(' Two inputs are required: file name and station name')\n elif len(args) > 2:\n sys.exit(' Too many inputs')\n\n try:\n mcontent = open(fname).readlines()\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n\n tt = int(np.size(mcontent)/(7+tnb)+1)\n myear = []; mmonth = [] ; mday = [] ; mhour = []; mmin = []\n mlon = np.zeros((tnb,tt),'f'); mlat = np.zeros((tnb,tt),'f'); mhs = np.zeros((tnb,tt),'f'); mL = np.zeros((tnb,tt),'f') \n mtm = np.zeros((tnb,tt),'f'); mdm = np.zeros((tnb,tt),'f'); mspr = np.zeros((tnb,tt),'f')\n atp = np.zeros((tnb,tt),'f'); mdp = np.zeros((tnb,tt),'f'); mpspr = np.zeros((tnb,tt),'f')\n for i in range(0,tt):\n j = i*(7+tnb)\n myear = np.append(myear, int(mcontent[j].split(':')[1].split(' ')[1].split('/')[0]) )\n mmonth = np.append(mmonth, int(mcontent[j].split(':')[1].split(' ')[1].split('/')[1]) )\n mday = np.append(mday, int(mcontent[j].split(':')[1].split(' ')[1].split('/')[2]) )\n mhour = np.append(mhour, int(mcontent[j].split(':')[1].split(' ')[2]) )\n mmin = np.append(mmin, int(mcontent[j].split(':')[2]) )\n for k in range(0,tnb):\n mlon[k,i] = mcontent[j+tnb+1+k].strip().split()[0]\n mlat[k,i] = mcontent[j+tnb+1+k].strip().split()[1]\n mhs[k,i] = mcontent[j+tnb+1+k].strip().split()[2]\n mL[k,i] = mcontent[j+tnb+1+k].strip().split()[3]\n mtm[k,i] = mcontent[j+tnb+1+k].strip().split()[4]\n mdm[k,i] = mcontent[j+tnb+1+k].strip().split()[5]\n mspr[k,i] = mcontent[j+tnb+1+k].strip().split()[6]\n atp[k,i] = mcontent[j+tnb+1+k].strip().split()[7]\n mdp[k,i] = mcontent[j+tnb+1+k].strip().split()[8]\n mpspr[k,i] = mcontent[j+tnb+1+k].strip().split()[9]\n\n mtp = np.zeros((atp.shape[0],atp.shape[1]),'f')*np.nan\n for i in range(0,mtp.shape[0]): \n #mtp[i,atp[i,:]>0.0] = 1./atp[i,atp[i,:]>0.0]\n indtp=np.where(atp[i,:]>0.0)\n if np.size(indtp)>0:\n mtp[i,indtp] = np.copy(1./atp[i,indtp])\n del indtp\n\n mdate = pd.to_datetime(dict(year=myear,month=mmonth,day=mday,hour=mhour,minute=mmin))\n mtime=np.zeros(mdate.shape[0],'d')\n for i in range(0,mtime.shape[0]):\n mtime[i]=double(mdate[i].timestamp())\n\n result={'latitude':mlat,'longitude':mlon,\n 'time':mtime,'date':mdate,\n 'hs':mhs,'lm':mL,'tm':mtm,'dm':mdm,\n 'spr':mspr,'tp':mtp,'dp':mdp,'spr_dp':mpspr}\n\n return result\n del mdate,mtime,mlon,mlat,mhs,mL,mtm,mdm,mspr,atp,mtp,mdp,mpspr\n\ndef tseriesnc_ww3(*args):\n '''\n WAVEWATCH III, time series/table, netcdf format\n Input: file name (example: ww3gefs.20160928_tab.nc), and station name (example: 41002)\n Output: dictionary containing the arrays: time(seconds since 1970),time(datetime64),lat,lon, \n and arrays with the wave variables available.\n '''\n if len(args) == 2:\n fname=str(args[0]); stname=str(args[1])\n elif len(args) < 2 :\n sys.exit(' Two inputs are required: file name and station name')\n elif len(args) > 2:\n sys.exit(' Too many inputs')\n\n try:\n ds = xr.open_dataset(fname); f=nc.Dataset(fname)\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n mtime = np.array(f.variables['time'][:]*24*3600 + timegm( strptime(str(f.variables['time'].units).split(' ')[2][0:4]+'01010000', '%Y%m%d%H%M') )).astype('double')\n f.close(); del f\n \n auxstationname=ds['station_name'].values[:,:]; stationname=[]\n for i in range(0,auxstationname.shape[0]):\n stationname=np.append(stationname,\"\".join(np.array(auxstationname[i,:]).astype('str')))\n\n 
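# Hedged sketch of the fp -> Tp pattern used by these readers: peak period is the reciprocal of peak frequency, with non-positive frequencies mapped to NaN rather than dividing by zero. freq_to_period is a hypothetical helper; sample values are illustrative.\nimport numpy as np\ndef freq_to_period(fp):\n    fp = np.asarray(fp, dtype='f')\n    tp = np.full(fp.shape, np.nan, dtype='f')\n    ok = fp > 0.0\n    tp[ok] = 1.0 / fp[ok]\n    return tp\nprint(freq_to_period([0.1, 0.0, 0.05]))  # [10., nan, 20.]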
inds=np.where(stationname[:]==stname)\n if np.size(inds)>0:\n inds=int(inds[0][0]); stname=str(stationname[inds])\n else:\n sys.exit(' Station '+stname+' not included in the ww3 output file, or wrong station ID')\n\n mlat = np.nanmean(ds['latitude'].values[:,inds])\n mlon = np.nanmean(ds['longitude'].values[:,inds])\n # dictionary\n result={'latitude':np.array(mlat),'longitude':np.array(mlon),\n 'time':mtime,'date':ds['time'].values[:]} \n\n if 'hs' in ds.keys():\n mhs = ds['hs'].values[:,inds]\n mhs[(mhs<0)|(mhs>30)]=np.nan\n result['hs']=np.array(mhs)\n elif 'swh' in ds.keys():\n mhs = ds['swh'].values[:,inds]\n mhs[(mhs<0)|(mhs>30)]=np.nan\n result['hs']=np.array(mhs)\n\n if 'fp' in ds.keys():\n mtp = np.zeros(mhs.shape[0],'f')*np.nan\n indtp=np.where(ds['fp'].values[:,inds]>0.0)\n if np.size(indtp)>0:\n mtp[indtp] = np.copy(1./ds['fp'].values[indtp,inds])\n del indtp\n mtp[(mtp<0)|(mtp>40)]=np.nan\n\n result['tp']=np.array(mtp)\n if 'tr' in ds.keys():\n mtm = ds['tr'].values[:,inds]\n mtm[(mtm<0)|(mtm>40)]=np.nan\n result['tm']=np.array(mtm)\n if 'th1p' in ds.keys():\n mdp = ds['th1p'].values[:,inds]\n mdp[(mdp<-180)|(mdp>360)]=np.nan\n result['dp']=np.array(mdp)\n if 'th1m' in ds.keys():\n mdm = ds['th1m'].values[:,inds]\n mdm[(mdm<-180)|(mdm>360)]=np.nan\n result['dm']=np.array(mdm)\n if 'sth1m' in ds.keys():\n result['spr']=np.array(ds['sth1m'].values[:,inds])\n if 'lm' in ds.keys():\n result['lm']=np.array(ds['lm'].values[:,inds])\n if 'sth1p' in ds.keys():\n result['spr_dp']=np.array(ds['sth1p'].values[:,inds])\n\n return result\n ds.close(); del ds\n\n\n# Operational WW3 formats\n\ndef bull(*args):\n '''\n WAVEWATCH III, bull operational point output, see https://www.ftp.ncep.noaa.gov/data/nccf/com/\n Input: file name (example: gefs.wave.41004.bull)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),lat,lon,station name; Arrays: hs, tp, and dp (gfs only)\n '''\n if len(args) == 1:\n fname=str(args[0])\n elif len(args) > 1:\n sys.exit(' Too many inputs')\n\n # confirm format\n if str(fname).split('/')[-1].split('.')[-1]=='bull':\n print(\" reading ww3 bull file ...\")\n at=[]; adate=[]; ahs=[]; atp=[]; adp=[]\n stname=str(fname).split('/')[-1].split('.')[-2]\n\n try:\n tfile = open(fname, 'r'); lines = tfile.readlines()\n except:\n sys.exit(' Cannot open '+fname)\n else:\n\n if 'gfs' in str(fname).split('/')[-1]:\n iauxhs=[24,30];iauxtp=[30,34];iauxdp=[35,38]\n\n # lat / lon\n auxpos=str(lines[0]).replace(\"b'\",\"\").split('(')[1]\n if auxpos[5]=='N':\n alat=np.float(auxpos[0:5])\n else:\n alat=-1.*np.float(auxpos[0:5])\n\n if auxpos[13]=='E':\n alon=np.float(auxpos[7:13])\n else:\n alon=-1.*np.float(auxpos[7:13])\n\n # time ----\n auxdate = str(lines[2]).split(':')[1].split('UTC')[0][1::]\n auxt = np.double(timegm( strptime( auxdate[0:8]+' '+auxdate[9:11]+'00', '%Y%m%d %H%M') ))\n year = int(time.gmtime(auxt)[0]); month = int(time.gmtime(auxt)[1])\n pday=0\n for j in range(7,np.size(lines)-8):\n day=int(lines[j][3:5]); hour=int(lines[j][6:8])\n # advance the calendar date on day-of-month rollover\n if day<pday:\n month=month+1\n if month>12:\n month=1; year=year+1\n pday=day\n at=np.append(at,np.double(timegm( strptime( str(year)+str(month).zfill(2)+str(day).zfill(2)+' '+str(hour).zfill(2)+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[-1])[0],time.gmtime(at[-1])[1],time.gmtime(at[-1])[2],time.gmtime(at[-1])[3],time.gmtime(at[-1])[4])))\n if len(str(lines[j][10:15]).replace(' ',''))>0:\n ahs=np.append(ahs,np.float(lines[j][10:15]))\n auxhs=np.array([np.float(lines[j][iauxhs[0]:iauxhs[1]])])\n for k in range(1,4): \n if len(str(lines[j][int(iauxhs[0]+18*k):int(iauxhs[1]+18*k)]).replace(' ', '')):\n auxhs=np.append(auxhs,np.float(lines[j][int(iauxhs[0]+18*k):int(iauxhs[1]+18*k)]))\n\n auxtp=np.array([np.float(lines[j][iauxtp[0]:iauxtp[1]])])\n for k in range(1,4): \n if len(str(lines[j][int(iauxtp[0]+18*k):int(iauxtp[1]+18*k)]).replace(' ', '')):\n 
auxtp=np.append(auxtp,np.float(lines[j][int(iauxtp[0]+18*k):int(iauxtp[1]+18*k)]))\n\n auxdp=np.array([np.float(lines[j][iauxdp[0]:iauxdp[1]])])\n for k in range(1,4): \n if len(str(lines[j][int(iauxdp[0]+18*k):int(iauxdp[1]+18*k)]).replace(' ', '')):\n auxdp=np.append(auxdp,np.float(lines[j][int(iauxdp[0]+18*k):int(iauxdp[1]+18*k)]))\n\n indaux=np.nanmin(np.where(auxhs==np.nanmax(auxhs))[0])\n atp=np.append(atp,np.float(auxtp[indaux]))\n adp=np.append(adp,np.float(auxdp[indaux]))\n del indaux,auxhs,auxtp,auxdp\n else:\n ahs=np.append(ahs,np.nan)\n atp=np.append(atp,np.nan)\n adp=np.append(adp,np.nan)\n\n # build dictionary\n result={'latitude':alat,'longitude':alon,'station_name':stname,\n 'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'hs':np.array(ahs),'tp':np.array(atp),'dp':np.array(adp)}\n\n del adp\n\n elif 'gefs' in str(fname).split('/')[-1]:\n iauxhs=[10,15];iauxtp=[28,33]\n\n # lat / lon\n auxpos=str(lines[1]).split('(')[1].split('N')\n alat=np.float(auxpos[0])\n alon=np.float(auxpos[1].split('W')[0])\n\n # time ----\n auxdate = str(lines[3]).split(':')[1].split('UTC')[0][1::]\n auxt = np.double(timegm( strptime( auxdate[0:8]+' '+auxdate[10:12]+'00', '%Y%m%d %H%M') ))\n year = int(time.gmtime(auxt)[0]); month = int(time.gmtime(auxt)[1])\n pday=0\n for j in range(9,np.size(lines)-8):\n day=int(lines[j][2:4]); hour=int(lines[j][5:7])\n # advance the calendar date on day-of-month rollover\n if day<pday:\n month=month+1\n if month>12:\n month=1; year=year+1\n pday=day\n at=np.append(at,np.double(timegm( strptime( str(year)+str(month).zfill(2)+str(day).zfill(2)+' '+str(hour).zfill(2)+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[-1])[0],time.gmtime(at[-1])[1],time.gmtime(at[-1])[2],time.gmtime(at[-1])[3],time.gmtime(at[-1])[4])))\n if len(str(lines[j][iauxhs[0]:iauxhs[1]]).replace(' ',''))>0:\n ahs=np.append(ahs,np.float(lines[j][iauxhs[0]:iauxhs[1]]))\n atp=np.append(atp,np.float(lines[j][iauxtp[0]:iauxtp[1]]))\n else:\n ahs=np.append(ahs,np.nan)\n atp=np.append(atp,np.nan)\n\n # build dictionary\n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'latitude':alat,'longitude':alon,'station_name':stname, \n 'hs':np.array(ahs),'tp':np.array(atp)}\n\n print(\" Model data read, \"+fname+\", bull format.\")\n return result\n del result,alat,alon,at,adate,ahs,atp,tfile,lines\n else:\n sys.exit(\" Skipped file \"+fname+\" Not bull format.\")\n\n\ndef bull_tar(*args):\n '''\n WAVEWATCH III, bull_tar operational point output, see https://www.ftp.ncep.noaa.gov/data/nccf/com/\n Input: file name (example: gfswave.t00z.bull_tar)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),lat,lon,station names; Arrays: hs, tp, and dp (gfs only)\n '''\n if len(args) == 1:\n fname=str(args[0])\n elif len(args) > 1:\n sys.exit(' Too many inputs')\n\n # confirm file format\n if fname.split('/')[-1].split('.')[-1]=='bull_tar':\n print(\" reading ww3 bull_tar file ...\")\n import tarfile\n stname=[]\n\n try:\n tar = tarfile.open(fname)\n except:\n sys.exit(' Cannot open '+fname)\n else:\n at=[]; adate=[]; alat=[]; alon=[]\n\n if 'gfs' in str(fname).split('/')[-1]:\n iauxhs=[24,30];iauxtp=[30,34];iauxdp=[35,38]\n\n for t in range(0,np.size(tar.getmembers())):\n # station names\n stname=np.append(stname,str(str(tar.getmembers()[t].name).split('/')[-1]).split('/')[-1].split('.')[-2])\n\n try:\n tfile=tar.extractfile(tar.getmembers()[t]); lines = tfile.readlines()\n except:\n print(\" Cannot open \"+tar.getmembers()[t].name)\n else:\n\n # lat / lon\n auxpos=str(lines[0]).replace(\"b'\",\"\").split('(')[1]\n if auxpos[5]=='N':\n alat=np.append(alat,np.float(auxpos[0:5]))\n else:\n alat=np.append(alat,-1.*np.float(auxpos[0:5]))\n\n if auxpos[13]=='E':\n alon=np.append(alon,np.float(auxpos[7:13]))\n else:\n alon=np.append(alon,-1.*np.float(auxpos[7:13]))\n\n if t==0: \n # time array ----\n auxdate = str(lines[2]).split(':')[1].split('UTC')[0][1::]\n auxt = 
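# Hedged sketch of the fixed-width parsing done for the .bull tables above: each swell partition repeats every 18 characters, so a column slice [c0:c1] is shifted by 18*k for partition k. The stride and empty-field handling mirror the iauxhs/iauxtp/iauxdp slices, but read_partitions itself is a hypothetical helper, not part of wread.py.\nimport numpy as np\ndef read_partitions(line, c0, c1, n_parts=4, stride=18):\n    vals = []\n    for k in range(n_parts):\n        field = line[c0 + stride * k:c1 + stride * k].strip()\n        if field:  # an empty field means partition k is absent in this record\n            vals.append(float(field))\n    return np.array(vals)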
np.double(timegm( strptime( auxdate[0:8]+' '+auxdate[9:11]+'00', '%Y%m%d %H%M') ))\n year = int(time.gmtime(auxt)[0]); month = int(time.gmtime(auxt)[1])\n pday=0\n for j in range(7,np.size(lines)-8):\n auxlines = str(lines[j]).replace(\"b'\",\"\")\n day=int(auxlines[3:5]); hour=int(auxlines[6:8]); del auxlines\n # advance the calendar date on day-of-month rollover\n if day<pday:\n month=month+1\n if month>12:\n month=1; year=year+1\n pday=day\n at=np.append(at,np.double(timegm( strptime( str(year)+str(month).zfill(2)+str(day).zfill(2)+' '+str(hour).zfill(2)+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[-1])[0],time.gmtime(at[-1])[1],time.gmtime(at[-1])[2],time.gmtime(at[-1])[3],time.gmtime(at[-1])[4])))\n\n # --------\n ahs=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n atp=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n adp=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n\n auxhs=[]; auxtp=[]; auxdp=[]\n for j in range(7,np.size(lines)-8):\n auxlines = str(lines[j]).replace(\"b'\",\"\")\n if len(str(auxlines[10:15]).replace(' ',''))>0:\n auxhs=np.append(auxhs,np.float(auxlines[10:15]))\n fuxhs=np.array([np.float(auxlines[iauxhs[0]:iauxhs[1]])])\n for k in range(1,4): \n if len(str(auxlines[int(iauxhs[0]+18*k):int(iauxhs[1]+18*k)]).replace(' ', '')):\n fuxhs=np.append(fuxhs,np.float(auxlines[int(iauxhs[0]+18*k):int(iauxhs[1]+18*k)]))\n\n fuxtp=np.array([np.float(auxlines[iauxtp[0]:iauxtp[1]])])\n for k in range(1,4): \n if len(str(auxlines[int(iauxtp[0]+18*k):int(iauxtp[1]+18*k)]).replace(' ', '')):\n fuxtp=np.append(fuxtp,np.float(auxlines[int(iauxtp[0]+18*k):int(iauxtp[1]+18*k)]))\n\n fuxdp=np.array([np.float(auxlines[iauxdp[0]:iauxdp[1]])])\n for k in range(1,4): \n if len(str(auxlines[int(iauxdp[0]+18*k):int(iauxdp[1]+18*k)]).replace(' ', '')):\n fuxdp=np.append(fuxdp,np.float(auxlines[int(iauxdp[0]+18*k):int(iauxdp[1]+18*k)]))\n\n indaux=np.nanmin(np.where(fuxhs==np.nanmax(fuxhs))[0])\n auxtp=np.append(auxtp,np.float(fuxtp[indaux]))\n auxdp=np.append(auxdp,np.float(fuxdp[indaux]))\n del indaux,fuxhs,fuxtp,fuxdp\n else:\n auxhs=np.append(auxhs,np.nan)\n auxtp=np.append(auxtp,np.nan)\n auxdp=np.append(auxdp,np.nan)\n\n if ahs.shape[1]==auxhs.shape[0]:\n ahs[t,:]=np.array(auxhs)\n atp[t,:]=np.array(auxtp)\n adp[t,:]=np.array(auxdp)\n else:\n print(\" Time duration of \"+tar.getmembers()[t].name+\" (in \"+fname+\") does not match the other stations. Maintained NaN.\")\n\n del auxhs,auxtp,auxdp,tfile,lines\n\n # build dictionary \n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'latitude':np.array(alat),'longitude':np.array(alon),'station_name':np.array(stname),\n 'hs':np.array(ahs),'tp':np.array(atp),'dp':np.array(adp)}\n\n del adp\n\n elif 'gefs' in str(fname).split('/')[-1]:\n iauxhs=[10,15];iauxtp=[28,33]\n \n for t in range(0,np.size(tar.getmembers())):\n # station names\n stname=np.append(stname,str(str(tar.getmembers()[t].name).split('/')[-1]).split('/')[-1].split('.')[-2])\n\n try:\n tfile=tar.extractfile(tar.getmembers()[t]); lines = tfile.readlines()\n except:\n print(\" Cannot open \"+tar.getmembers()[t].name)\n else:\n # lat / lon\n auxpos=str(lines[1]).replace(\"b'\",\"\").split('(')[1].split('N')\n alat=np.append(alat,np.float(auxpos[0]))\n alon=np.append(alon,np.float(auxpos[1].split('W')[0]))\n\n if t==0:\n # time array ----\n auxdate = str(lines[3]).split(':')[1].split('UTC')[0][1::]\n auxt = np.double(timegm( strptime( auxdate[0:8]+' '+auxdate[10:12]+'00', '%Y%m%d %H%M') ))\n year = int(time.gmtime(auxt)[0]); month = int(time.gmtime(auxt)[1])\n pday=0\n for j in range(9,np.size(lines)-8):\n auxlines = str(lines[j]).replace(\"b'\",\"\")\n day=int(auxlines[2:4]); hour=int(auxlines[5:7]); del auxlines\n # advance the calendar date on day-of-month rollover\n if day<pday:\n month=month+1\n if month>12:\n month=1; year=year+1\n pday=day\n at=np.append(at,np.double(timegm( strptime( str(year)+str(month).zfill(2)+str(day).zfill(2)+' '+str(hour).zfill(2)+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[-1])[0],time.gmtime(at[-1])[1],time.gmtime(at[-1])[2],time.gmtime(at[-1])[3],time.gmtime(at[-1])[4])))\n\n ahs=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n atp=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n\n auxhs=[]; auxtp=[]\n for j in range(9,np.size(lines)-8):\n auxlines = str(lines[j]).replace(\"b'\",\"\")\n if len(str(auxlines[iauxhs[0]:iauxhs[1]]).replace(' ',''))>0:\n auxhs=np.append(auxhs,np.float(auxlines[iauxhs[0]:iauxhs[1]]))\n auxtp=np.append(auxtp,np.float(auxlines[iauxtp[0]:iauxtp[1]]))\n else:\n auxhs=np.append(auxhs,np.nan)\n auxtp=np.append(auxtp,np.nan)\n\n del auxlines\n\n if ahs.shape[1]==auxhs.shape[0]:\n ahs[t,:]=np.array(auxhs)\n atp[t,:]=np.array(auxtp)\n else:\n print(\" Time duration of \"+tar.getmembers()[t].name+\" (in \"+fname+\") does not match the other stations. 
Mantained NaN.\")\n \n del auxhs,auxtp,tfile,lines\n\n # build dictionary \n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'latitude':np.array(alat),'longitude':np.array(alon),'station_name':np.array(stname),\n 'hs':np.array(ahs),'tp':np.array(atp)}\n\n print(\" Model data read, \"+fname+\", bull_tar format.\")\n return result\n del result,tar,alat,alon,ahs,atp,at,adate\n else:\n sys.exit(\" Skipped file \"+fname+\" Not bull_tar format.\")\n\n\ndef ts(*args):\n '''\n WAVEWATCH III, ts operational point output, see https://www.ftp.ncep.noaa.gov/data/nccf/com/\n Input: file name (example: gefs.wave.41004.ts)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),station name; Arrays: hs, hs_spr, tp (glwu or gefs)\n '''\n if len(args) == 1:\n fname=str(args[0])\n elif len(args) > 1:\n sys.exit(' Too many inputs')\n\n # confirm format\n if str(fname).split('/')[-1].split('.')[-1]=='ts':\n print(\" reading ww3 ts file ...\")\n stname=str(fname).split('/')[-1].split('.')[-2]\n try:\n tfile = pd.read_csv(fname,skiprows=2); lines = tfile.values[:,0]\n except:\n sys.exit(' Cannot open '+fname)\n else:\n\n if 'gefs' in str(fname).split('/')[-1]:\n # gefs lakes ww3 format\n at=[]; adate=[]; ahs=[]; ahspr=[]; atp=[]\n for j in range(0,np.size(lines)):\n at=np.append(at,np.double(timegm( strptime( lines[j][1:12]+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[j])[0],time.gmtime(at[j])[1],time.gmtime(at[j])[2],time.gmtime(at[j])[3],time.gmtime(at[j])[4])))\n\n if len(lines[j])>0:\n ahs=np.append(ahs,np.float(lines[j][13:18]))\n ahspr=np.append(ahspr,np.float(lines[j][19:25]))\n atp=np.append(atp,np.float(lines[j][27:32]))\n else:\n ahs=np.append(ahs,np.nan)\n ahspr=np.append(ahspr,np.nan)\n atp=np.append(atp,np.nan)\n\n # build dictionary \n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'station_name':np.array(stname),'hs':np.array(ahs),'hs_spr':np.array(ahspr),'tp':np.array(atp)}\n\n print(\" Model data read, \"+fname+\", ts format.\")\n return result\n del result,at,adate,ahs,ahspr,atp,tfile,lines\n\n elif 'glwu' in str(fname).split('/')[-1]:\n # great lakes ww3 format\n at=[];adate=[];ahs=[];al=[];atr=[];adir=[];aspr=[];atp=[];ap_dir=[];ap_spr=[]\n for j in range(0,np.size(lines)):\n at=np.append(at,np.double(timegm( strptime( lines[j][2:13]+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[j])[0],time.gmtime(at[j])[1],time.gmtime(at[j])[2],time.gmtime(at[j])[3],time.gmtime(at[j])[4])))\n\n if len(lines[j])>0:\n ahs=np.append(ahs,np.float(lines[j][22:28]))\n al=np.append(al,np.float(lines[j][31:35]))\n atr=np.append(atr,np.float(lines[j][37:42]))\n adir=np.append(adir,np.float(lines[j][44:49]))\n aspr=np.append(aspr,np.float(lines[j][50:56]))\n atp=np.append(atp,np.float(lines[j][57:64]))\n ap_dir=np.append(ap_dir,np.float(lines[j][66:71]))\n ap_spr=np.append(ap_spr,np.float(lines[j][72:78]))\n else:\n ahs=np.append(ahs,np.nan)\n al=np.append(al,np.nan)\n atr=np.append(atr,np.nan)\n adir=np.append(adir,np.nan)\n aspr=np.append(aspr,np.nan)\n atp=np.append(atp,np.nan)\n ap_dir=np.append(ap_dir,np.nan)\n ap_spr=np.append(ap_spr,np.nan)\n\n atp[atp<0.01]=np.nan; atp=1./atp\n\n # build dictionary \n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'station_name':np.array(stname),\n 'hs':np.array(ahs),'l':np.array(al),\n 'tm':np.array(atr),'dm':np.array(adir),\n 
'spr':np.array(aspr),'tp':np.array(atp),\n 'dp':np.array(ap_dir),'peak_spr':np.array(ap_spr)}\n\n print(\" Model data read, \"+fname+\", ts format.\")\n return result\n del result,at,adate,ahs,al,atr,adir,aspr,atp,ap_dir,ap_spr,tfile,lines\n\n else:\n sys.exit(\" Skipped file \"+fname+\" Not ts format.\")\n\n\ndef station_tar(*args):\n '''\n WAVEWATCH III, station_tar operational point output, see https://www.ftp.ncep.noaa.gov/data/nccf/com/\n Input: file name (example: gefs.wave.t00z.station_tar)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),station name; Arrays: hs, hs_spr, tp (gefs only)\n '''\n if len(args) == 1:\n fname=str(args[0])\n elif len(args) > 1:\n sys.exit(' Too many inputs')\n\n # confirm format\n if str(fname).split('/')[-1].split('.')[-1]=='station_tar':\n print(\" reading ww3 station_tar file ...\")\n import tarfile\n stname=[]\n\n try:\n tar = tarfile.open(fname)\n except:\n sys.exit(' Cannot open '+fname)\n else:\n for t in range(0,np.size(tar.getmembers())):\n # station names\n stname=np.append(stname,str(str(tar.getmembers()[t].name).split('/')[-1]).split('/')[-1].split('.')[-2])\n\n try:\n tfile=tar.extractfile(tar.getmembers()[t]); lines = tfile.readlines()[3::]\n except:\n print(\" Cannot open \"+tar.getmembers()[t].name)\n else:\n if t==0:\n # time array ----\n at=[]; adate=[]\n for j in range(0,np.size(lines)):\n at=np.append(at,np.double(timegm( strptime( str(lines[j])[3:14]+'00', '%Y%m%d %H%M') )))\n adate=np.append(adate,date2num(datetime.datetime(time.gmtime(at[j])[0],time.gmtime(at[j])[1],time.gmtime(at[j])[2],time.gmtime(at[j])[3],time.gmtime(at[j])[4])))\n\n # --------\n ahs=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n ahspr=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n atp=np.zeros((np.size(tar.getmembers()),at.shape[0]),'f')*np.nan\n\n auxhs=[]; auxhspr=[]; auxtp=[]\n for j in range(0,np.size(lines)):\n auxlines = str(lines[j]).replace(\"b'\",\"\")\n if len(lines[j])>0:\n auxhs=np.append(auxhs,np.float(auxlines[13:18]))\n auxhspr=np.append(auxhspr,np.float(auxlines[19:25]))\n auxtp=np.append(auxtp,np.float(auxlines[27:32]))\n else:\n auxhs=np.append(auxhs,np.nan)\n auxhspr=np.append(auxhspr,np.nan)\n auxtp=np.append(auxtp,np.nan)\n\n if ahs.shape[1]==auxhs.shape[0]:\n ahs[t,:]=np.array(auxhs)\n ahspr[t,:]=np.array(auxhspr)\n atp[t,:]=np.array(auxtp)\n else:\n print(\" Time duration of \"+tar.getmembers()[t]+\" (in \"+fname+\") do not match the other stations. 
Mantained NaN.\")\n\n del auxhs,auxhspr,auxtp,tfile,lines\n\n # build dictionary \n result={'time':np.array(at).astype('double'),'date':np.array(adate).astype('double'),\n 'station_name':np.array(stname),'hs':np.array(ahs),'hs_spr':np.array(ahspr),'tp':np.array(atp)}\n\n return result\n del result,tar,ahs,ahspr,atp,at,adate\n\n print(\" Model data read, \"+fname+\", station_tar format.\")\n\n else:\n sys.exit(\" Skipped file \"+fname+\" Not station_tar format.\")\n\n\n# SPECTRA \n\n# Observations NDBC, netcdf format\ndef spec_ndbc(*args):\n '''\n Observations NDBC, wave spectrum, netcdf format\n Input: file name (example: 46047w2016.nc)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),lat,lon; Arrays: freq,dfreq,pspec,dmspec,dpspec,dirspec\n '''\n sk=1; deltatheta=int(10)\n if len(args) >= 1:\n fname=str(args[0])\n if len(args) >= 2:\n sk=int(args[1])\n if len(args) >= 3:\n deltatheta=int(args[3])\n if len(args) > 3:\n sys.exit(' Too many inputs')\n\n try:\n ds = xr.open_dataset(fname); f=nc.Dataset(fname)\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n btime = np.array(f.variables['time'][::sk]).astype('double')\n f.close(); del f\n bdate = ds['time'].values[::sk]\n blat = ds['latitude'].values[:]\n blon = ds['longitude'].values[:]\n freq = ds['frequency'].values[:]\n pspec = ds['spectral_wave_density'].values[::sk,:,0,0]\n dmspec = ds['mean_wave_dir'][::sk,:,0,0]\n dpspec = ds['principal_wave_dir'][::sk,:,0,0] \n r1spec = ds['wave_spectrum_r1'][::sk,:,0,0]\n r2spec = ds['wave_spectrum_r2'][::sk,:,0,0]\n ds.close(); del ds\n # DF in frequency (dfreq), https://www.ndbc.noaa.gov/wavespectra.shtml\n if int(freq.shape[0])==47:\n dfreq=np.zeros(47,'f')\n dfreq[0]=0.010; dfreq[1:14]=0.005; dfreq[14:40]=0.010; dfreq[40::]=0.020\n else:\n dfreq=np.zeros(freq.shape[0],'f')+0.01\n\n pspec=np.array(pspec*dfreq)\n # Directional 2D Spectrum, https://www.ndbc.noaa.gov/measdes.shtml#swden , https://www.ndbc.noaa.gov/wavemeas.pdf\n theta = np.array(np.arange(0,360+0.1,deltatheta))\n # final directional wave spectrum (frequency X direction)\n dirspec = np.zeros((btime.shape[0],freq.shape[0],theta.shape[0]),'f')\n for t in range(0,btime.shape[0]):\n dirspec[t,:,:] = np.array([pspec[t,:]]).T * (1/pi)*(0.5+ np.array([r1spec[t,:]]).T * cos(np.array( np.array([theta])-np.array([dmspec[t,:]]).T )*(pi/180)) \n + np.array([r2spec[t,:]]).T*cos(2*np.array( np.array([theta]) - np.array([dpspec[t,:]]).T )*(pi/180)))\n\n # build dictionary \n result={'time':btime,'date':bdate,'latitude':blat,'longitude':blon,\n 'freq':freq,'deltafreq':dfreq,'pspec':pspec,'dmspec':dmspec,'dpspec':dpspec,\n 'theta':theta,'dirspec':dirspec}\n\n return result\n del btime,bdate,blat,blon,freq,dfreq,pspec,dmspec,dpspec,theta,dirspec\n\n\n# WAVEWATCH III spectra output, netcdf format\ndef spec_ww3(*args):\n '''\n WAVEWATCH III, wave spectrum, netcdf format\n Input: file name (example: ww3gefs.20160928_spec.nc), and station name (example: 41002)\n Output: dictionary containing:\n time(seconds since 1970),time(datetime64),lat,lon; Arrays: freq,dfreq,pwst,d1sp,dire,dspec,wnds,wndd\n '''\n sk=1\n if len(args) < 2 :\n sys.exit(' Two inputs are required: file name and station name')\n if len(args) >= 2 :\n fname=str(args[0]); stname=str(args[1])\n if len(args) > 2 :\n sk=int(args[2])\n if len(args) > 3 :\n sys.exit(' Too many inputs')\n\n try:\n ds = xr.open_dataset(fname); f=nc.Dataset(fname)\n except:\n sys.exit(\" Cannot open \"+fname)\n else:\n\n mtime = np.array(f.variables['time'][::sk]*24*3600 + timegm( 
strptime(str(f.variables['time'].units).split(' ')[2][0:4]+'01010000', '%Y%m%d%H%M') )).astype('double')\n f.close(); del f\n\n auxstationname=ds['station_name'].values[:,:]; stationname=[]\n for i in range(0,auxstationname.shape[0]):\n stationname=np.append(stationname,\"\".join(np.array(auxstationname[i,:]).astype('str')))\n\n inds=np.where(stationname[:]==stname)\n if np.size(inds)>0:\n inds=int(inds[0][0]); stname=str(stationname[inds])\n else:\n sys.exit(' Station '+stname+' not included in the output file, or wrong station ID')\n\n # Spectrum\n dspec=np.array(ds['efth'][::sk,inds,:,:])\n # number of directions\n nd=dspec.shape[2]\n # number of frequencies\n nf=dspec.shape[1]\n # directions\n dire=np.array(ds['direction'].values[:])\n # frequencies\n freq=np.array(ds['frequency'].values[:])\n freq1=np.array(ds['frequency1'].values[:])\n freq2=np.array(ds['frequency2'].values[:])\n # DF in frequency (dfreq)\n dfreq=np.array(freq2 - freq1)\n # wind intensity and wind direction\n wnds=np.array(ds['wnd'].values[::sk,inds])\n wndd=np.array(ds['wnddir'].values[::sk,inds])\n # Time datetime64 array\n mdate=np.array(ds['time'].values[::sk])\n # water depth (constant in time)\n depth=np.nanmean(ds['dpt'].values[::sk,inds],axis=0)\n lon=np.array(np.nanmean(ds['longitude'].values[::sk,inds],axis=0))\n lat=np.array(np.nanmean(ds['latitude'].values[::sk,inds],axis=0)) \n \n ds.close(); del ds, auxstationname, inds, stationname\n # ------------------\n # 1D power spectrum\n pwst=np.zeros((dspec.shape[0],nf),'f')\n for t in range(0,dspec.shape[0]):\n for il in range(0,nf):\n pwst[t,il]=sum(dspec[t,il,:]*(2*np.pi)/nd)\n\n pwst[t,:]=pwst[t,:]*dfreq[:]\n\n # organizing directions -----\n adspec=np.copy(dspec); inddire=int(np.where(dire==min(dire))[0][0])\n for t in range(0,dspec.shape[0]):\n adspec[t,:,0:nd-(inddire+1)]=dspec[t,:,(inddire+1):nd]\n adspec[t,:,nd-(inddire+1):nd]=dspec[t,:,0:(inddire+1)]\n for i in range(0,nd):\n dspec[t,:,i]=adspec[t,:,nd-i-1]\n\n adspec[t,:,0:int(nd/2)]=dspec[t,:,int(nd/2):nd]\n adspec[t,:,int(nd/2):nd]=dspec[t,:,0:int(nd/2)]\n dspec[t,:,:]=adspec[t,:,:]\n\n dire=np.sort(dire)\n\n # 1D directional spectrum\n d1sp=np.zeros((dspec.shape[0],nf),'f')\n for t in range(0,dspec.shape[0]):\n for il in range(0,nf): \n a = np.sum(dspec[t,il,:] * np.array(np.sin((pi*dire)/180.)/np.sum(dspec[t,il,:])) )\n b = np.sum(dspec[t,il,:] * np.array(np.cos((pi*dire)/180.)/np.sum(dspec[t,il,:])) )\n aux = math.atan2(a,b)*(180./pi)\n if aux<0:\n aux=aux+360.\n \n d1sp[t,il]=np.float(aux)\n del a,b,aux\n\n # build dictionary \n result={'time':mtime,'date':mdate,'latitude':lat,'longitude':lon,\n 'wind_spd':wnds,'wind_dir':wndd,'freq':freq,'freq1':freq1,'freq2':freq2,\n 'deltafreq':dfreq,'pspec':pwst,'theta':dire,'dmspec':d1sp,'dirspec':dspec}\n\n return result\n del mtime,mdate,lat,lon,wnds,wndd,freq,freq1,freq2,dfreq,pwst,dire,d1sp,dspec\n\n","repo_name":"NOAA-EMC/WW3-tools","sub_path":"ww3tools/wread.py","file_name":"wread.py","file_ext":"py","file_size_in_byte":67091,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"84"} +{"seq_id":"72998957393","text":"from bottle import route, run, static_file, post, request\nfrom urllib import urlopen\nimport json\n\n@route('/index.html')\ndef index():\n return static_file('/index.html', root='.')\n\n@post('/md')\ndef md():\n requestedParam = request.POST\n params = {\n \"text\": requestedParam.text,\n \"mode\": \"gfm\"\n }\n res = urlopen('https://api.github.com/markdown', json.dumps(params))\n return 
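# Hedged usage sketch for the small Bottle proxy nearby: it simply forwards the posted text to GitHub's markdown API, so the same call can be made directly. The payload keys ('text', 'mode') follow the GitHub REST docs; the sample string is illustrative, and this sketch targets Python 3 while the server itself is Python 2.\nimport json\nfrom urllib.request import urlopen\npayload = json.dumps({'text': '**hello**', 'mode': 'gfm'}).encode('utf-8')\nhtml = urlopen('https://api.github.com/markdown', payload).read()\nprint(html.decode('utf-8'))  # e.g. <p><strong>hello</strong></p>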
res.read()\n\nrun(host='localhost', port=8080, debug=True, reloader=True)\n","repo_name":"haseg/dojo-md-editor","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"72802278995","text":"import logging\nfrom typing import Optional\n\nimport requests\nimport os\nfrom appdirs import user_cache_dir\nfrom base64 import urlsafe_b64encode, b64encode\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\nfrom datetime import datetime, timedelta\nfrom hashlib import sha256\nfrom json import dumps, loads\nfrom random import getrandbits\nfrom re import search\nfrom requests_oauthlib import OAuth2Session\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom webbrowser import open as browse\n\nCLIENT_ID = \"amcat4py\"\n\nFELINE_RESPONSE = \"\"\"HTTP/1.1 200 OK\nContent-Type: text/html; charset=utf-8\n\n
Authorization complete, you can close this tab and return to Python.\n ,_     _\n |\\\\\\\\_,-~/\n / _  _ |    ,--.\n(  @  @ )   / ,-'\n \\\\  _T_/-._( (\n /         `. \\\\\n|         _  \\\\ |\n \\\\ \\\\ ,  /      |\n  || |-_\\\\__   /\n ((_/`(____,-'\n
\n\"\"\"\n\n\ndef get_middlecat_token(host, callback_port=65432, refresh=\"static\") -> dict:\n \"\"\"\n Authenticate to an AmCAT instance using a middlecat instance (which is automatically retrieved from the AmCAT instance)\n :param host: The URL to the AmCAT instance (e.g. \"http://localhost/api\")\n :param callback_port: Port used to receive the token. The only reason to change this is if the port is already in use.\n :param refresh: Either \"refresh\" or \"static\" to en-/disable token rotation.\n \"\"\"\n # we open a socket and browser for for the interactive authentication and wait for the code from middlecat\n # We do this first so we can change the port if needed\n s = socket(AF_INET, SOCK_STREAM)\n while True:\n try:\n s.bind((\"127.0.0.1\", callback_port))\n except OSError:\n logging.info(f\"Port {callback_port} already in use, trying {callback_port-1}\")\n callback_port -= 1\n else:\n break\n s.listen()\n\n middlecat = requests.get(f\"{host}/config\").json()[\"middlecat_url\"]\n auth_url = f\"{middlecat}/authorize\"\n token_url = f\"{middlecat}/api/token\"\n pkce = pkce_challange()\n\n auth_params = {\n \"resource\": host,\n \"refresh_mode\": refresh,\n \"session_type\": \"api_key\",\n \"code_challenge_method\": pkce[\"method\"],\n \"code_challenge\": pkce[\"challenge\"]\n }\n\n oauth = OAuth2Session(client_id=CLIENT_ID, redirect_uri=f\"http://localhost:{callback_port}/\")\n\n authorization_url, state = oauth.authorization_url(auth_url, **auth_params)\n browse(authorization_url)\n print(\"Waiting for authorization in browser...\")\n conn, addr = s.accept()\n\n conn.sendall(FELINE_RESPONSE.encode(\"ascii\"))\n\n data = conn.recv(1024).decode()\n code = search(r\"code=([^&\\s]+)\", data).group(1)\n conn.close()\n\n # using the received code, make a request to get the actual token\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n params = {\"grant_type\": \"authorization_code\", \"code\": code, \"code_verifier\": pkce[\"verifier\"], \"state\": state}\n return _request_token(params, headers, host, token_url)\n\n\ndef token_refresh(token, host) -> dict:\n \"\"\"\n Usually called by _check_token if token has expired\n :param token: old token\n :param host: The URL to the AmCAT instance (e.g. \"http://localhost/api\").\n \"\"\"\n middlecat = requests.get(f\"{host}/middlecat\").json()[\"middlecat_url\"]\n token_url = f\"{middlecat}/api/token\"\n params = {\n \"resource\": host,\n \"grant_type\": \"refresh_token\",\n \"refresh_mode\": token[\"refresh_rotate\"],\n \"session_type\": \"api_key\",\n \"refresh_token\": token[\"refresh_token\"],\n \"client_id\": CLIENT_ID\n }\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n return _request_token(params, headers, host, token_url)\n\n\ndef _request_token(auth_params, headers, host, token_url) -> dict:\n r = requests.post(token_url, headers=headers, data=dumps(auth_params))\n r.raise_for_status()\n token = r.json()\n expires_at = timedelta(seconds=token[\"expires_in\"]) + datetime.now()\n token[\"expires_at\"] = expires_at.strftime(\"%Y-%m-%dT%H:%M:%S\")\n del token[\"expires_in\"]\n cache_token(token, host)\n return token\n\n\ndef _get_token(host, force_refresh=False, login_if_needed=True) -> Optional[dict]:\n \"\"\"\n Returns refreshed token if old token has expired\n :param host: The URL to the AmCAT instance (e.g. 
\"http://localhost/api\").\n :param username,password: optionally provide a username and password to trigger get_password_token()\n instead of get_middlecat_token()\n :param force_refresh: when True, overwrites the cached token and creates a new one\n \"\"\"\n file_path = user_cache_dir(CLIENT_ID) + \"/\" + sha256(host.encode()).hexdigest()\n if os.path.exists(file_path) and not force_refresh:\n token = secret_read(file_path, host)\n elif login_if_needed:\n token = get_middlecat_token(host)\n else:\n return None\n return _check_token(token, host)\n\n\ndef _check_token(token, host) -> dict:\n \"\"\"\n Returns refreshed token if old token has expired\n :param token: old token\n :param host: The URL to the AmCAT instance (e.g. \"http://localhost/api\").\n \"\"\"\n if \"expires_at\" in token:\n if datetime.now() + timedelta(seconds=10) > datetime.strptime(token[\"expires_at\"], \"%Y-%m-%dT%H:%M:%S\"):\n token = token_refresh(token, host)\n return token\n\n\ndef cache_token(token, host) -> None:\n \"\"\"\n Caches encrypted token on disk\n :param token: old token\n :param host: The URL to the AmCAT instance (e.g. \"http://localhost/api\").\n \"\"\"\n file_path = user_cache_dir(CLIENT_ID) + \"/\" + sha256(host.encode()).hexdigest()\n dir_path = os.path.dirname(file_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n fernet = Fernet(make_key(host))\n data = fernet.encrypt(dumps(token).encode())\n with open(file_path, \"wb\") as f:\n f.write(data)\n\n\ndef secret_read(path, host) -> dict:\n \"\"\"\n Reads encrypted token from disk\n :param path: path to file, usually in user_cache_dir(CLIENT_ID)\n :param host: The URL to the AmCAT instance (e.g. \"http://localhost/api\").\n \"\"\"\n with open(path, \"rb\") as f:\n token_enc = f.read()\n fernet = Fernet(make_key(host))\n return loads(fernet.decrypt(token_enc).decode())\n\n\ndef make_key(key) -> bytes:\n \"\"\"\n Helper function to make key for encryption of tokens\n :param key: string that is turned into key.\n \"\"\"\n kdf = PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=\"supergeheim\".encode(),\n iterations=5,\n )\n return urlsafe_b64encode(kdf.derive(key.encode()))\n\n\ndef base64_url_encode(x) -> str:\n \"\"\"\n Custom base64 encode for pkce challange nicked from httr2\n https://github.com/r-lib/httr2/blob/main/R/utils.R\n :param x: string to be encoded.\n \"\"\"\n x = b64encode(x).decode(\"utf-8\")\n # Replace some characters to align output with the javascript version\n x = x.rstrip(\"=\")\n x = x.replace(\"+\", \"-\").replace(\"/\", \"_\")\n return x\n\n\ndef pkce_challange() -> dict:\n \"\"\"\n Generates PKCE code challange for middlecat requests\n :param x: string to be encoded.\n \"\"\"\n # Generate random 32-octet sequence\n verifier = getrandbits(256).to_bytes(32, byteorder=\"big\")\n verifier = base64_url_encode(verifier)\n challenge = sha256(verifier.encode(\"utf-8\")).digest()\n challenge = base64_url_encode(challenge)\n return {\n \"verifier\": verifier,\n \"method\": \"S256\",\n \"challenge\": challenge\n }\n","repo_name":"ccs-amsterdam/amcat4py","sub_path":"amcat4py/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":7606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"30611933134","text":"### MyAnimeList.net ###\n# Source agent: https://github.com/Fribb/MyAnimeList.bundle/blob/master/Contents/Code/__init__.py\n# API xml exemple: http://fribbtastic-api.net/fribbtastic-api/services/anime?id=33487\n\n### Imports ###\n# Python 
Modules #\nimport os\n# HAMA Modules #\nimport common\nfrom common import Log, DictString, Dict, SaveDict, GetXml # Direct import of heavily used functions\n\n### Variables ###\nMAL_HTTP_API_URL = \"http://fribbtastic-api.net/fribbtastic-api/services/anime?id=\"\nMAL_PREFIX = \"https://myanimelist.cdn-dena.com\" # Some links in the XML will come from TheTVDB, not adding those....\n\n### Functions ###\ndef GetMetadata(movie, MALid):\n Log.Info(\"=== MyAnimeList.GetMetadata() ===\".ljust(157, '='))\n MyAnimeList_dict = {}\n\n Log.Info(\"MALid: '%s'\" % MALid)\n if not MALid or not MALid.isdigit(): return MyAnimeList_dict\n\n Log.Info(\"--- series ---\".ljust(157, '-'))\n xml = common.LoadFile(filename=MALid+\".xml\", relativeDirectory=os.path.join('MyAnimeList', 'xml'), url=MAL_HTTP_API_URL + MALid, cache=CACHE_1WEEK)\n if isinstance(xml, str):\n Log.Error('Invalid str returned: \"{}\"'.format(xml))\n elif xml:\n Log.Info(\"[ ] title: {}\" .format(SaveDict( GetXml(xml, 'title' ), MyAnimeList_dict, 'title' )))\n Log.Info(\"[ ] summary: {}\" .format(SaveDict( GetXml(xml, 'synopsis' ), MyAnimeList_dict, 'summary' )))\n Log.Info(\"[ ] score: {}\" .format(SaveDict( GetXml(xml, 'rating' ), MyAnimeList_dict, 'score' )))\n #Log.Info(\"[ ] rating: {}\" .format(SaveDict( GetXml(xml, 'content_rating').split(\" \")[0], MyAnimeList_dict, 'rating' )))\n Log.Info(\"[ ] originally_available_at: {}\".format(SaveDict( GetXml(xml, 'firstAired' ), MyAnimeList_dict, 'originally_available_at')))\n \n #for item in xml.xpath('//anime/genres/genre' or []): SaveDict([item.text], MyAnimeList_dict, 'genres')\n if GetXml(xml, '//anime/genres/genre'): Log.Info(\"[ ] genres: {}\".format(SaveDict( sorted([item.text for item in xml.xpath('//anime/genres/genre')]), MyAnimeList_dict, 'genres')))\n if GetXml(xml, 'status') == 'Currently Airing': Log.Info(\"[ ] status: {}\".format(SaveDict( \"Continuing\", MyAnimeList_dict, 'status')))\n if GetXml(xml, 'status') == 'Finished Airing': Log.Info(\"[ ] status: {}\".format(SaveDict( \"Ended\" , MyAnimeList_dict, 'status')))\n\n Log.Info(\"--- episodes ---\".ljust(157, '-'))\n for item in xml.xpath('//anime/episodes/episode') or []:\n ep_number, ep_title, ep_air = GetXml(item, 'episodeNumber'), GetXml(xml, 'engTitle'), GetXml(xml, 'aired')\n Log.Info('[ ] s1e{:>3} air_date: {}, title: \"{}\"'.format(ep_number, ep_title, ep_air))\n SaveDict( ep_title, MyAnimeList_dict, 'seasons', \"1\", 'episodes', ep_number, 'title' )\n SaveDict( ep_air, MyAnimeList_dict, 'seasons', \"1\", 'episodes', ep_number, 'originally_available_at')\n \n Log.Info(\"--- images ---\".ljust(157, '-'))\n for item in xml.xpath('//anime/covers/cover' ):\n Log.Info(\"[ ] poster: {}\".format(SaveDict((\"MyAnimeList/\" + \"/\".join(item.text.split('/')[3:]), 50, None) if item.text.startswith(MAL_PREFIX) else \"\", MyAnimeList_dict, 'posters', item.text)))\n for item in xml.xpath('//anime/backgrounds/background'):\n Log.Info(\"[ ] art: {}\" .format(SaveDict((\"MyAnimeList/\" + \"/\".join(item.text.split('/')[3:]), 50, None) if item.text.startswith(MAL_PREFIX) else \"\", MyAnimeList_dict, 'art', item.text)))\n for item in xml.xpath('//anime/banners/banner' ):\n Log.Info(\"[ ] banner: {}\".format(SaveDict((\"MyAnimeList/\" + \"/\".join(item.text.split('/')[3:]), 50, None) if item.text.startswith(MAL_PREFIX) else \"\", MyAnimeList_dict, 'banners', item.text)))\n\n Log.Info(\"--- return ---\".ljust(157, '-'))\n Log.Info(\"MyAnimeList_dict: {}\".format(DictString(MyAnimeList_dict, 4)))\n return 
MyAnimeList_dict\n","repo_name":"ameyuuno-3rd-party-dependencies/Hama.bundle","sub_path":"Contents/Code/MyAnimeList.py","file_name":"MyAnimeList.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"6142697911","text":"from __future__ import annotations\n\nimport io\nimport typing as t\nfrom collections import OrderedDict\nfrom os import getenv\nfrom pathlib import Path\n\nfrom ruamel.yaml import YAML, CommentedMap\n\nfrom sqlmesh.utils.errors import SQLMeshError\nfrom sqlmesh.utils.jinja import ENVIRONMENT\n\nJINJA_METHODS = {\n \"env_var\": lambda key, default=None: getenv(key, default),\n}\n\n\ndef load(\n source: str | Path, raise_if_empty: bool = True, render_jinja: bool = True\n) -> t.OrderedDict:\n \"\"\"Loads a YAML object from either a raw string or a file.\"\"\"\n path: t.Optional[Path] = None\n\n if isinstance(source, Path):\n path = source\n with open(source, \"r\", encoding=\"utf-8\") as file:\n source = file.read()\n\n if render_jinja:\n source = ENVIRONMENT.from_string(source).render(JINJA_METHODS)\n\n contents = YAML().load(source)\n if contents is None:\n if raise_if_empty:\n error_path = f\" '{path}'\" if path else \"\"\n raise SQLMeshError(f\"YAML source{error_path} can't be empty.\")\n return OrderedDict()\n\n return contents\n\n\ndef dumps(value: CommentedMap | OrderedDict) -> str:\n \"\"\"Dumps a ruamel.yaml loaded object and converts it into a string\"\"\"\n result = io.StringIO()\n YAML().dump(value, result)\n return result.getvalue()\n","repo_name":"robscriva/sqlmesh","sub_path":"sqlmesh/utils/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"24743698128","text":"# Importing the necessary modules and libraries\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom routes.blueprint import blueprint\nfrom models.machine import db\n\n\ndef create_app():\n app = Flask(__name__) # flask app object\n app.config.from_object('config') # Configuring from Python Files\n\n db.init_app(app) # Initializing the database\n return app\n\n\napp = create_app() # Creating the app\n# Registering the blueprint\napp.register_blueprint(blueprint, url_prefix='/machines')\nmigrate = Migrate(app, db) # Initializing the migration\n\n\nif __name__ == '__main__': # Running the app\n app.run(host='127.0.0.1', port=5000, debug=True)\n","repo_name":"arslanaut/dataScienceProjects","sub_path":"Minimal Flask Application using MVC design pattern/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"21233960985","text":"\nimport sys\nimport os\nimport logging\nimport torch\nimport argparse\n\nfrom starry.utils.config import Configuration\nfrom starry.utils.model_factory import loadModel, registerModels\nfrom onnxTypecast import convert_model_to_int32\n\n\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n\ndef runConfig (onnx_config, model_loader, outpath):\n\tinput_names = [input['name'] for input in onnx_config['inputs']]\n\toutput_names = onnx_config['outputs']\n\n\t#shapes = [tuple(onnx_config['inputs'][name]) for name in input_names]\n\tdummy_inputs = tuple(torch.zeros(*input['shape'], dtype=getattr(torch, input.get('dtype', 'float32'))) for input in onnx_config['inputs'])\n\topset = onnx_config['opset']\n\n\ttruncate_long = 
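# Hedged usage sketch for the YAML loader above (sqlmesh.utils.yaml.load): the env_var Jinja method pulls values from the environment at load time. The variable name and YAML body are illustrative.\nimport os\nfrom sqlmesh.utils.yaml import load\nos.environ['DB_HOST'] = 'localhost'\ncfg = load(\"gateway:\\n  host: {{ env_var('DB_HOST', 'fallback') }}\")\nprint(cfg['gateway']['host'])  # localhost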
onnx_config.get('truncate_long')\n\ttemp_path = outpath.replace('.onnx', '.temp.onnx')\n\n\tmodel_postfix = onnx_config.get('model_postfix', '')\n\tstate_field = onnx_config.get('state_field', 'model')\n\tmodel = model_loader(model_postfix, state_field)\n\n\twith torch.no_grad():\n\t\ttorch.onnx.export(model, dummy_inputs, temp_path if truncate_long else outpath,\n\t\t\tverbose=True,\n\t\t\tinput_names=input_names,\n\t\t\toutput_names=output_names,\n\t\t\topset_version=opset)\n\n\tif truncate_long:\n\t\tconvert_model_to_int32(temp_path, outpath)\n\n\tlogging.info(f'ONNX model saved to: {outpath}')\n\n\ndef main ():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('config', type=str)\n\tparser.add_argument('-s', '--shapes', type=str, help='shapes of input tensors, e.g. 1,3,256,256;1,16')\n\tparser.add_argument('-op', '--opset', type=int, default=11, help='ONNX opset version')\n\tparser.add_argument('-in', '--input_names', type=str, default='in', help='e.g. in1;in2')\n\tparser.add_argument('-out', '--output_names', type=str, default='out', help='e.g. out1;out2')\n\n\targs = parser.parse_args()\n\n\tregisterModels()\n\tfrom starry.utils.model_factory import model_dict\n\n\tconfig = Configuration.createOrLoad(args.config)\n\n\tname = 'untrained'\n\tif config['best']:\n\t\tname = os.path.splitext(config['best'])[0]\n\n\tdef loadModel_ (postfix, state_field='model'):\n\t\tmodel = loadModel(config['model'], postfix=postfix)\n\n\t\tif config['best']:\n\t\t\tcheckpoint = torch.load(config.localPath(config['best']), map_location='cpu')\n\t\t\tif hasattr(model, 'deducer'):\n\t\t\t\tmodel.deducer.load_state_dict(checkpoint[state_field], strict=False)\n\t\t\telse:\n\t\t\t\tmodel.load_state_dict(checkpoint[state_field])\n\t\t\tlogging.info(f'checkpoint loaded: {config[\"best\"]}')\n\n\t\tmodel.eval()\n\t\tmodel.no_overwrite = True\n\n\t\treturn model\n\n\tif args.shapes is not None:\n\t\tmodel_postfix = config['onnx.postfix'] or ('Onnx' if (config['model.type'] + 'Onnx' in model_dict) else '')\n\t\tmodel = loadModel_(model_postfix)\n\n\t\ttruncate_long = config['onnx.truncate_long_tensor']\n\t\tout_name = f'{name}.temp.onnx' if truncate_long else f'{name}.onnx'\n\t\toutpath = config.localPath(out_name)\n\n\t\topset = args.opset\n\n\t\tshapes = args.shapes.split(';')\n\t\tshapes = [tuple(map(int, shape.split(','))) for shape in shapes]\n\t\tinput_names = args.input_names.split(';')\n\t\toutput_names = args.output_names.split(';')\n\t\tdummy_inputs = tuple(torch.randn(*shape) for shape in shapes)\n\n\t\ttorch.onnx.export(model, dummy_inputs, outpath,\n\t\t\tverbose=True,\n\t\t\tinput_names=input_names,\n\t\t\toutput_names=output_names,\n\t\t\topset_version=opset)\n\n\t\tif truncate_long:\n\t\t\ttemp_path = outpath\n\t\t\toutpath = config.localPath(f'{name}.onnx')\n\t\t\tconvert_model_to_int32(temp_path, outpath)\n\n\t\tlogging.info(f'ONNX model saved to: {outpath}')\n\telif config['onnx']:\n\t\tif 'multiple' in config['onnx']:\n\t\t\tfor key, onnx_config in config['onnx.multiple'].items():\n\t\t\t\trunConfig(onnx_config, loadModel_, outpath=config.localPath(f'{name}-{key}.onnx'))\n\t\telse:\n\t\t\trunConfig(config['onnx'], loadModel_, outpath=config.localPath(f'{name}.onnx'))\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"k-l-lambda/deep-starry","sub_path":"convertToOnnx.py","file_name":"convertToOnnx.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41048027618","text":"class 
Genres:\n def __init__(self):\n self.stress_genres = [\"Dementia\",\"Horror\", \"Mystery\", \"Psychological\", \"Thriller\", \"Historical\", \"Military\", \"Martial Arts\", \"Samurai\", \"Drama\", \"Police\", \"Seinen\", \"Shounen\", \"Space\", \"Sci-Fi\", \"Mecha\", \"Sports\", \"Supernatural\"]\n self.calm_genres = [\"Action\",\"Adventure\",\"Comedy\", \"Ecchi\", \"Fantasy\", \"Game\", \"Harem\", \"Kids\", \"Parody\", \"Slice of Life\", \"Romance\", \"Music\", \"School\", \"Shoujo\", \"Shoujo Ai\", \"Shounen Ai\", \"Yaoi\", \"Yuri\"]\n self.neutral_genres = [\"Magic\", \"Super Power\", \"Cars\", \"Demons\", \"Josei\", \"Vampire\"]\n\nclass Anime_Obj:\n def __init__(self, title, stress_counter, calm_counter, neutral_counter, genres):\n self.title = title\n self.stress_counter = stress_counter\n self.calm_counter = calm_counter\n self.neutral_counter = neutral_counter\n self.genres = genres\n def genre_exist(self,genre):\n \"\"\"Return True if any genre in the given list is among this anime's genres.\"\"\"\n for i in genre:\n if i in self.genres:\n return True\n return False\n\n def ratio(self):\n \"\"\"Stress share of the stress+calm counters, on a 0-10 scale.\"\"\"\n return int((self.stress_counter / (self.stress_counter + self.calm_counter)) * 100) / 10.\n\n\nif __name__ == \"__main__\": \n x = Anime_Obj('demon slayer', 2, 1, 0, ['demons', 'history'])\n print(x.ratio())","repo_name":"hscottvo/MALdoro","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"42922879610","text":"# -*- coding: utf-8 -*-\n\nimport os, sys, shutil\nfrom docx2pdf import convert \n\ndef clean(path):\n\ttry:\n\t\tshutil.rmtree(path)\n\texcept Exception:\n\t\t# the folder may not exist yet; recreate it below either way\n\t\tpass\n\tfinally:\n\t\tos.mkdir(path)\n\ndef run(folder):\n\tdest_dir = folder + '/pdfs'\n\tclean(dest_dir)\n\tfor fname in os.listdir(folder):\n\t\tfpath = folder + '/' + fname\n\t\tif not os.path.isdir(fpath):\n\t\t\tnames = fname.split('.')\n\t\t\ttarget = dest_dir + '/' + '.'.join(names[:-1]) + '.pdf'\n\t\t\tprint(fname)\n\t\t\tprint('>>>', target)\n\n\t\t\tconvert(fpath, target)\n\nif __name__ == '__main__':\n\targs = sys.argv[1:]\n\tif len(args) > 0:\n\t\trun(args[0])\n\telse:\n\t\tprint('Please specify the folder path...')\n","repo_name":"O70/python-examples","sub_path":"resumes/to_pdf.py","file_name":"to_pdf.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"24062923691","text":"import datetime\r\n\r\nDATA = {}\r\n\r\n\r\nclass Cummulation:\r\n \"\"\"Class to circumvent managing global accumulation variables\"\"\"\r\n def __init__(self, cummulative_pp, num_trans):\r\n self.cpp = cummulative_pp\r\n self.num_transactions = num_trans\r\n\r\n\r\n def accumulate(self, price):\r\n \"\"\"For the running accumulation of p1 * p2 * ... * 
pn\r\n\r\n Args:\r\n price (float): the security price\r\n \"\"\"\r\n self.cpp *= price\r\n self.num_transactions += 1\r\n\r\n def get_cummulative_price_product(self):\r\n \"\"\"returns cummulative price product\"\"\"\r\n return self.cpp\r\n \r\n def get_number_of_transactions(self):\r\n \"\"\"returns number of transactions\"\"\"\r\n return self.num_transactions\r\n\r\n\r\nACCUMULATOR = Cummulation(1, 0)\r\n\r\ndef _get_recent_trades(trades, time_delta_in_minutes=15):\r\n def get_index_at_delta_start(trades, key):\r\n left, right = 0, len(trades) - 1\r\n boundary_index = -1\r\n while left <= right:\r\n mid = (left + right) // 2\r\n if trades[mid] >= key:\r\n boundary_index = mid\r\n right = mid - 1\r\n else:\r\n left = mid + 1\r\n return boundary_index\r\n\r\n key = datetime.datetime.utcnow() - datetime.timedelta(minutes=time_delta_in_minutes)\r\n\r\n start_index = get_index_at_delta_start(trades, key)\r\n return trades[start_index:]\r\n\r\n\r\ndef add_transaction(stock, trade):\r\n \"\"\"Record Transaction\r\n\r\n Args:\r\n stock (str): The ticker\r\n trade (models.Trade): the trade object\r\n \"\"\"\r\n if stock in DATA:\r\n DATA[stock].append(trade)\r\n else:\r\n DATA[stock] = [trade]\r\n\r\n ACCUMULATOR.accumulate(trade.trade_price)\r\n\r\n\r\ndef get_all_share_index():\r\n \"\"\"Calculates All Share Index\r\n\r\n Args:\r\n None\r\n\r\n Returns:\r\n All Share Index\r\n \"\"\"\r\n number_of_transactions = ACCUMULATOR.get_number_of_transactions()\r\n if number_of_transactions:\r\n return round(\r\n ACCUMULATOR.get_cummulative_price_product() \r\n ** (1 / number_of_transactions), 2)\r\n return 0\r\n\r\ndef get_volume_weighted_stock_price(stock):\r\n \"\"\"Calculates Volume Weighted Stock Price\r\n\r\n Args:\r\n stock (str): The ticker\r\n\r\n Returns:\r\n Volume Weighted Stock Price\r\n \"\"\"\r\n if stock in DATA:\r\n stock_transactions = DATA[stock]\r\n recent_trades = _get_recent_trades(stock_transactions)\r\n\r\n cummulative_price = sum([trade.trade_price * trade.quantity for trade in recent_trades])\r\n cummulative_quantity = sum([trade.quantity for trade in recent_trades])\r\n return round(cummulative_price/cummulative_quantity, 2)\r\n return 0\r\n","repo_name":"pkopoku/super-simple-stock-market","sub_path":"sssm/models/Transactions.py","file_name":"Transactions.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"42033487592","text":"import os, os.path as osp\nfrom time import strftime\nimport tqdm\nimport torch\nfrom torch_geometric.data import DataLoader\nimport argparse\n\nfrom torch_cmspepr.dataset import TauDataset\nfrom torch_cmspepr.gravnet_model import GravnetModel\nimport torch_cmspepr.objectcondensation as oc\nfrom lrscheduler import CyclicLRWithRestarts\n\ntorch.manual_seed(1009)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dry', action='store_true', help='Turn off checkpoint saving and run limited number of events')\n parser.add_argument('-v', '--verbose', action='store_true', help='Print more output')\n parser.add_argument('--settings-Sep01', action='store_true', help='Use 21Sep01 settings')\n # parser.add_argument('--reduce-noise', action='store_true', help='Randomly kills 95% of noise')\n parser.add_argument('--ckptdir', type=str)\n args = parser.parse_args()\n if args.verbose: oc.DEBUG = True\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Using device', device)\n\n reduce_noise = True\n\n 
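# Standalone sketch of the all-share index computed above: the running product p1*p2*...*pn with the geometric mean taken at read time. Working in logs, as below, avoids float overflow on long transaction streams; the prices are illustrative.\nimport math\nprices = [101.5, 99.8, 100.2, 102.0]\ngm = math.exp(sum(math.log(p) for p in prices) / len(prices))\nprint(round(gm, 2))  # ~100.87, equal to (p1*p2*p3*p4)**(1/4)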
n_epochs = 400\n batch_size = 4\n\n shuffle = True\n dataset = TauDataset('data/taus')\n dataset.blacklist([ # Remove a bunch of bad events\n 'data/taus/110_nanoML_98.npz',\n 'data/taus/113_nanoML_13.npz',\n 'data/taus/124_nanoML_77.npz',\n 'data/taus/128_nanoML_70.npz',\n 'data/taus/149_nanoML_90.npz',\n 'data/taus/153_nanoML_22.npz',\n 'data/taus/26_nanoML_93.npz',\n 'data/taus/32_nanoML_45.npz',\n 'data/taus/5_nanoML_51.npz',\n 'data/taus/86_nanoML_97.npz',\n ])\n if reduce_noise:\n # dataset.reduce_noise = .95\n # multiply_batch_size = 8\n dataset.reduce_noise = .70\n multiply_batch_size = 6\n print(f'Throwing away {dataset.reduce_noise*100:.0f}% of noise (good for testing ideas, not for final results)')\n print(f'Batch size: {batch_size} --> {multiply_batch_size*batch_size}')\n batch_size *= multiply_batch_size\n if args.dry:\n keep = .005\n print(f'Keeping only {100.*keep:.1f}% of events for debugging')\n dataset, _ = dataset.split(keep)\n train_dataset, test_dataset = dataset.split(.8)\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)\n test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=shuffle)\n\n if args.settings_Sep01:\n model = GravnetModel(input_dim=9, output_dim=4).to(device)\n else:\n model = GravnetModel(input_dim=9, output_dim=6, k=50).to(device)\n\n # Checkpoint loading\n # if True:\n # # ckpt = 'ckpts_gravnet_Aug27_2144/ckpt_9.pth.tar'\n # ckpt = 'ckpts_gravnet_Aug27_0502/ckpt_23.pth.tar'\n # print(f'Loading initial weights from ckpt {ckpt}')\n # model.load_state_dict(torch.load(ckpt, map_location=device)['model'])\n\n epoch_size = len(train_loader.dataset)\n optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, weight_decay=1e-4)\n\n if not args.settings_Sep01:\n scheduler = CyclicLRWithRestarts(optimizer, batch_size, epoch_size, restart_period=400, t_mult=1.1, policy=\"cosine\")\n\n loss_offset = 1. 
# To prevent a negative loss from ever occuring\n\n # def loss_fn(out, data, s_c=1., return_components=False):\n # device = out.device\n # pred_betas = torch.sigmoid(out[:,0])\n # pred_cluster_space_coords = out[:,1:]\n # assert all(t.device == device for t in [\n # pred_betas, pred_cluster_space_coords, data.y, data.batch,\n # ])\n # out_oc = oc.calc_LV_Lbeta(\n # pred_betas,\n # pred_cluster_space_coords,\n # data.y.long(),\n # data.batch,\n # return_components=return_components\n # )\n # if return_components:\n # return out_oc\n # else:\n # LV, Lbeta = out_oc\n # return LV + Lbeta + loss_offset\n\n def loss_fn(out, data, i_epoch=None, return_components=False):\n device = out.device\n pred_betas = torch.sigmoid(out[:,0])\n pred_cluster_space_coords = out[:,1:]\n assert all(t.device == device for t in [\n pred_betas, pred_cluster_space_coords, data.y, data.batch,\n ])\n out_oc = oc.calc_LV_Lbeta(\n pred_betas,\n pred_cluster_space_coords,\n data.y.long(),\n data.batch,\n return_components=return_components,\n beta_term_option='short-range-potential',\n )\n if return_components:\n return out_oc\n else:\n LV, Lbeta = out_oc\n if i_epoch <= 7:\n return LV + loss_offset\n else:\n return LV + Lbeta + loss_offset\n\n def train(epoch):\n print('Training epoch', epoch)\n model.train()\n if not args.settings_Sep01: scheduler.step()\n try:\n pbar = tqdm.tqdm(train_loader, total=len(train_loader))\n pbar.set_postfix({'loss': '?'})\n for i, data in enumerate(pbar):\n data = data.to(device)\n optimizer.zero_grad()\n result = model(data.x, data.batch)\n loss = loss_fn(result, data, i_epoch=epoch)\n loss.backward()\n optimizer.step()\n if not args.settings_Sep01: scheduler.batch_step()\n pbar.set_postfix({'loss': float(loss)})\n # if i == 2: raise Exception\n except Exception:\n print('Exception encountered:', data, ', npzs:')\n print(' ' + '\\n '.join([train_dataset.npzs[int(i)] for i in data.inpz]))\n raise\n\n def test(epoch):\n N_test = len(test_loader)\n loss_components = {}\n def update(components):\n for key, value in components.items():\n if not key in loss_components: loss_components[key] = 0.\n loss_components[key] += value\n with torch.no_grad():\n model.eval()\n for data in tqdm.tqdm(test_loader, total=len(test_loader)):\n data = data.to(device)\n result = model(data.x, data.batch)\n update(loss_fn(result, data, return_components=True))\n # Divide by number of entries\n for key in loss_components:\n loss_components[key] /= N_test\n # Compute total loss and do printout\n print('test ' + oc.formatted_loss_components_string(loss_components))\n test_loss = loss_offset + loss_components['L_V']+loss_components['L_beta']\n print(f'Returning {test_loss}')\n return test_loss\n\n ckpt_dir = strftime('ckpts_gravnet_%b%d_%H%M') if args.ckptdir is None else args.ckptdir\n def write_checkpoint(checkpoint_number=None, best=False):\n ckpt = 'ckpt_best.pth.tar' if best else 'ckpt_{0}.pth.tar'.format(checkpoint_number)\n ckpt = osp.join(ckpt_dir, ckpt)\n if best: print('Saving epoch {0} as new best'.format(checkpoint_number))\n if not args.dry:\n os.makedirs(ckpt_dir, exist_ok=True)\n torch.save(dict(model=model.state_dict()), ckpt)\n\n min_loss = 1e9\n for i_epoch in range(n_epochs):\n train(i_epoch)\n write_checkpoint(i_epoch)\n test_loss = test(i_epoch)\n if test_loss < min_loss:\n min_loss = test_loss\n write_checkpoint(i_epoch, best=True)\n\ndef debug():\n oc.DEBUG = True\n dataset = TauDataset('data/taus')\n dataset.npzs = [\n # 'data/taus/49_nanoML_84.npz',\n # 'data/taus/37_nanoML_4.npz',\n 
'data/taus/26_nanoML_93.npz',\n # 'data/taus/142_nanoML_75.npz',\n ]\n for data in DataLoader(dataset, batch_size=len(dataset), shuffle=False): break\n print(data.y.sum())\n model = GravnetModel(input_dim=9, output_dim=4)\n with torch.no_grad():\n model.eval()\n out = model(data.x, data.batch)\n pred_betas = torch.sigmoid(out[:,0])\n pred_cluster_space_coords = out[:,1:4]\n out_oc = oc.calc_LV_Lbeta(\n pred_betas,\n pred_cluster_space_coords,\n data.y.long(),\n data.batch.long()\n )\n\ndef run_profile():\n from torch.profiler import profile, record_function, ProfilerActivity\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Using device', device)\n\n batch_size = 2\n n_batches = 2\n shuffle = True\n dataset = TauDataset('data/taus')\n dataset.npzs = dataset.npzs[:batch_size*n_batches]\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)\n print(f'Running profiling for {len(dataset)} events, batch_size={batch_size}, {len(loader)} batches')\n\n model = GravnetModel(input_dim=9, output_dim=8).to(device)\n epoch_size = len(loader.dataset)\n optimizer = torch.optim.AdamW(model.parameters(), lr=1e-7, weight_decay=1e-4)\n\n print('Start limited training loop')\n model.train()\n with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:\n with record_function(\"model_inference\"):\n pbar = tqdm.tqdm(loader, total=len(loader))\n pbar.set_postfix({'loss': '?'})\n for i, data in enumerate(pbar):\n data = data.to(device)\n optimizer.zero_grad()\n result = model(data.x, data.batch)\n loss = loss_fn(result, data)\n print(f'loss={float(loss)}')\n loss.backward()\n optimizer.step()\n pbar.set_postfix({'loss': float(loss)})\n print(prof.key_averages().table(sort_by=\"cpu_time\", row_limit=10))\n # Other valid keys:\n # cpu_time, cuda_time, cpu_time_total, cuda_time_total, cpu_memory_usage,\n # cuda_memory_usage, self_cpu_memory_usage, self_cuda_memory_usage, count\n\nif __name__ == '__main__':\n pass\n main()\n # debug()\n # run_profile()","repo_name":"tklijnsma/hgcal_training_scripts","sub_path":"train_taus.py","file_name":"train_taus.py","file_ext":"py","file_size_in_byte":9858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"40865486903","text":"import sys, os\nimport pandas as pd\nfrom pymongo import collection\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom Clust.clust.meta.metaDataManager.wizMongoDbApi import WizApiMongoMeta\n\ndef get_meta_table(mongodb_client):\n wiz_c = WizApiMongoMeta(mongodb_client)\n \n main_domian_list = ['air', 'farm', 'factory', 'bio', 'life', 'energy',\\\n 'weather', 'city', 'traffic', 'culture', 'economy','INNER','OUTDOOR']\n \n db_list = wiz_c.get_database_list()\n exploration_df = pd.DataFrame()\n\n for db_name in db_list :\n if db_name in main_domian_list: \n colls = wiz_c.get_collection_list(db_name)\n for coll in colls:\n print(db_name, coll)\n items = wiz_c.read_mongodb_document_by_get(db_name, coll)\n for item in items:\n try:\n influx_db_name = item['domain']+\"_\"+item[\"subDomain\"]\n measurement_name = item['table_name']\n start_time = item['startTime']\n end_time = item['endTime']\n frequency = item['frequency']\n number_of_columns = item['numberOfColumns']\n exploration_df = exploration_df.append([[influx_db_name, measurement_name, start_time, end_time, frequency, number_of_columns]])\n 
except KeyError as e:\n print(\"KeyError:\", e)\n \n exploration_df.columns = ['db_name', 'measurement_name', 'start_time', 'end_time', 'frequency', 'number_of_columns']\n exploration_df.reset_index(drop=True, inplace = True)\n exploration_js = exploration_df.to_json(orient = 'records')\n \n return exploration_js\n\n# def get_meta_some_tables(db_ms_names):\n# '''{\n# db_name : {collection : [ms_names]}\n# }'''\n# db_list = wiz_c.get_database_list()\n# result = {}\n# for db in db_ms_names.keys():\n# if db not in db_list: \n# continue\n# result[db]={}\n# for coll in db_ms_names[db].keys():\n# result[db][coll]={}\n# for ms in db_ms_names[db][coll]:\n# data = wiz_c.read_mongodb_document_by_get(db, coll, ms)\n# data = {\"start_time\":data[\"startTime\"],\"end_time\":data[\"endTime\"]}\n# result[db][coll][ms]=data\n# return result\n \n\nif __name__==\"__main__\":\n import json\n from Clust.setting import influx_setting_KETI as isk\n\n #re = get_meta_some_tables({\"air\":{\"indoor_경로당\":['ICL1L2000234','ICL1L2000235']}}) \n #print(re)\n test_exploration_js = get_meta_table()","repo_name":"ClustProject/KETIToolMetaManager","sub_path":"ingestion_meta_exploration.py","file_name":"ingestion_meta_exploration.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"84"} +{"seq_id":"16925998770","text":"import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom ..models import Comment, Follow, Group, Post\n\nUser = get_user_model()\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass PostsTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='HasNoName')\n cls.follower_user = User.objects.create_user(username='follower')\n cls.nofollower_user = User.objects.create_user(username='nofollower')\n Follow.objects.create(user=cls.follower_user,\n author=cls.nofollower_user)\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test',\n description='Тестовое описание')\n cls.small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B')\n cls.uploaded = SimpleUploadedFile(\n name='small.gif',\n content=cls.small_gif,\n content_type='image/gif')\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовый пост',\n group=cls.group,\n image=cls.uploaded)\n cls.index = reverse('posts:index')\n cls.follow = reverse('posts:follow_index')\n cls.group_list = reverse(\n ('posts:group_list'),\n kwargs={'slug': f'{cls.group.slug}'})\n cls.profile = reverse(\n ('posts:profile'),\n kwargs={'username': f'{cls.user.username}'})\n cls.profile_follow = reverse(\n ('posts:profile_follow'),\n kwargs={'username': f'{cls.user.username}'})\n cls.profile_unfollow = reverse(\n ('posts:profile_unfollow'),\n kwargs={'username': f'{cls.user.username}'})\n cls.detail = reverse(\n ('posts:post_detail'),\n kwargs={'post_id': f'{cls.post.id}'})\n cls.edit = reverse(\n ('posts:post_edit'),\n kwargs={'post_id': 
f'{cls.post.id}'})\n cls.comment = reverse(\n ('posts:add_comment'),\n kwargs={'post_id': f'{cls.post.id}'})\n cls.create = reverse('posts:post_create')\n cls.templates = {\n cls.index: 'posts/index.html',\n cls.follow: 'posts/follow.html',\n cls.group_list: 'posts/group_list.html',\n cls.profile: 'posts/profile.html',\n cls.detail: 'posts/post_detail.html',\n cls.edit: 'posts/create_post.html',\n cls.create: 'posts/create_post.html'}\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n cache.clear()\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.follower = Client()\n self.nofollower = Client()\n self.follower.force_login(self.follower_user)\n self.nofollower.force_login(self.nofollower_user)\n\n def test_pages_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n for reverse_name, template in self.templates.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.authorized_client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n def test_home_page_show_correct_context(self):\n \"\"\"Шаблон index сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(self.index)\n first_object = response.context['page_obj'][0]\n\n self.assertEqual(first_object, self.post)\n\n def test_follow_page_show_correct_context(self):\n \"\"\"Шаблон follow_index сформирован с правильным контекстом.\"\"\"\n follower_post = Post.objects.create(\n author=self.follower_user,\n text='Тестовый пост подписчика')\n nofollower_post = Post.objects.create(\n author=self.nofollower_user,\n text='Тестовый пост автора')\n follower_response = self.follower.get(self.follow)\n nofollower_response = self.nofollower.get(self.follow)\n\n self.assertIn(nofollower_post, follower_response.context['page_obj'])\n self.assertNotIn(follower_post,\n nofollower_response.context['page_obj'])\n self.assertEqual(follower_response.context.get('is_following'),\n True)\n self.assertEqual(nofollower_response.context.get('is_following'),\n False)\n follower_post.delete()\n nofollower_post.delete()\n\n def test_group_list_page_show_correct_context(self):\n \"\"\"Шаблон group_list сформирован с правильным контекстом.\"\"\"\n another_user = User.objects.create_user(username='ZAnotherName')\n another_group = Group.objects.create(\n title='Тестовая группа #2',\n slug='test_2',\n description='Тестовое описание #2')\n post_with_another_group = Post.objects.create(\n author=another_user,\n text='Тестовый пост #2',\n group=another_group)\n\n response = self.authorized_client.get(self.group_list)\n first_object = response.context['page_obj'][0]\n\n self.assertEqual(first_object, self.post)\n self.assertEqual(response.context.get('group').title,\n 'Тестовая группа')\n self.assertEqual(response.context.get('group').slug, 'test')\n self.assertEqual(response.context.get('group').description,\n 'Тестовое описание')\n self.assertNotIn(post_with_another_group, response.context['page_obj'])\n del post_with_another_group\n\n def test_profile_page_show_correct_context(self):\n \"\"\"Шаблон profile сформирован с правильным контекстом.\"\"\"\n self_follow = Follow.objects.create(user=self.follower_user,\n author=self.follower_user)\n response = self.authorized_client.get(self.profile)\n follower_response = self.follower.get(\n reverse(('posts:profile'),\n kwargs={'username': 
f'{self.nofollower_user.username}'}))\n nofollower_response = self.nofollower.get(\n reverse(('posts:profile'),\n kwargs={'username': f'{self.follower_user.username}'}))\n first_object = response.context['page_obj'][0]\n\n self.assertEqual(follower_response.context.get('following'),\n True)\n self.assertEqual(nofollower_response.context.get('following'), False)\n self.assertEqual(first_object, self.post)\n self.assertEqual(response.context.get('author').username, 'HasNoName')\n self.assertEqual(response.context.get('author_posts_count'), 1)\n self.assertEqual(response.context.get('is_profile'), True)\n self_follow.delete()\n\n def test_post_detail_page_show_correct_context(self):\n \"\"\"Шаблон post_detail сформирован с правильным контекстом.\"\"\"\n comment = Comment.objects.create(text='Тестовый коммент',\n post=self.post,\n author=self.user)\n response = self.authorized_client.get(self.detail)\n post_object = response.context['post']\n post_text = post_object.text\n post_author = post_object.author.username\n post_image = post_object.image\n form_fields = {\n 'text': forms.fields.CharField}\n\n self.assertEqual(post_text, 'Тестовый пост')\n self.assertEqual(post_author, f'{self.user}')\n self.assertEqual(post_image, 'posts/small.gif')\n self.assertEqual(response.context.get('comments')[0], comment)\n self.assertEqual(response.context.get('author_posts_count'), 1)\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_create_post_page_show_correct_context(self):\n \"\"\"Шаблон create_post сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(self.create)\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField}\n\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_edit_post_page_show_correct_context(self):\n \"\"\"Шаблон edit_post сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(self.edit)\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField}\n\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n self.assertEqual(response.context.get('is_edit'), True)\n\n\nclass PaginatorViewsTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='HasNoName')\n cls.follower_user = User.objects.create_user(username='follower')\n Follow.objects.create(user=cls.follower_user, author=cls.user)\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test',\n description='Тестовое описание')\n for i in range(13):\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовый пост',\n group=cls.group)\n cls.reverse_names = (\n (reverse('posts:index')),\n (reverse(('posts:group_list'),\n kwargs={'slug': f'{cls.group.slug}'})),\n (reverse(('posts:profile'),\n kwargs={'username': f'{cls.user.username}'})),\n reverse('posts:follow_index')\n )\n\n def setUp(self):\n cache.clear()\n self.follower = Client()\n self.follower.force_login(self.follower_user)\n\n def test_first_page_contains_ten_records(self):\n \"\"\" Проверка: на первой странице должно быть десять постов.\"\"\"\n 
for reverse_name in self.reverse_names:\n with self.subTest(reverse_name=reverse_name):\n response = self.follower.get(reverse_name)\n self.assertEqual(len(response.context['page_obj']), 10)\n\n def test_second_page_contains_three_records(self):\n \"\"\" Проверка: на второй странице должно быть три поста.\"\"\"\n for reverse_name in self.reverse_names:\n with self.subTest(reverse_name=reverse_name):\n response = self.follower.get(reverse_name + '?page=2')\n self.assertEqual(len(response.context['page_obj']), 3)\n\n\nclass CommentTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='TestUser')\n cls.post = Post.objects.create(author=cls.user,\n text='Тестовый текст')\n cls.comment = Comment.objects.create(text='Тестовый коммент',\n post=cls.post,\n author=cls.user)\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n\n def test_post_detail_page_show_correct_context(self):\n \"\"\"Комментарий отображается на странице поста \"\"\"\n response = self.authorized_client.get(\n reverse('posts:post_detail',\n kwargs={'post_id': f'{self.post.id}'}))\n comment_obj = response.context['comments'][0]\n\n self.assertEqual(comment_obj, self.comment)\n\n\nclass CacheTests(TestCase):\n def setUp(self):\n self.guest_client = Client()\n\n def test_index_page_being_cached(self):\n \"\"\"Главная страница кэшируется\"\"\"\n user = User.objects.create_user(username='TestUser')\n post = Post.objects.create(author=user,\n text='Тестовый текст')\n\n response_content = self.guest_client.get(\n reverse('posts:index')).content\n post.delete()\n response_content_post_delete = self.guest_client.get(\n reverse('posts:index')).content\n cache.clear()\n response_content_cache_delete = self.guest_client.get(\n reverse('posts:index')).content\n\n self.assertEqual(response_content, response_content_post_delete)\n self.assertNotEqual(response_content, response_content_cache_delete)\n self.assertNotIn(post.text.encode('utf-8'),\n response_content_cache_delete)\n\n\nclass FollowTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user_user = User.objects.create_user(username='user')\n cls.user_author = User.objects.create_user(username='author')\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n\n def setUp(self):\n self.user = Client()\n self.author = Client()\n self.user.force_login(self.user_user)\n self.author.force_login(self.user_author)\n\n def test_authenticated_user_can_follow(self):\n \"\"\"Залогиненный пользователь может подписаться на авторов,\n при этом нельзя подписаться, если он уже подписан\"\"\"\n follow_count = Follow.objects.count()\n\n self.user.get(\n reverse(('posts:profile_follow'),\n kwargs={'username': f'{self.user_author.username}'}))\n self.user.get(\n reverse(('posts:profile_follow'),\n kwargs={'username': f'{self.user_author.username}'}))\n\n self.assertEqual(Follow.objects.count(), follow_count + 1)\n self.assertTrue(\n Follow.objects.filter(user=self.user_user,\n author=self.user_author).exists())\n\n def test_authenticated_user_can_unfollow(self):\n \"\"\"Залогиненный пользователь может отписаться от авторов,\n при этом нельзя отписаться, если он уже отписан\"\"\"\n follow_count = Follow.objects.count()\n\n self.user.get(\n reverse(('posts:profile_follow'),\n kwargs={'username': f'{self.user_author.username}'}))\n self.user.get(\n reverse(('posts:profile_unfollow'),\n kwargs={'username': 
f'{self.user_author.username}'}))\n self.user.get(\n reverse(('posts:profile_unfollow'),\n kwargs={'username': f'{self.user_author.username}'}))\n\n self.assertEqual(Follow.objects.count(), follow_count)\n self.assertFalse(\n Follow.objects.filter(user=self.user_user,\n author=self.user_author).exists())\n\n def test_authenticated_user_cant_follow_himself(self):\n \"\"\"Залогиненный пользователь не может подписаться на самого себя\"\"\"\n follow_count = Follow.objects.count()\n\n self.user.get(\n reverse(('posts:profile_follow'),\n kwargs={'username': f'{self.user_user.username}'}))\n\n self.assertEqual(Follow.objects.count(), follow_count)\n","repo_name":"Etozheigor/hw05_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":16926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"5380833345","text":"from PyQt5.Qt import *\n\n\nclass Btn(QPushButton):\n pass\n\n\n# qto\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n\n self.setWindowTitle(\"QSS学习\")\n self.resize(500, 500)\n\n self.setup_ui()\n\n def setup_ui(self):\n box1 = QWidget(self)\n box2 = QWidget(self)\n box2.setObjectName(\"box2\")\n\n box3 = QWidget(box2)\n box3.resize(150, 150)\n # box3.setStyleSheet(\"background-color: lightgray\")\n\n # box1.setStyleSheet(\"QPushButton {background-color: orange;}\")\n # box2.setStyleSheet(\"background-color: cyan;\")\n\n label1 = QLabel(\"标签1\", box1)\n label1.resize(100, 60)\n # label1.setObjectName(\"l1\")\n # label1.setObjectName(\"pink\")\n label1.setProperty(\"notice_level\", \"warning\")\n\n label1.move(50, 50)\n btn1 = Btn(\"按钮1\", box1)\n btn1.move(150, 50)\n btn1.setObjectName(\"btn1\")\n # btn1.setStyleSheet(\"background-color: orange;\")\n\n cb = QCheckBox(\"python\", box1)\n cb.move(150, 50)\n cb.resize(100, 100)\n cb.setTristate(True)\n\n label2 = QLabel(\"标签2\", box2)\n label2.resize(100, 60)\n label2.move(50, 50)\n # label2.setProperty(\"notice_level\", \"error\")\n # label2.notice_level = \"error\"\n\n btn2 = QPushButton(\"按钮2\", box2)\n btn2.move(150, 50)\n btn2.setObjectName(\"btn2\")\n # btn2.setObjectName(\"pink\")\n\n label3 = QLabel(\"标签3\", box2)\n label3.move(100, 200)\n\n v_layout = QVBoxLayout()\n self.setLayout(v_layout)\n\n v_layout.addWidget(box1)\n v_layout.addWidget(box2)\n\n btn2.setEnabled(False)\n\n # self.setStyleSheet(\"QPushButton {background-color: orange;}\")\n\n self.other_btn = QPushButton(\"按钮3\")\n self.other_btn.show()\n\n # self.setStyleSheet(\"QPushButton {background-color: orange;}\")\n\n\nif __name__ == '__main__':\n\n import sys\n from tool import QSSTool\n\n app = QApplication(sys.argv)\n\n window = Window()\n window.show()\n\n # app.setStyleSheet(\"QPushButton {background-color: orange;}\") # 设置全局按钮为橘黄色。\n # app.setStyleSheet(\"QPushButton {background-color: orange;}\") # 设置全局按钮为橘黄色。\n # app.setStyleSheet(\"QLabel {background-color: orange;}\") # 设置全局按钮为橘黄色。\n # app.setStyleSheet(\"QLabel#l1 {background-color: orange;}\") # 设置全局按钮为橘黄色。\n # app.setStyleSheet(\"QLabel#l1 {background-color: orange;} QPushButton {background-color: cyan;}\") # 设置全局按钮为橘黄色。\n # app.setStyleSheet(\"QPushButton#b2 {background-color: cyan;}\") # 设置全局按钮为橘黄色。\n\n QSSTool.setQssToObj(\"test7.qss\", app)\n\n 
sys.exit(app.exec_())\n","repo_name":"liu1073811240/pyqt5_study","sub_path":"04-样式控制/01-QSS-初体验.py","file_name":"01-QSS-初体验.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71602006674","text":"import numpy as np\nfrom visual import *\ncos, sin = np.cos, np.sin\n# =======================\n# Problem 1\n# =======================\n\"\"\"\nlat = np.radians(51.1789)\ndec = np.radians(-23.4373)\nHA = np.arccos(-sin(lat)*sin(dec)/(cos(lat)*cos(dec)))\nA = np.arcsin(-cos(dec)*sin(HA))\n\"\"\"\n# ======================\n# Problem 2\n# ======================\n#The function that will convert the Ra and Dec from JPL horizons to decimal degrees\ndef dms_to_degrees(degrees, arcmin, arcsec):\n if degrees < 0.0:\n return degrees - (arcmin/60.0) - (arcsec/3600.0)\n else:\n return degrees + (arcmin/60.0) + (arcsec/3600.0)\n\ndef equ_toecl(ra,dec):\n e = np.radians(23.43713) #tilt of the earth in 2017\n #ra, dec = '20 28 07.25', '-14 56 18.2' #Ra, Dec found on JPL horizons\n #Convert Ra and Dec to decimal degrees/radians\n ra = np.fromstring(ra,sep=' ')\n dec = np.fromstring(dec,sep = ' ')\n ra = np.radians(dms_to_degrees(ra[0],ra[1],ra[2])*15.)\n dec = np.radians(dms_to_degrees(dec[0],dec[1],dec[2]))\n #Therefore, the unit vector in the equatorial coordinate system is:\n v_equ = vector(cos(ra)*cos(dec),sin(ra)*cos(dec),sin(dec))\n\n #Using the multiplication of vectors, we can convert the vector in the equtorial system to the ecliptic system\n trans_matrix = np.array([[1,0,0],[0,cos(e),sin(e)],[0,-sin(e),cos(e)]])\n v_ecl = vector(np.matmul(trans_matrix, v_equ))\n #We can also use the rotate function in vPython to generate a unit vector in the ecliptic coordinate system\n r_prime = rotate(v_equ,-e,axis = vector(1,0,0)) #This vector should be the same as v_ecl\n\n #Then we can use the vector to find the ecliptic longitude and latitude of our asteroid\n lat = np.arcsin(v_ecl.z)\n lon = np.arcsin(v_ecl.y/cos(lat))\n if lon < 0:\n lon += 360\n ra, dec, lon, lat = degrees(ra), degrees(dec), degrees(lon), degrees(lat)\n if plot:\n x_axis = arrow(pos=(0,0,0),axis = (2,0,0),shaftwidth =0.01, color = color.blue)\n y_axis = arrow(pos=(0,0,0),axis = (0,2,0),shaftwidth =0.01, color = color.green)\n z_axis = arrow(pos=(0,0,0),axis = (0,0,2),shaftwidth =0.01, color = color.red)\n earth = sphere(pos = (0,0,0), radius = 2, color = color.green, opacity = 0.1)\n horizon = cylinder(pos = (0,0,0), axis = (0,0,0.01), radius = 2, opacity = 0.5, color = color.yellow)\n ecliptic = cylinder(pos = (0,0,0), axis = rotate(horizon.axis, e,axis = vector(1,0,0)), radius = 2, opacity = 0.5)\n # 2017-Jul-14 04:00 20 28 14.82 -14 49 14.2\n label(pos = (1,0,0), text = 'x axis', height = 6, box = False, opacity = 0)\n label(pos = (0,1,0), text = 'y axis', height = 6, box = False, opacity = 0)\n label(pos = (0,0,1), text = 'z axis', height = 6, box = False, opacity = 0)\n label(pos = (2,0,0), text = 'vernal equinox', height = 6, box = False, opacity = 0)\n label(pos = (-2,0,2), text = 'Yellow: equatorial, Cyan: ecliptical', box = False, opacity = 0)\n arrow(pos = (0,0,0), axis = v_equ, color = color.yellow)\n arrow(pos = (0,0,0), axis = v_ecl, color = color.cyan)\n return 
v_equ,v_ecl,lon,lat\n","repo_name":"yaosarayin/Yao_py","sub_path":"YSPA/Yao_Yin_problem_set_3.py","file_name":"Yao_Yin_problem_set_3.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"36929403671","text":"\nimport pytest\nimport json\nfrom unittest.mock import MagicMock\nimport pymongo\nimport tweepy\n\n#\n# Mocking so that tests run without credentials\n#\npymongo.MongoClient = MagicMock()\ntweepy.StreamListener.stream = MagicMock()\ntweepy.OAuthHandler = MagicMock()\ntweepy.API = MagicMock()\n\ndef send_tweet(self, *args, **kwargs):\n \"\"\"Stub for filter method that returns one single tweet\"\"\"\n self.listener.on_data(open('tweet.json', encoding='utf-8').read())\n\ntweepy.Stream.filter = send_tweet\n\n@pytest.fixture\ndef tweet():\n s = open('tweet.json').read()\n j = json.loads(s)\n return j\n\n@pytest.fixture\ndef parsed_tweet():\n s = open('parsed_tweet.json').read()\n j = json.loads(s)\n return j\n","repo_name":"realtweego/tweego","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"84"} +{"seq_id":"30110708282","text":"from cs50 import get_int\n\nwhile True:\n height = get_int(\"Input pyramid height (between 1-8): \")\n if (height > 0 and height < 9):\n break\nfor i in range(height):\n print(\" \" * (height - (i + 1) ), \"#\" * (i + 1), \" \", \"#\" * (i + 1))\n \n \n","repo_name":"lilhuge/cs50","sub_path":"pset6/mario/more/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4510548481","text":"import re\nimport sys\nimport time\nimport typing\nimport warnings\n\n__author__ = \"Bojan Potočnik\"\n\nimport ctypes\nimport os\nimport telnetlib\n\n\nclass SeggerRTTClient:\n def __init__(self, host: str = \"localhost\", port: int = 19021):\n self.telnet = telnetlib.Telnet(timeout=1) # type: telnetlib.Telnet\n self.host = host\n self.port = port\n self._opened = False\n\n # noinspection SpellCheckingInspection\n def open(self, parse_jlink_info: bool = True):\n \"\"\"Connect to the JLink host.\"\"\"\n try:\n self.telnet.open(self.host, self.port)\n except ConnectionRefusedError:\n raise ConnectionRefusedError(\n f\"Could not connect to {self.host}:{self.port}.\"\n \" Are you sure that the JLink is running?\"\n \" You can run it with 'JLink -Device -If -AutoConnect 1 -Speed '\"\n \", e.g. 
'JLink --Device NRF52840_xxAA -If SWD -AutoConnect 1 -Speed 50000'\"\n )\n self._opened = True\n\n # Bold/Bright green\n msg = \"\\x1B[32;1m\" + f\"{type(self).__name__} connected to {self.telnet.host}:{self.telnet.port}\"\n if parse_jlink_info:\n # Wait for JLink information to be printed.\n # self.telnet.expect() could be used, but it throws error\n # `TypeError: cannot use a string pattern on a bytes-like object` because it executes\n # `m = list[i].search(self.cookedq)` where `self.cookedq` is bytes, not str.\n # Manual matching is done instead.\n # 3 newline characters are required for RegEx below to match.\n data = b''.join(self.telnet.read_until(b'\\n', 0.1) for _ in range(3))\n match = re.match(r\"SEGGER J-Link (V[\\w.]+) - Real time terminal output\\r?\\n\"\n r\"SEGGER J-Link ([\\w .]+), SN=([\\d]+)\\r?\\n\"\n r\"Process: ([\\w.\\-]+)\\r?\\n\", data.decode('utf-8')) if data else None\n if match:\n data = data[match.span()[1]:] # Leave only the unused data in the buffer.\n # Bold/Bright blue\n msg += \"\\x1B[34;1m\" + f\" ('{match[3]}' {match[1]} using {match[2]} (SN {match[3]}))\"\n # Put unrecognized/unused data back in the buffer (in front of any new data received in meantime).\n self.telnet.cookedq = data + self.telnet.cookedq\n print(msg + \"\\x1B[0m\", flush=True)\n\n def close(self):\n \"\"\"Close the connection, if opened.\"\"\"\n if self._opened:\n self.telnet.close()\n self._opened = False\n # Bold/Bright magenta\n print(\"\\x1B[35;1m\"\n f\"Connection to {self.telnet.host}:{self.telnet.port} closed.\"\n \"\\x1B[0m\", flush=True)\n\n def __enter__(self):\n self.open()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def read_blocking(self) -> str:\n \"\"\"Read any available data and return it as-is.\"\"\"\n while True:\n try:\n rx_data = self.telnet.read_very_eager()\n except ConnectionResetError:\n # Bold red\n print(\"\\x1B[31;1m\"\n f\"{type(self).__name__} disconnected from {self.host}:{self.port}.\"\n \"\\x1B[0m\")\n self.close()\n return \"\\x00\"\n if rx_data:\n break\n time.sleep(0.01)\n try:\n return rx_data.decode('utf-8')\n except UnicodeDecodeError as e:\n print(f\"While decoding {rx_data}: {e}\", file=sys.stderr)\n return rx_data.decode('utf-8', errors='replace')\n\n def read_lines(self) -> typing.Iterator[str]:\n \"\"\"Read line by line and strip all newline characters (\\r\\n) at the end.\"\"\"\n while self.connected:\n # Wait for new line indefinitely, remove newline characters at the end and convert it to string.\n # Only wait for \\n as some code uses \\r\\n as newline and other only \\n.\n rx_data = \"\"\n while \"\\n\" not in rx_data:\n rx_data += self.read_blocking()\n # Multiple lines can be received.\n for line in rx_data.split(\"\\n\"):\n yield line.strip(\"\\r\\n\")\n\n def write_line(self, buffer: typing.Union[bytes, str]) -> None:\n if isinstance(buffer, str):\n buffer = buffer.encode('ascii')\n self.telnet.write(buffer + b\"\\n\")\n\n def __iter__(self) -> typing.Iterator[str]:\n \"\"\"Read (undetermined) fragments of received data and return it as-is.\"\"\"\n while self.connected:\n yield self.read_blocking()\n\n def __del__(self):\n self.close()\n\n @property\n def connected(self) -> bool:\n return self.telnet.sock and (not self.telnet.eof) and (self.telnet.get_socket().fileno() != -1)\n\n def __bool__(self) -> bool:\n return self.connected\n\n\nclass SeggerRTTListener(SeggerRTTClient):\n \"\"\"Class for backward compatibility of the @ref SeggerRTTClient.\"\"\"\n\n def __init__(self, host: str = 
\"localhost\", port: int = 19021):\n warnings.warn(f\"{type(self).__name__} is deprecated - use {SeggerRTTClient.__name__} instead.\")\n super().__init__(host, port)\n\n\ndef read(client):\n \"\"\" read only one once instead of using generator forever\"\"\"\n print(next(iter(client)), end=\"\")\n\n\ndef main__open_close() -> None:\n \"\"\" main to demonstrate bi-directional (read and write) over Telnet to SEGGER J-Link RTT server\n write_line can be used on platforms which support bi-directional RTT transfer, e.g. Nordic CLI.\n \"\"\"\n client = SeggerRTTClient()\n client.open()\n user_input_sent = False\n\n try:\n while True:\n read(client)\n if not user_input_sent: # sent input only once and continue reading\n client.write_line(b\"\\t\") # tab on Nordic RTT CLI shows available commands\n user_input_sent = True\n except KeyboardInterrupt:\n print(\"User requested keyboard interrupt\")\n finally:\n # In this case calling close() is not strictly required as it will be called\n # automatically when the object is garbage collected, however this shall not be\n # relied upon - open()-ed resources shall always be close()-ed.\n client.close()\n\n\ndef main__context_manager() -> None:\n user_input_sent = False\n\n with SeggerRTTClient() as client:\n for line in client:\n print(line, end=\"\")\n if not user_input_sent: # Sent input only once and continue reading\n client.write_line(b\"\\t\") # Tab on Nordic RTT CLI shows available commands\n user_input_sent = True\n\n\nif __name__ == '__main__':\n if os.name == 'nt':\n # Running on Windows, enable console colors\n # ENABLE_PROCESSED_OUTPUT = 0x0001\n # ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002\n # ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004\n kernel32 = ctypes.windll.kernel32\n kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 0x01 | 0x02 | 0x04)\n\n main__context_manager()\n # main__open_close()\n","repo_name":"bojanpotocnik/segger-rtt-viewer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"84"} +{"seq_id":"39868519103","text":"import calendargui\nimport display_facts\nfrom PyQt5.QtCore import QTimer, QTime\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_main_window(object):\n def show_time(self):\n current_time = QTime.currentTime()\n label_time = current_time.toString(\"hh:mm:ss\")\n self.real_time.setText(label_time)\n\n def setupUi(self, main_window):\n main_window.setObjectName(\"main_window\")\n main_window.resize(777, 360)\n main_window.setStyleSheet(\"\")\n self.img = QtWidgets.QLabel(main_window)\n self.img.setGeometry(QtCore.QRect(0, 0, 781, 381))\n self.img.setText(\"\")\n self.img.setPixmap(QtGui.QPixmap(\"space.jpg\"))\n self.img.setScaledContents(True)\n self.img.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\n self.img.setObjectName(\"img\")\n self.timer = QTimer()\n self.timer.timeout.connect(self.show_time)\n self.timer.start(1000)\n self.year_label = QtWidgets.QLabel(main_window)\n self.year_label.setGeometry(QtCore.QRect(260, 315, 41, 21))\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.year_label.setFont(font)\n self.year_label.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.year_label.setAlignment(QtCore.Qt.AlignCenter)\n self.year_label.setObjectName(\"year_label\")\n self.calc = QtWidgets.QPushButton(main_window, clicked=self.calculate)\n self.calc.setGeometry(QtCore.QRect(490, 310, 81, 28))\n font = 
QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.calc.setFont(font)\n self.calc.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n\"background-color: rgb(0, 0, 0);\")\n self.calc.setObjectName(\"calc\")\n self.facts = QtWidgets.QPushButton(main_window, clicked=self.display_facts)\n self.facts.setGeometry((QtCore.QRect(10, 330, 50, 28)))\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.facts.setFont(font)\n self.facts.setText(\"Facts\")\n self.facts.setStyleSheet(\"color: rgb(255, 255, 255);\\n\"\n\"background-color: rgb(0, 0, 0);\")\n self.facts.setObjectName(\"facts\")\n self.real_time = QtWidgets.QLabel(main_window)\n self.real_time.setGeometry(QtCore.QRect(350, 10, 101, 21))\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.real_time.setFont(font)\n self.real_time.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.real_time.setAlignment(QtCore.Qt.AlignCenter)\n self.real_time.setObjectName(\"real_time\")\n self.open_calendar = QtWidgets.QPushButton(main_window, clicked=self.go_calendar)\n self.open_calendar.setGeometry(QtCore.QRect(450, 310, 31, 28))\n self.open_calendar.setStyleSheet(\"image: url(cal_logo.png);\")\n self.open_calendar.setText(\"\")\n self.open_calendar.setObjectName(\"open_calendar\")\n self.year_input = QtWidgets.QLabel(main_window)\n self.year_input.setGeometry(QtCore.QRect(310, 310, 171, 31))\n self.year_input.setStyleSheet(\"background-color: rgb(255, 255, 255);\\n\"\n\"color: rgb(0, 0, 0);\")\n self.year_input.setText(calendargui.string_date_input)\n self.year_input.setObjectName(\"year_input\")\n self.year_input.setIndent(5)\n self.year_input.setEnabled(False)\n input_font = QtGui.QFont()\n input_font.setFamily(\"Arial\")\n input_font.setPointSize(10)\n input_font.setBold(True)\n input_font.setWeight(75)\n self.year_input.setFont(input_font)\n self.img.raise_()\n self.year_label.raise_()\n self.calc.raise_()\n self.real_time.raise_()\n self.facts.raise_()\n self.year_input.raise_()\n self.open_calendar.raise_()\n self.retranslateUi(main_window)\n QtCore.QMetaObject.connectSlotsByName(main_window)\n\n def retranslateUi(self, main_window):\n _translate = QtCore.QCoreApplication.translate\n main_window.setWindowTitle(_translate(\"main_window\", \"Planet Position Prediction\"))\n home_logo_map = QtGui.QPixmap(\"earth_icon.png\")\n home_logo = QtGui.QIcon(home_logo_map)\n main_window.setWindowIcon(home_logo)\n self.year_label.setText(_translate(\"main_window\", \"Year\"))\n self.calc.setText(_translate(\"main_window\", \"Calculate\"))\n\n def go_calendar(self):\n self.cal_window = QtWidgets.QWidget()\n self.cal_ui = calendargui.Ui_calendar_window()\n self.cal_ui.setupUi(self.cal_window)\n self.cal_window.show()\n\n def calculate(self):\n if not self.year_input.text():\n # if the input field is empty\n message_box = QtWidgets.QMessageBox()\n message_box.setText(\"The input field cannot be empty!\\n Please select a date.\")\n message_box.setWindowTitle(\"Error\")\n message_box.exec_()\n else:\n from Solarsystem import main as game\n home_total_days = calendargui.total_days\n game()\n\n def display_facts(self):\n self.fact_window = QtWidgets.QWidget()\n self.facts_ui = display_facts.Ui_facts_window()\n self.facts_ui.setupUi(self.fact_window)\n self.fact_window.show()\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n 
main_window = QtWidgets.QWidget()\n ui_home = Ui_main_window()\n ui_home.setupUi(main_window)\n main_window.show()\n sys.exit(app.exec_())\n\n","repo_name":"azeem30/Planet_Simulation","sub_path":"testgui.py","file_name":"testgui.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71182472595","text":"from dirty_loader import LoaderNamespaceReversedCached\nfrom dirty_loader.factories import register_logging_factories\n\nfrom maehtrobot.common.blueprints import register_blueprints_factories, Application\nfrom .config import load_configuration\n\n\ndef create_loader(namespaces=None, factory_registers=None):\n default_namespaces = {'blueprint': 'maehtrobot.blueprints',\n 'core': 'maehtrobot.blueprints.core',\n 'common': 'maehtrobot.common'}\n\n if namespaces:\n default_namespaces.update(namespaces)\n\n loader = LoaderNamespaceReversedCached(default_namespaces)\n\n register_logging_factories(loader)\n register_blueprints_factories(loader)\n\n try:\n for factory_register in factory_registers:\n factory_register(loader)\n except TypeError:\n pass\n\n return loader\n\n\ndef create_application(environment, config_name, config_dir,\n loader=None, application_class='common:blueprints.Application'):\n config = load_configuration(environment, config_name, config_dir)\n loader = loader or create_loader()\n\n return loader.factory(application_class, **config)\n","repo_name":"alfred82santa/maehtrobot","sub_path":"maehtrobot/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"6483701135","text":"__author__ = 'Thomas.Maschler'\n\nfrom metadata_constructors import MetadataItemConstructor\nfrom metadata_constructors import MetadataListConstructor\nfrom metadata_constructors import MetadataItemsConstructor\nfrom metadata_constructors import MetadataParentItemConstructor\n\nfrom elements import contact_elements\nfrom languages import languages\n\n\n\n# ########## General Info\n\nclass MetadataItem(MetadataItemConstructor):\n def __init__(self, path, name, parent):\n self.path = path\n self.name = name\n super(MetadataItem, self).__init__(parent)\n\n# ########## Keywords\n\nclass MetadataList(MetadataListConstructor):\n\n def __init__(self, tagname, path, name, parent=None):\n self.name = name\n super(MetadataList, self).__init__(parent, tagname=tagname, path=path)\n\n\nclass MetadataLanguage(MetadataParentItemConstructor):\n\n \"\"\"\n A MetadataParentItem for Language settings\n Each Language Item has two children\n - Language\n - Country\n Predefined language pairs are stored in the global language_code dictionary\n \"\"\"\n\n def __init__(self, path, name, parent):\n self.parent = parent\n self.name = name\n self.path = path\n\n language_elements = {\n \"attr_lang\": {\n \"parent\": \"element\",\n \"path\": \"languageCode\"},\n\n \"attr_country\": {\n \"parent\": \"element\",\n \"path\": \"countryCode\"}\n }\n\n super(MetadataLanguage, self).__init__(self.parent, language_elements)\n\n def get_lang(self):\n lang = self._attr_lang.attributes\n if \"value\" in lang.keys():\n for key in languages:\n if languages[key][0] == lang[\"value\"]:\n return key\n return \"\"\n else:\n return \"\"\n\n def __setattr__(self, n, v):\n if n in [\"path\", \"parent\", \"child_elements\", \"name\", \"value\"]:\n self.__dict__[n] = v\n elif n == \"attr_lang\":\n if v == \"\" or v is 
None:\n self._attr_lang.attributes = {}\n else:\n self._attr_lang.attributes = v\n elif n == \"attr_country\":\n if v == \"\" or v is None:\n self._attr_country.attributes = {}\n else:\n self._attr_country.attributes = v\n else:\n if n in self.child_elements.keys():\n if isinstance(v, str) or isinstance(v, unicode):\n self.__dict__[\"_{}\".format(n)].element.text = v\n elif v is None:\n self.__dict__[\"_{}\".format(n)].element.text = \"\"\n else:\n raise RuntimeWarning(\"Input value must be of type String or None\")\n else:\n self.__dict__[n] = v\n\n def __getattr__(self, name):\n\n if name != \"child_elements\" and name in self.child_elements.keys():\n return self.__dict__[\"_{}\".format(name)].element.text\n #elif name == \"value\":\n # return self.element\n elif name == \"attr_lang\":\n return self._attr_lang.attributes\n elif name == \"attr_country\":\n return self._attr_country.attributes\n else:\n return self.__dict__[name]\n\n# #### locals\n\nclass MetadataLocal(MetadataParentItemConstructor):\n \"\"\"\n A MetadataLocal Item\n \"\"\"\n\n def __init__(self, parent, path, language, country):\n\n self.parent = parent\n self.path = \"%s[@language='%s'][@country='%s']\" % (path, language, country)\n\n super(MetadataLocal, self).__init__(self.parent)\n\n self.attributes = {}\n self.title = self._create_item(self.element.iter(), self.element, \"resTitle\")\n self.abstract = self._create_item(self.element.iter(), self.element, \"idAbs\")\n\n\nclass MetadataLocals(MetadataItemsConstructor):\n \"\"\"\n A MetadataLocals Item for Localized Titles and Abstracts\n Each Local Item has two children\n - Title\n - Abstract\n and a language and country attribute to define the local language\n Predefined language pairs are stored in the global language_code dictionary\n There can be many MetadataLocals instances\n \"\"\"\n\n def __init__(self, path, name, parent=None):\n\n self.parent = parent\n\n self.name = name\n self.path = path\n\n super(MetadataLocals, self).__init__(parent, self.path)\n self._locals = {}\n\n for element in self.elements:\n attrib = element.attrib\n\n found = False\n for lang in languages:\n if languages[lang][0] == attrib[\"language\"]:\n found = True\n break\n\n if found:\n self._locals[lang] = (MetadataLocal(self.parent, self.path, attrib[\"language\"], attrib[\"country\"]))\n\n def __iter__(self):\n return iter(self._locals)\n\n def __getitem__(self, key):\n return self._locals[key]\n\n def _write(self):\n items_to_remove = []\n for element in self.elements:\n items_to_remove.append(element)\n\n for element in items_to_remove:\n self.elements.remove(element)\n\n for lang in self._locals:\n self.elements.append(self._locals[lang])\n\n def new_local(self, lang):\n\n if lang in languages.keys():\n language = languages[lang][0]\n country = languages[lang][1]\n else:\n raise KeyError\n\n self._locals[lang] = (MetadataLocal(self.parent, self.path, language, country))\n self._write()\n\n\nclass MetadataContact(MetadataParentItemConstructor):\n \"\"\"\n Just a shortcut MetadataContacts that predefines the paths and position\n \"\"\"\n # TODO: Define Role, Country and Online Resource list\n def __init__(self, path, name, parent=None, index=0):\n self.name = name\n self.path = \"%s[%i]\" % (path, index)\n super(MetadataContact, self).__init__(parent, 
contact_elements)\n\n\n\n","repo_name":"wri/gfw-sync2","sub_path":"utilities/arcpy_metadata/metadata_items.py","file_name":"metadata_items.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"84"} +{"seq_id":"41427344518","text":"import unittest\nfrom TestUtils import TestCodeGen\nfrom AST import *\n\n\nclass CheckCodeGenSuite(unittest.TestCase):\n def test_1(self):\n \"\"\"Simple program: int main() {} \"\"\"\n input = \"\"\"Class Program{\n main(){}\n }\"\"\"\n expect = \"\"\n self.assertTrue(TestCodeGen.test(input,expect,500))\n\n\n\n\n","repo_name":"hungnguyen2611/PPL-ASM3","sub_path":"src/test/CodeGenSuite.py","file_name":"CodeGenSuite.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"15304700637","text":"from multiprocessing import Process, Queue\nimport os\n\ndef my_func(*args):\n queue = args[0]\n\n word = \"\"\n while word != \"END\":\n word = queue.get()\n if len(word) == 15:\n print(os.getpid(), \":\", word)\n\ndef main():\n queue = Queue()\n\n p1 = Process(target=my_func, args=(queue, \"1\"))\n p2 = Process(target=my_func, args=(queue, \"2\"))\n\n p1.start()\n p2.start()\n\n for line in open('words.txt'):\n queue.put(line[:-1]) \n\n queue.put(\"END\")\n queue.put(\"END\")\n\n p1.join()\n p2.join()\n print(\"ALl done!\")\n\n\nif __name__ == \"__main__\": # have to be in main\n main()\n\n","repo_name":"Emil88PL/Python-Day3","sub_path":"queue1.py","file_name":"queue1.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"25809836717","text":"from fastai.learner import *\nfrom fastai.text import *\n\n\ndef get_probs(ids, vocabulary_size):\n counter = Counter(ids)\n counter = np.array([counter[i] for i in range(vocabulary_size)])\n return counter / counter.sum()\n\n\nclass LinearDecoder(nn.Module):\n init_range = 0.1\n\n def __init__(self, n_out, n_hid, dropout, tie_encoder=None, decode_train=True):\n super().__init__()\n self.decode_train = decode_train\n self.decoder = nn.Linear(n_hid, n_out, bias=False)\n self.decoder.weight.data.uniform_(-self.init_range, self.init_range)\n self.dropout = LockedDropout(dropout)\n if tie_encoder:\n self.decoder.weight = tie_encoder.weight\n\n def forward(self, inputs):\n raw_outputs, outputs = inputs\n output = self.dropout(outputs[-1])\n output = output.view(output.size(0) * output.size(1), output.size(2))\n if self.decode_train or not self.training:\n decoded = self.decoder(output)\n output = decoded.view(-1, decoded.size(1))\n return output, raw_outputs, outputs\n\n\ndef get_language_model(n_token, embedding_size, n_hid, n_layer, padding_token, decode_train=True, dropouts=None):\n if dropouts is None:\n dropouts = [0.5, 0.4, 0.5, 0.05, 0.3]\n enc = RNN_Encoder(n_token, embedding_size, nhid=n_hid, nlayers=n_layer, pad_token=padding_token,\n dropouti=dropouts[0], wdrop=dropouts[2], dropoute=dropouts[3], dropouth=dropouts[4])\n dec = LinearDecoder(n_token, embedding_size, dropouts[1], decode_train=decode_train,\n tie_encoder=enc.encoder)\n return SequentialRNN(enc, dec)\n\n\ndef pt_sample(probs, n):\n w = -torch.log(cuda.FloatTensor(len(probs)).uniform_()) / (probs + 1e-10)\n return torch.topk(w, n, largest=False)[1]\n\n\nclass CrossEntropyDecoder(nn.Module):\n init_range = 0.1\n\n def __init__(self, probs, decoder, n_neg=4000, sampled=True):\n 
super().__init__()\n self.probs, self.decoder, self.sampled = T(probs).cuda(), decoder, sampled\n self.set_n_neg(n_neg)\n\n def set_n_neg(self, n_neg):\n self.n_neg = n_neg\n\n def get_random_indexes(self):\n return pt_sample(self.probs, self.n_neg)\n\n def sampled_softmax(self, input, target):\n idxs = V(self.get_random_indexes())\n dw = self.decoder.weight\n output = input @ dw[idxs].t()\n max_output = output.max()\n output = output - max_output\n num = (dw[target] * input).sum(1) - max_output\n negs = torch.exp(num) + (torch.exp(output) * 2).sum(1)\n return (torch.log(negs) - num).mean()\n\n def forward(self, input, target):\n if self.decoder.training:\n if self.sampled:\n return self.sampled_softmax(input, target)\n else:\n input = self.decoder(input)\n return F.cross_entropy(input, target)\n\n\ndef get_learner(dropouts, n_neg, sampled, model_data, embedding_size, n_hidden, n_layer, opt_func, probs):\n model = to_gpu(get_language_model(model_data.n_tok, embedding_size, n_hidden, n_layer, model_data.pad_idx, decode_train=False, dropouts=dropouts))\n criterion = CrossEntropyDecoder(probs, model[1].decoder, n_neg=n_neg, sampled=sampled).cuda()\n learner = RNN_Learner(model_data, LanguageModel(model), opt_fn=opt_func)\n criterion.dw = learner.model[0].encoder.weight\n learner.crit = criterion\n learner.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)\n learner.clip = 0.3\n return learner, criterion\n","repo_name":"PracticingMan/chinese_ulmfit","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"80"} +{"seq_id":"40213931117","text":"import numpy as np\nimport pandas as pd\n\n### GRADED\n### Code a function called \"all_distances\"\n### ACCEPT two inputs:\n### An observation from a data set. e.g: har_train.iloc[50,:]\n### The full data set. e.g. 
har_train.\n\n### Create a or numpy array of distances between:\n### ### that single point, and all points in the full dataset\n\n### RETURN the list of distances SORTED from smallest to largest.\n\n### Notes:\n### Use `np.linalg.norm()`, as described in above cell.\n### The smallest distance should be 0.\n\n### YOUR ANSWER BELOW\n\n\ndef all_distances(test_point, data_set):\n \"\"\"\n Find and return a list of distances between the \"test_point\"\n and all the points in \"data_set\", sorted from smallest to largest.\n\n Positional Arguments:\n test_point -- a Pandas Series corresponding to a row in \"data_set\"\n data_set -- a Pandas DataFrame\n\n Example:\n test_point = har_train.iloc[50,:]\n data_set = har_train\n\n print(all_distances(test_point, data_set)[:5])\n #--> [0.0, 2.7970187358249854, 2.922792670143521, 2.966555149052483, 3.033982453218797]\n\n \"\"\"\n\n alldist = list()\n\n for x in range( data_set.shape[0] ):\n norm = np.linalg.norm( data_set.iloc[x,:] - test_point)\n alldist.append(norm)\n\n alldist.sort()\n\n return alldist\n\n\n\n\n\nFEATURE_NAMES = '/Users/darioflores/Documents/machineLearn/uci/features.txt'\nTRAIN_DATA = '/Users/darioflores/Documents/machineLearn/uci/train/X_train.txt'\nTRAIN_LABELS = '/Users/darioflores/Documents/machineLearn/uci/train/y_train.txt'\n\n# read feature names\nfeats = pd.read_table(FEATURE_NAMES, sep='\\n', header=None)\n\n# read in training data\nhar_train = pd.read_table(TRAIN_DATA, sep='\\s+', header=None)\n\n# read in training labels\nhar_train_labels = pd.read_table(TRAIN_LABELS, sep='\\n', header=None, names=[\"label\"], squeeze = True)\n\nhar_train.columns = feats.iloc[:,0]\n\n\n\ntest_point = har_train.iloc[50,:]\ndata_set = har_train\n\nprint(all_distances(test_point, data_set)[:5])\n\n#\n","repo_name":"dariofl24/machineLearningPy","sub_path":"alldist.py","file_name":"alldist.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"11533872033","text":"import copy\nimport random\n\nimport matplotlib.pyplot as plt # noqa\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\n\nfrom built_characters import hexadin, hexadin5, paladin, paladin_pure # noqa\nfrom classes import giant, goblin, umberhulk\nfrom combat import combat\n\nrandom.seed(1)\nnp.random.seed(1)\n\n\ndef gauntlet(party, level=3):\n stages_completed = 0\n table = np.zeros([15, 15], dtype=object)\n party = [char.sample_at_level(level, name=str(i)) for i, char in enumerate(party)]\n for i, enemies in enumerate(\n [\n # [goblin] * 3,\n # [umberhulk] * 1,\n # [umberhulk] * 2,\n # [umberhulk] * 3,\n [giant] * 1,\n # [giant] * 2,\n [giant] * 2,\n [giant] * 3,\n [giant] * 4,\n ]\n ):\n if ((i % 2) == 0) and (i > 0):\n [char.shortrest() for char in party]\n if ((i % 6) == 0) and (i > 0):\n [char.longrest() for char in party]\n\n result = combat(\n party, [copy.copy(char) for char in enemies], copy.copy(table), i\n )\n if not result:\n return stages_completed\n stages_completed += 1\n party = [char for char in party if char.hp > 0]\n return stages_completed\n\n\ndef main(dreamteam=[paladin]):\n scores = pd.DataFrame()\n for lvl in range(11, 16):\n if 0:\n gauntlets = Parallel(n_jobs=5)(\n delayed(gauntlet)(dreamteam, lvl) for _ in range(10)\n )\n else:\n gauntlets = [gauntlet(dreamteam, lvl) for _ in range(10)]\n print(gauntlets)\n scores[lvl] = gauntlets\n return scores\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"leedtan/dndsimulator","sub_path":"dnd/full_simulator.py","file_name":"full_simulator.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"12661977340","text":"while 1:\n A = int(input(\"Enter a Number\"))\n if 0 ACTIVATION layer\n A_prev -- activations from previous layer(input data): (size of previous layer, number of examples)\n W -- weights matrix: (size of current layer, size of previous layer)\n b -- bias vector: (size of the current layer, 1)\n activation -- \"sigmoid\" or \"relu\"\n\n A -- the output of the activation function, also called the post-activation value\n cache -- \"linear_cache\", \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n\n linear_cache = 0\n activation_cache = 0\n a = 0\n\n if activation == \"sigmoid\":\n\n z, linear_cache = linear_forward(a_prev, w, b)\n a, activation_cache = sigmoid(z)\n\n elif activation == \"relu\":\n\n z, linear_cache = linear_forward(a_prev, w, b)\n a, activation_cache = relu(z)\n\n cache = (linear_cache, activation_cache)\n\n return a, cache\n\n\ndef L_model_forward(x, parameters):\n \"\"\"\n forward propagation for the [LINEAR -> RELU] * (layers - 1) -> LINEAR -> SIGMOID computation\n\n X -- data (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n\n AL -- last post-activation value\n caches -- : every cache of linear_relu_forward() (there are layers - 1 of them, indexed from 0 to layers - 2)\n the cache of linear_sigmoid_forward() (there is one, indexed layers - 1)\n \"\"\"\n\n caches = []\n a = x\n\n # 网络层数: 由于有b和w, 所以, // 2\n layers = len(parameters) // 2\n\n # [LINEAR -> RELU] * (layers - 1)\n for l in range(1, layers):\n a_prev = a\n a, cache = linear_activation_forward(a_prev, parameters['W' + str(l)], parameters['b' + str(l)],\n activation=\"relu\")\n caches.append(cache)\n\n # LINEAR -> SIGMOID\n AL, cache = linear_activation_forward(a, parameters['W' + str(layers)], parameters['b' + str(layers)],\n activation=\"sigmoid\")\n caches.append(cache)\n\n return AL, caches\n\n\ndef compute_cost(AL, y):\n \"\"\"\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n cost -- cross-entropy cost\n \"\"\"\n\n # 标签y数量\n m = y.shape[1]\n\n # 计算AL, y\n cost = (1.0 / m) * (-np.dot(y, np.log(AL).T) - np.dot(1 - y, np.log(1 - AL).T))\n\n # 将矩阵转化为数字\n # (e.g. 
turns [[17]] into 17)\n    cost = np.squeeze(cost)\n\n    return cost\n\n\ndef linear_backward(dz, cache):\n    \"\"\"\n    linear portion of backward propagation for a single layer (layer l)\n\n    dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n    cache -- tuple of values (a_prev, w, b) coming from the forward propagation in the current layer\n\n    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l - 1), same shape as a_prev\n    dW -- Gradient of the cost with respect to w (current layer l), same shape as w\n    db -- Gradient of the cost with respect to b (current layer l), same shape as b\n    \"\"\"\n    a_prev, w, b = cache\n\n    # number of input examples in X\n    m = a_prev.shape[1]\n\n    dW = 1.0 / m * np.dot(dz, a_prev.T)\n\n    db = 1.0 / m * np.sum(dz, axis=1, keepdims=True)\n\n    dA_prev = np.dot(w.T, dz)\n\n    return dA_prev, dW, db\n\n\n# wrapper that exists because the activation function differs per layer\ndef linear_activation_backward(dA, cache, activation):\n    \"\"\"\n    dA -- post-activation gradient for current layer l\n    cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n    activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n    dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n    db -- Gradient of the cost with respect to b (current layer l), same shape as b\n    \"\"\"\n\n    dA_prev = dW = db = 0\n\n    linear_cache, activation_cache = cache\n\n    if activation == \"relu\":\n\n        dz = relu_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dz, linear_cache)\n\n    elif activation == \"sigmoid\":\n\n        dz = sigmoid_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dz, linear_cache)\n\n    return dA_prev, dW, db\n\n\ndef L_model_backward(AL, y, caches):\n    \"\"\"\n    backward propagation for the [LINEAR -> RELU] * (layers - 1) -> LINEAR -> SIGMOID group\n\n    AL -- probability vector, output of the forward propagation (L_model_forward())\n    Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n    caches -- list of caches containing:\n              every cache of linear_activation_forward() with \"relu\" (there are (layers - 1) of them,\n              indexes from 0 to layers - 2)\n              the cache of linear_activation_forward() with \"sigmoid\" (there is one, index layers - 1)\n\n    grads -- gradients\n             grads[\"dA\" + str(l)] = ...\n             grads[\"dW\" + str(l)] = ...\n             grads[\"db\" + str(l)] = ...\n    \"\"\"\n    grads = {}\n\n    # number of network layers\n    layers = len(caches)\n\n    # reshape y to match AL\n    y = y.reshape(AL.shape)\n\n    # initialize the backward propagation\n    # element-wise division\n    dAL = - (np.divide(y, AL) - np.divide(1 - y, 1 - AL))\n\n    # Lth layer (SIGMOID -> LINEAR) gradients\n    # \"AL, Y, caches\"\n    current_cache = caches[layers - 1]\n\n    # \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n    grads[\"dA\" + str(layers - 1)], grads[\"dW\" + str(layers)], \\\n    grads[\"db\" + str(layers)] = linear_activation_backward(dAL, current_cache, activation=\"sigmoid\")\n\n    for l in reversed(range(layers - 1)):\n        # lth layer: (RELU -> LINEAR) gradients\n\n        current_cache = caches[l]\n\n        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 1)], current_cache,\n                                                                    activation=\"relu\")\n        grads[\"dA\" + str(l)] = dA_prev_temp\n        grads[\"dW\" + str(l + 1)] = dW_temp\n        grads[\"db\" + str(l + 1)] = db_temp\n\n    return grads\n\n\ndef update_parameters(parameters, grads, learning_rate):\n    \"\"\"\n    Update parameters\n\n    grads -- 
gradients, output of L_model_backward\n\n    Returns:\n    parameters -- updated parameters\n                  parameters[\"W\" + str(l)] = ...\n                  parameters[\"b\" + str(l)] = ...\n    \"\"\"\n\n    # number of network layers: parameters stores both W and b per layer, hence // 2\n    layers = len(parameters) // 2\n\n    # update parameters W and b\n    for l in range(layers):\n        parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n\n        parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n\n    return parameters\n\n\n# L_layer_model\ndef L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False): # lr was 0.009\n    \"\"\"\n    L-layer neural network: [LINEAR -> RELU] * (L - 1) -> LINEAR -> SIGMOID\n\n    X -- data, numpy array of shape (num_px * num_px * 3, number of examples)\n    Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).\n    learning_rate -- learning rate of the gradient descent update rule\n    num_iterations -- number of iterations of the optimization loop\n    print_cost -- if True, it prints the cost every 100 steps\n\n    parameters -- parameters learnt by the model. They can then be used to predict.\n    \"\"\"\n\n    np.random.seed(1)\n    costs = [] # keep track of cost\n\n    # random initialization of the parameters\n    parameters = initialize_parameters_deep(layers_dims)\n\n    # gradient descent loop\n    for i in range(0, num_iterations):\n\n        # forward propagation: [LINEAR -> RELU] * (L - 1) -> LINEAR -> SIGMOID\n        AL, caches = L_model_forward(X, parameters)\n\n        # compute the cost\n        cost = compute_cost(AL, Y)\n\n        # backward propagation\n        grads = L_model_backward(AL, Y, caches)\n\n        # update the parameters\n        parameters = update_parameters(parameters, grads, learning_rate)\n\n        # print\n        if print_cost and i % 100 == 0:\n            print(\"Cost after iteration %i: %f\" % (i, cost))\n\n        if print_cost and i % 100 == 0:\n            costs.append(cost)\n\n    # plot the learning curve\n    plot.plot(np.squeeze(costs))\n    plot.ylabel('cost')\n    plot.xlabel('iterations (per hundreds)')\n    plot.title(\"Learning rate =\" + str(learning_rate))\n\n    plot.show()\n\n    return parameters\n\n\ndef predict(x, y, parameters):\n    \"\"\"\n    X -- data set of examples you would like to label\n    parameters -- parameters of the trained model\n\n    p -- predictions for the given dataset X\n    \"\"\"\n\n    # number of examples in X\n    m = x.shape[1]\n\n    # predictions for dataset X, shape (1, m)\n    p = np.zeros((1, m))\n\n    # forward propagation\n    probas, caches = L_model_forward(x, parameters)\n\n    # map probabilities in 0 ~ 1 to hard labels 0 / 1\n    for i in range(0, probas.shape[1]):\n        if probas[0, i] > 0.5:\n            p[0, i] = 1\n        else:\n            p[0, i] = 0\n\n    print(\"Accuracy: \" + str(np.sum((p == y) / m)))\n\n\ndef main():\n\n    # 4 - layer model\n    layers_dims = [12288, 20, 7, 5, 1]\n\n    train_x, train_y, test_x, test_y = pre_process_data()\n\n    parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=2500, print_cost=True)\n\n    predict(train_x, train_y, parameters)\n\n    predict(test_x, test_y, parameters)\n\n\nmain()\n","repo_name":"RaySunWHUT/NeuralNetwork","sub_path":"NerualNetwork/neural_network/week4/L_NN.py","file_name":"L_NN.py","file_ext":"py","file_size_in_byte":10937,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"80"}{"seq_id":"16081117979","text":"class ModelType(object):\n    PREDNET = 'prednet'\n    CONV_PREDNET = 'conv_prednet'\n    CONCAT_PREDNET = 'concat_prednet'\n    AMPLIF_ERROR = 'amplified_error_prednet'\n    SINGLE_PIXEL_ACTIVATION = 'single_pixel_activation' # input is 1 where the centers of the balls are, 0 everywhere else\n    STATE_VECTOR = 
'state_vector'\n\n\nclass LossFunctions(object):\n    PREDNET_ERROR_LOSS = 'prednet_error_loss'\n    PIXEL_LOSS = 'pixel_loss'\n    DYNAMIC_LOSS = 'dynamic_loss'\n","repo_name":"AlbertoCenzato/deep_predictive_coding","sub_path":"deep_predictive_coding/model_type.py","file_name":"model_type.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"}{"seq_id":"41690496415","text":"\"\"\"\nInput data\ncategory-->c-->int\ngross salary-->sb-->float\nOutput data\nraise-->a-->float\nnew salary-->ns-->float\n\"\"\"\n\nsb=float(input(\"Enter the gross salary \"))\n\nif(sb==5000000):\n    a=sb*0.1\n    ns=sb+a\n    print(\"The category is 1, and the new salary is $\"+str(ns))\nelif(sb==4300000):\n    a=sb*0.15\n    ns=sb+a\n    print(\"The category is 2, and the new salary is $\"+str(ns))\nelif(sb==3600000):\n    a=sb*0.2\n    ns=sb+a\n    print(\"The category is 3, and the new salary is $\"+str(ns))\nelif(sb==2000000):\n    a=sb*0.4\n    ns=sb+a\n    print(\"The category is 4, and the new salary is $\"+str(ns))\nelif(sb==900000):\n    a=sb*0.6\n    ns=sb+a\n    print(\"The category is 5, and the new salary is $\"+str(ns))\n","repo_name":"Argenta47/talleres_de_algoritmos","sub_path":"Estructura de control selectivas/Ejercicio10.py","file_name":"Ejercicio10.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"43273625886","text":"# -*- coding: utf-8 -*-\r\nimport re\r\nimport csv\r\n\r\nlinesFileName = input('Enter the name of the extracted lines file\\n')\r\nlineFile = open(linesFileName, 'r')\r\n\r\ncsvFileName = input('Enter the name of the csv to output\\n')\r\n\r\nlinesArray = []\r\n\r\ndef filterEmpty(x):\r\n    return x != ''\r\n\r\n\r\nfor line in lineFile:\r\n    line = line.replace(',','')\r\n    spacedLine = re.sub('([.!?\\n])', r' \\1', line)\r\n    wordsArray = re.split('\\s', spacedLine)\r\n    wordsArray = list(filter(filterEmpty, wordsArray))\r\n    linesArray.append(wordsArray)\r\n\r\ncsvFile = open('{}.csv'.format(csvFileName), 'w')\r\ncsvWriter = csv.writer(csvFile)\r\ncsvWriter.writerow(['word1', 'word2'])\r\nfor line in linesArray:\r\n    print(line)\r\n    for index, word in enumerate(line):\r\n        if (index == 0):\r\n            csvWriter.writerow(['^', word])\r\n        elif (index == (len(line) - 1)):\r\n            csvWriter.writerow([word, '$'])\r\n        else:\r\n            csvWriter.writerow([word, line[index + 1]])\r\n\r\ncsvFile.close()","repo_name":"rcwillett/script-lines-extractor","sub_path":"lines_to_csv.py","file_name":"lines_to_csv.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"35220287839","text":"import argparse\nimport pathlib\nimport itertools\nfrom typing import Union, List, Tuple\n\nfrom reader.mrbfile import MRBFile\nfrom reader.export import HDF5Exporter\n\n\ndef _flatten(l, container_types=(List, Tuple)):\n    \"\"\"\n    Flatten nested iterables (default: list or tuples) of arbitrary depth.\n    Might fail at extreme depths due to Python recursion limit.\n    \"\"\"\n    for elem in l:\n        if isinstance(elem, container_types):\n            yield from _flatten(elem)\n        else:\n            yield elem\n\n\ndef _gather_files(candidate_paths: List[Union[str, pathlib.Path]]) -> List[pathlib.Path]:\n    \"\"\"\n    Gather the list of MRB files from a heterogeneous candidate path list\n    that may contain directories and filepaths.\n    Directories are crawled recursively - this might fail at extreme\n    depths due to 
Python recursion limit.\n \"\"\"\n suffixes = ['.MRB', '.mrb']\n # wrap into list if candidate path is only one object\n if isinstance(candidate_paths, (str, pathlib.Path)):\n candidate_paths = [candidate_paths]\n\n hdf5_filepaths = []\n for c_path in candidate_paths:\n if not isinstance(c_path, pathlib.Path):\n c_path = pathlib.Path(c_path)\n\n if c_path.is_file():\n if c_path.suffix in suffixes:\n hdf5_filepaths.append(c_path.resolve())\n elif c_path.is_dir():\n hdf5_filepaths.append(\n _gather_files([p for p in c_path.iterdir()])\n )\n return list(_flatten(hdf5_filepaths))\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Transduce 3DSlicer-produced MRB files into HDF5 DNN training data.'\n )\n parser.add_argument(\n '--source', nargs='+', type=str, required=True,\n help=('MRB source files. (Sub-) Directories are crawled recursively '\n 'and files are added automatically based on suffix matching.')\n )\n parser.add_argument(\n '--target_dir', type=str, required=True,\n help='Target directory where the produced HDF5 files are stored.'\n )\n parser.add_argument(\n '--force_write', action='store_true', default=False,\n help=('Set to overwrite preexisting files. Otherwise FileExistsError is '\n 'thrown on overwrite attempt.')\n )\n args = parser.parse_args()\n\n target_dir = pathlib.Path(args.target_dir)\n source_paths = _gather_files(args.source)\n\n if not target_dir.is_dir():\n assert not target_dir.is_file(), f'Target path < {target_dir.resolve()} > is a file!'\n target_dir.mkdir(parents=True)\n \n exporter = HDF5Exporter(force_write=args.force_write)\n \n for source_file in source_paths:\n prefix = ''\n suffix = '.hdf5'\n hdf_fname = ''.join((prefix, source_file.stem, suffix))\n\n mrbfile = MRBFile(source_file)\n\n exporter.store(\n save_path=target_dir / hdf_fname,\n tagged_raw_data=mrbfile.read_raws(),\n tagged_label_data=mrbfile.read_segmentations()\n )\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"stebix/mrbreader","sub_path":"transduce.py","file_name":"transduce.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"80"} +{"seq_id":"13870199089","text":"# Import the necessary libraries\r\nfrom PIL import Image\r\nfrom numpy import asarray\r\nimport numpy as np\r\n\r\n\r\n# Print iterations progress\r\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\r\n \"\"\"\r\n Call in a loop to create terminal progress bar\r\n @params:\r\n iteration - Required : current iteration (Int)\r\n total - Required : total iterations (Int)\r\n prefix - Optional : prefix string (Str)\r\n suffix - Optional : suffix string (Str)\r\n decimals - Optional : positive number of decimals in percent complete (Int)\r\n length - Optional : character length of bar (Int)\r\n fill - Optional : bar fill character (Str)\r\n printEnd - Optional : end character (e.g. 
\"\\r\", \"\\r\\n\") (Str)\r\n \"\"\"\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()\r\n\r\n# load the image and convert into\r\n# numpy array\r\nimg = Image.open('../images/1.jpg')\r\na = asarray(img)\r\n\r\n\r\n# load the image and convert into \r\n# numpy array\r\nimage = Image.open('../images/1.jpg')\r\nb = asarray(image)\r\n\r\n# # Debugging\r\n# print(a.shape)\r\n# print(b.shape)\r\n# print(len(a))\r\n\r\nc = [[[0]*a.shape[2]]*a.shape[1]]*a.shape[0]\r\nc = np.array(c)\r\n# print(c.shape)\r\n\r\nfor i in range(len(a)):\r\n\tprintProgressBar(i , len(a)-1)\r\n\tfor j in range(len(a[0])):\r\n\t\ttemp = []\r\n\t\tfor k in range(len(a[0][0])):\r\n\t\t\tif a[i][j][k] == b[i][j][k]:\r\n\t\t\t\ttemp.append(a[i][j][k])\r\n\t\t\telse:\r\n\t\t\t\ttemp.append(0)\t\t\r\n\t\tc[i][j] = (np.array(temp).astype(np.uint8))\r\n\r\n# print(c.shape)\r\n\r\n# Below is the way of creating Pillow \r\n# image from our numpyarray\r\npilImage = Image.fromarray(c)\r\npilImage = pilImage.resize((500,500))\r\npilImage.show()","repo_name":"CaptainLazarus/8thSemProject","sub_path":"src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"43393664442","text":"# Import Libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\n# Task complete text\nprint(\"Import Libraries complete\")\n\n# Ask user for number of GPS ranges\nnum_ranges = int(input(\"Enter number of GPS ranges: \"))\n\n# Initialize empty dataframe to store filtered occurrences\noccurrences = pd.DataFrame(columns=['gbifID', 'decimalLatitude', 'decimalLongitude', 'eventDate', 'file_path'])\n\n# Loop through each GPS range\nfor i in range(num_ranges):\n # Ask user for latitude and longitude range\n lat_range = input(\"Enter latitude range (min,max): \")\n lon_range = input(\"Enter longitude range (min,max): \")\n lat_min, lat_max = [float(x) for x in lat_range.split(',')]\n lon_min, lon_max = [float(x) for x in lon_range.split(',')]\n\n # Ask user for number of CSVs to use and their file paths\n num_csvs = int(input(\"Enter number of CSVs: \"))\n file_names = []\n for j in range(num_csvs):\n csv_num = j + 1\n file_path = input(f\"Enter CSV file path for CSV {csv_num}: \")\n file_name = os.path.basename(file_path)\n file_names.append(file_name)\n print(f\"{file_name} added.\")\n\n # Loop through each CSV file\n for file_name in file_names:\n print(f\"Filtering {file_name} to GPS range\")\n # Get the full file path for the current CSV file\n file_path = os.path.join(os.getcwd(), file_name)\n\n # Read in CSV and extract relevant columns\n data = pd.read_csv(file_path, usecols=['gbifID', 'decimalLatitude', 'decimalLongitude', 'eventDate'], delimiter='\\t')\n\n # Filter occurrences by GPS range\n filtered = data.loc[(data['decimalLatitude'] >= lat_min) & (data['decimalLatitude'] <= lat_max) &\n (data['decimalLongitude'] >= lon_min) & (data['decimalLongitude'] <= lon_max)]\n\n # Parse eventDate column into Year and Month columns\n filtered['Year'] = pd.to_datetime(filtered['eventDate']).dt.year\n filtered['Month'] = pd.to_datetime(filtered['eventDate']).dt.month_name().str.slice(stop=3)\n filtered = 
filtered.drop(columns=['eventDate'])\n        filtered['file_path'] = file_path # add file_path column\n\n        # Add filtered occurrences to overall dataframe\n        occurrences = occurrences.append(filtered, ignore_index=True)\n    \n    # Task complete text\n    print(\"Complete\")\n\n    # Create subsections of GPS ranges that are 0.05 by 0.05\n    print(\"Creating GPS subsections\")\n    lat_bins = pd.cut(occurrences['decimalLatitude'], bins=int((lat_max-lat_min)/0.05), precision=2)\n    lon_bins = pd.cut(occurrences['decimalLongitude'], bins=int((lon_max-lon_min)/0.05), precision=2)\n\n    # Task complete text\n    print(\"Complete\")\n\n    # Count occurrences in each subsection and filter out empty subsections\n    print(\"Counting occurrences in subsections\")\n    subsections = occurrences.groupby([lat_bins, lon_bins]).size().reset_index(name='count')\n    subsections = subsections.loc[subsections['count'] > 0]\n    subsections = subsections.rename(columns={'decimalLatitude': 'lat', 'decimalLongitude': 'lon'})\n\n    # Task complete text\n    print(\"Complete\")\n\n    # Loop through each subsection\n    print(\"Creating figures\")\n    for i, subsection in subsections.iterrows():\n        # Get the occurrences in the current subsection\n        lat_range = subsection['lat'].mid\n        lon_range = subsection['lon'].mid\n        filtered = occurrences.loc[(lat_bins == subsection['lat']) & (lon_bins == subsection['lon'])]\n\n        # Group occurrences by month and CSV file\n        group = filtered.groupby(['Month', 'file_path']).size().reset_index(name='count')\n\n        # Filter out months with no occurrences in all CSVs\n        months = group['Month'].unique()\n        for month in months:\n            if (group.loc[group['Month'] == month, 'file_path'].nunique() < len(file_names)):\n                group = group.loc[group['Month'] != month]\n\n        # Plot bar graph if there are months with occurrences from all CSVs\n        if len(group) > 0:\n            fig, ax = plt.subplots(figsize=(10, 6))\n            for file_path, file_group in group.groupby('file_path'):\n                ax.bar(file_group['Month'], file_group['count'], label=os.path.basename(file_path))\n            ax.set_xlabel('Month')\n            ax.set_ylabel('Number of Occurrences')\n            ax.set_title(f\"GPS Range: {lat_range:.2f},{lon_range:.2f}\")\n            plt.xticks(rotation=45)\n            ax.legend()\n            plt.tight_layout()\n            plt.show()\n    \n# Script complete text\nprint(\"Script finished\")","repo_name":"Bozzmer/401FP","sub_path":"401FP.py","file_name":"401FP.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"32170312043","text":"\n\na = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n\ndef MaxSumBrute(array):\n    \"\"\" Brute force method for finding largest sum for contiguous subarrays of positive and negative number array of size n\n\n    @param: Array array: target array\n    @rparam: int maxSum: max sum of contiguous subarrays\n    \"\"\"\n    # set default max sum\n    # true because array consists of both negative and positive numbers\n    maxSum = 0\n    for i in xrange(0, len(array)):\n        startPosition = i\n        endPosition = startPosition + 1\n        while(endPosition <= len(array)):\n            currentSubarraySum = sum(array[startPosition: endPosition])\n            if(currentSubarraySum > maxSum):\n                maxSum = currentSubarraySum\n            endPosition += 1\n    return maxSum\n\n\nprint(MaxSumBrute(a))\n","repo_name":"tt6746690/Courses","sub_path":"CSC236/assignments/a2/maxSumBrute.py","file_name":"maxSumBrute.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"80"}{"seq_id":"28274667397","text":"class Node:\n    def __init__(self, id, x, 
y, initEnergy, energyTransfer, energyReceive,\n                 energyFreeSpace, energyMultiPath, energyAgg, isDead, numPkts, dataPktSize):\n        self.id = id\n        self.x = x\n        self.y = y\n        self.initEnergy = initEnergy\n        self.energyTransfer = energyTransfer\n        self.energyReceive = energyReceive\n        self.energyFreeSpace = energyFreeSpace\n        self.energyMultiPath = energyMultiPath\n        self.energyAgg = energyAgg\n        self.isDead = isDead\n        self.numPkts = numPkts\n        self.dataPktSize = dataPktSize\n","repo_name":"saialekhhya/cn-assignment","sub_path":"Nodes.py","file_name":"Nodes.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"24739681995","text":"import threading\nimport time\n\n\nclass MyThread(threading.Thread):\n    def __init__(self, name, function, pos, items, users, tags, group_indices, group_algos, num_recall, tag_set):\n        threading.Thread.__init__(self)\n        self.name = name\n        self.function = function\n        self.results = []\n        self.pos = pos\n        self.items = items\n        self.users = users\n        self.tags = tags\n        self.group_indices = group_indices\n        self.group_algos = group_algos\n        self.num_recall = num_recall\n        self.tag_set = tag_set\n\n    def init(self):\n        threading.Thread.__init__(self)\n\n    def run(self):\n        self.results = self.function(self.pos, self.items, self.users, self.tags, self.group_indices, self.group_algos,\n                                     self.num_recall, self.tag_set)\n","repo_name":"bytecamp2021-search-C/algorithm","sub_path":"src/bcthread.py","file_name":"bcthread.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"41133601946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 23 17:21:33 2021\n\n@author: victo\n\"\"\"\n\nimport numpy as np\nimport data_importing as data\nimport my_stat_tools as stt\nimport numexpr as ne\n\ndef rbf_kernel_matrix(X,gamma=0.01,var=5.0):\n\n    X_norm = np.einsum('ij,ij->i',X,X)\n    K = ne.evaluate('v * exp(-g * (A + B - 2 * C))', {\n        'A' : X_norm[:,None],\n        'B' : X_norm[None,:],\n        'C' : np.dot(X, X.T),\n        'g' : gamma,\n        'v' : var\n    })\n    return K\n\ndef fit_LSSVR(X,y,C):\n    # will return optimal alphas and b\n    # X is in R(n_sample,n_feature)\n    \n    n_samples,n_features=X.shape\n    ker_matrix=rbf_kernel_matrix(X)\n    \n    ## calculate K+(1/C)*identity\n    ones=np.ones((1,X.shape[0]))\n    ones_a=np.ones((1+X.shape[0],1))\n    K=ker_matrix+(1/C)*np.identity(ker_matrix.shape[0])\n    K_=np.append(ones_a,np.vstack([ones,K]),1)\n    \n    \n    y_i=[0]+list(y)\n    y_i=np.array(y_i)\n    \n    coeffs=np.linalg.inv(K_)@y_i\n    return coeffs[0],coeffs[1:]","repo_name":"bcgold19/smart-beta-support-vector","sub_path":"LSSVR.py","file_name":"LSSVR.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"919592212","text":"import numpy as np\n\nclass Node:\n    nodeid = 0\n    \"Node in a tree\" \n    def __init__(self, parent):\n        self.parent = parent\n        self.depth = (0 if self.is_root() else parent.depth + 1)\n        self.children = []\n        self.nodeid = Node.nodeid\n        Node.nodeid += 1\n    \n    def is_root(self):\n        return self.parent == self\n    \n    def set_nodeid(value=0):\n        Node.nodeid = value\n\nclass ActionNode(Node):\n    \"Type of node where a decision must be taken\"\n    def __init__(self, parent, player):\n        super().__init__(parent)\n        self.player = player\n        self.decision = np.nan\n        self.value = (np.nan, np.nan)\n 
\n    def reproduce_actions(self, lam, minactions, maxactions):\n        nchildren = np.clip(np.random.poisson(lam), minactions, maxactions)\n        next_player = (self.player + 1) % 2\n        self.children = [ActionNode(self, next_player) for _ in range(nchildren)]\n    \n    def reproduce_payoffs(self, lam, minpayoffs, maxpayoffs, ties):\n        nchild = np.clip(np.random.poisson(lam), minpayoffs, maxpayoffs) # at least one\n        opts = [-1, 0, 1] if ties else [-1, 1]\n        payoffs0 = [np.random.choice(opts) for _ in range(nchild)]\n        self.children = [PayOffNode(self, (-v, v)) for v in payoffs0]\n    \n    def __repr__(self):\n        decision_str = \"\" if np.isnan(self.decision) else \"selected: {}\".format(self.decision)\n        value_str = \"\" if np.isnan(self.value[0]) else \"value: ({}, {})\".format(self.value[0], self.value[1])\n        return \"Action(id: {}, player: {}, {}, {})\".\\\n            format(self.nodeid, self.player, decision_str, value_str)\n    \nclass GameRootNode(ActionNode):\n    \"Regular action node but self referential\"\n    def __init__(self):\n        parent = self\n        player = 0\n        super().__init__(parent, player) \n    \nclass PayOffNode(Node):\n    \"A node with a payoff value\"\n    def __init__(self, parent, payoff):\n        assert len(payoff) == 2\n        super().__init__(parent)\n        self.payoff = payoff\n        parent.children = [self]\n        self.selected = np.nan\n    \n    def __repr__(self):\n        return \"PayOff(id: {}, value: ({:d}, {:d}))\".format(self.nodeid, *self.payoff)\n\nclass TwoPlayerGame:\n    def __init__(self, \n                 root=GameRootNode(), \n                 lam=2., maxdepth=10,\n                 generate=True,\n                 minactions=0, \n                 maxactions=99, \n                 minpayoffs=2,\n                 maxpayoffs=99,\n                 ties=False):\n        Node.set_nodeid(1)\n        self.root = root\n        self.lam = lam\n        self.minactions=minactions\n        self.maxactions=maxactions\n        self.minpayoffs=minpayoffs\n        self.maxpayoffs=maxpayoffs\n        self.maxdepth = maxdepth\n        self.ties = ties\n        self.solved = False\n        self.num_nodes = 1\n        \n        if generate:\n            self.generate()\n        \n    def generate(self): \n        \"Randomly reproduces nodes until maxdepth or no descendants found\"\n        # while a branch has a non-payoff node\n        nonpayoff = [self.root]\n        while len(nonpayoff) > 0:\n            # take one nonterminal node and reproduce\n            node = nonpayoff.pop()\n            if node.depth < self.maxdepth - 1:\n                node.reproduce_actions(self.lam, minactions=self.minactions, maxactions=self.maxactions)\n            \n            # max_depth reached or no children then attach payoff nodes, else add to nonpayoff\n            if len(node.children) > 0: \n                for child in node.children:\n                    nonpayoff.append(child)\n                    self.num_nodes += 1\n            else:\n                # spawn a generation of payoffs\n                node.reproduce_payoffs(self.lam, self.minpayoffs, self.maxpayoffs, self.ties)\n                self.num_nodes += len(node.children)\n    \n    def branch(self, node):\n        \"returns a pointer to the same fields of the tree but different root\"\n        return TwoPlayerGame(root=node, generate=False)\n    \n    def solve(self):\n        \"uses backward induction (dynamic programming) to find a Nash equilibrium\"\n        node = self.root \n        if isinstance(node, ActionNode):\n            player = node.player\n            children = node.children\n            child_value = []\n            for i, child in enumerate(children):\n                if isinstance(child, PayOffNode):\n                    child_value.append(child.payoff)\n                else:\n                    child_subgame = self.branch(child)\n                    child_subgame.solve()\n                    subgame_value = child_subgame.root.value\n                    child_value.append(subgame_value)\n            \n            node.decision = np.argmax([x[player] for x in child_value])\n            node.value = child_value[node.decision]\n        self.solved = True\n    \n    def print_solution_path(self):\n        \"Prints selected nodes only\"\n        assert self.solved\n        node = self.root\n        s = (' ' * node.depth) + str(node) + 
'\\n'\n while not isinstance(node, PayOffNode):\n node = node.children[node.decision]\n s += (':-' * node.depth) + str(node) + '\\n'\n if node.payoff[0] > node.payoff[1]:\n s += \"Player 0 wins\"\n elif node.payoff[1] > node.payoff[0]:\n s += \"Player 1 wins\"\n else:\n s += \"Player 0 and 1 draw\"\n print(s)\n\n def __repr__(self):\n return \"Tree with {:d} nodes\".format(self.num_nodes) \n\n def __str__(self):\n \"Depth-First printing\"\n node = self.root\n strout = (':-' * node.depth) + str(node) + '\\n'\n if isinstance(node, PayOffNode):\n return strout\n else:\n children = node.children\n for child in children:\n strout += self.branch(child).__str__()\n return strout\n \n def valuemap(self, playerid):\n # we do BFS for saving in a dict all values\n assert self.solved\n valuemap = dict()\n curr=self.root\n valuemap[curr.nodeid]=curr.value[playerid]\n to_visit=curr.children.copy()\n while len(to_visit) > 0:\n curr = to_visit.pop()\n for x in curr.children:\n to_visit.append(x)\n if isinstance(curr, PayOffNode):\n valuemap[curr.nodeid]=curr.payoff[playerid]\n else:\n valuemap[curr.nodeid]=curr.value[playerid] \n \n \n return valuemap","repo_name":"mauriciogtec/RLPlayGround","sub_path":"randomgames.py","file_name":"randomgames.py","file_ext":"py","file_size_in_byte":6518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"73485454339","text":"from typing import Optional\r\nfrom dataclasses import dataclass\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn import CrossEntropyLoss\r\nfrom transformers.models.bert.modeling_bert import (\r\n BertPreTrainedModel,\r\n BertModel,\r\n SequenceClassifierOutput\r\n)\r\n\r\n\r\n@dataclass\r\nclass REOutput(SequenceClassifierOutput):\r\n loss_mi: Optional[torch.FloatTensor] = None\r\n rel_hidden_states: Optional[torch.FloatTensor] = None\r\n supcon_hidden_states: Optional[torch.FloatTensor] = None\r\n\r\n\r\nclass BertForRelationExtraction(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n self.config = config\r\n\r\n self.bert = BertModel(config)\r\n\r\n self.classifier_projection = nn.Sequential(\r\n nn.Dropout(config.classifier_dropout),\r\n nn.Linear(config.hidden_size * 2, config.hidden_size, bias=True),\r\n nn.GELU(),\r\n nn.LayerNorm([config.hidden_size]),\r\n )\r\n # self.linear_transform = nn.Linear(768 * 2, 768, bias=True)\r\n\r\n # self.dropout = nn.Dropout(drop)\r\n # self.linear = nn.Linear(768 * 2, 768, bias=True)\r\n # self.layer_normalization = nn.LayerNorm([768])\r\n self.re_classifier = nn.Linear(config.hidden_size, config.num_labels, bias=False)\r\n\r\n self.supcon_head = nn.Sequential(\r\n nn.Linear(config.hidden_size * 2, config.hidden_size),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(config.hidden_size, 64),\r\n )\r\n\r\n def forward(\r\n self,\r\n input_ids: Optional[torch.Tensor] = None,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n inputs_embeds: Optional[torch.Tensor] = None,\r\n labels: Optional[torch.Tensor] = None,\r\n subject_start_pos: Optional[torch.Tensor] = None,\r\n object_start_pos: Optional[torch.Tensor] = None,\r\n fine_class: bool = False,\r\n **kwargs,\r\n ):\r\n if attention_mask is None:\r\n attention_mask = input_ids != 0\r\n mi = False\r\n # if len(input_ids.size()) == 3:\r\n # mi = True\r\n # input_ids = input_ids.view((-1, input_ids.size(-1)))\r\n # attention_mask = attention_mask.view((-1, attention_mask.size(-1)))\r\n 
outputs = self.bert(\r\n input_ids,\r\n inputs_embeds=inputs_embeds,\r\n attention_mask=attention_mask,\r\n )\r\n\r\n last_hidden_states = outputs[0]\r\n\r\n idx = torch.arange(last_hidden_states.size(0)).to(last_hidden_states.device)\r\n ss_emb = last_hidden_states[idx, subject_start_pos]\r\n os_emb = last_hidden_states[idx, object_start_pos]\r\n # sent_emb = ((last_hidden_states * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))\r\n raw_rel_hidden_states = torch.cat([ss_emb, os_emb], dim=-1)\r\n\r\n rel_hidden_states = self.classifier_projection(raw_rel_hidden_states)\r\n\r\n # if fine_labels is not None:\r\n # fine_rel_hidden_states = self.fine_linear_transform(raw_rel_hidden_states)\r\n\r\n # rel_hidden_states = self.dropout(rel_hidden_states)\r\n # rel_hidden_states = self.linear(rel_hidden_states)\r\n # rel_hidden_states = F.gelu(rel_hidden_states)\r\n # rel_hidden_states = self.layer_normalization(rel_hidden_states)\r\n # rel_hidden_states = self.dropout(rel_hidden_states)\r\n\r\n supcon_hidden_states = self.supcon_head(raw_rel_hidden_states)\r\n\r\n\r\n loss = None\r\n # logits = None\r\n if fine_class:\r\n logits = self.fine_classifier(rel_hidden_states)\r\n else:\r\n logits = self.re_classifier(rel_hidden_states)\r\n\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss()\r\n loss = loss_fct(logits, labels)\r\n\r\n return REOutput(\r\n loss=loss,\r\n logits=logits,\r\n hidden_states=rel_hidden_states,\r\n supcon_hidden_states=supcon_hidden_states,\r\n )\r\n","repo_name":"Yifan-Song793/InfoCL","sub_path":"model/BertForRelationExtraction.py","file_name":"BertForRelationExtraction.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"80"} +{"seq_id":"40052843249","text":"init=[int(x) for x in input().split()]\nnumOfStudents=init[0]\nstudents=[]\nfor i in range(numOfStudents):\n students.append(input())\nquestions=init[1]\nscore=[int(x) for x in input().split()]\nsum=0\nfor i in range(questions):\n countA=0\n countB=0\n countC=0\n countD=0\n countE=0\n for j in range(numOfStudents):\n if students[j][i]=='A':\n countA+=1\n elif students[j][i]=='B':\n countB+=1\n elif students[j][i]=='C':\n countC+=1\n elif students[j][i]=='D':\n countD+=1\n elif students[j][i]=='E':\n countE+=1\n sum+=max(countA,countB,countC,countD,countE)*score[i]\nprint(sum)","repo_name":"AdamZhouSE/pythonHomework","sub_path":"Code/CodeRecords/2834/60614/292116.py","file_name":"292116.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"32588445548","text":"\n\nfrom typing import Any, List\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.orm import Session\n\nfrom app import schemas, models, crud\nfrom app.api import deps\n\nrouter = APIRouter()\n\n\n@router.get( \"\", response_model= List[ schemas.Organization ] )\ndef read_organizations(\n skip: int = 0,\n limit: int = 100,\n db: Session = Depends(deps.get_db),\n) -> Any:\n \"\"\"\n Retrieve organizations.\n \"\"\"\n organizations = crud.organization.get_multi( db, skip=skip, limit=limit )\n return jsonable_encoder( organizations )\n\n\n@router.post( \"\", response_model= schemas.Organization )\ndef create_organization(\n organization_in: schemas.OrganizationCreate,\n db: Session = Depends(deps.get_db),\n) -> Any:\n \"\"\"\n Create new organization.\n \"\"\"\n organization = 
crud.organization.if_organization( db, obj_in=organization_in )\n if ( organization ):\n raise HTTPException(\n status_code=400, detail=\"organization already exists in the system\"\n )\n organization = crud.organization.create(db, obj_in=organization_in)\n return jsonable_encoder( organization )\n\n\n@router.patch( \"/{id}\", response_model= schemas.Organization )\ndef update_organization(\n id: int,\n organization_in: schemas.OrganizationUpdate,\n db: Session = Depends(deps.get_db),\n) -> Any:\n \"\"\"\n Update an organization.\n \"\"\"\n organization = crud.organization.get( db, id= id)\n if not organization:\n raise HTTPException(\n status_code=404,\n detail=\"Organization not found\",\n )\n organization = crud.organization.update( db, db_obj= organization, obj_in= organization_in)\n return jsonable_encoder( organization )\n\n\n@router.get( \"/{id}\", response_model= schemas.Organization )\ndef read_organization_by_id(\n id: int,\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Get a specific organization by id.\n \"\"\"\n organization = crud.organization.get( db, id=id)\n if not organization:\n raise HTTPException(\n status_code=404, detail=\"Organization not found\"\n )\n return jsonable_encoder( organization )\n\n\n@router.delete( \"/{id}\", response_model= schemas.Organization )\ndef delete_organization(\n id: int,\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n Delete a specific organization by id.\n \"\"\"\n organization = crud.organization.get( db, id=id)\n if not organization:\n raise HTTPException(\n status_code=404, detail=\"Organization not found\"\n )\n organization = crud.organization.remove( db, id=id)\n return jsonable_encoder( organization )","repo_name":"juanMaAV92/fastApiDemo","sub_path":"app/api/api_v1/endpoints/organizations.py","file_name":"organizations.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"34973967321","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ('chat', '0003_auto_20150407_2250'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='chatroom',\n name='group',\n field=models.ForeignKey(to='auth.Group', default=0),\n preserve_default=False,\n ),\n ]\n","repo_name":"michalmatlega/mudsys","sub_path":"chat/migrations/0004_chatroom_group.py","file_name":"0004_chatroom_group.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"26278983478","text":"#!/usr/bin/python\n\n\"\"\"\n ____ _ ___\n| _ \\ __ _ _ __| |_ / _ \\ _ __ ___\n| |_) / _` | '__| __| | | | | '_ \\ / _ \\\n| __/ (_| | | | |_ | |_| | | | | __/\n|_| \\__,_|_| \\__| \\___/|_| |_|\\___|\n\n\"\"\"\n\ninput = open(\"input.txt\")\nincrease_count = 0\n\nprevious_value = int(input.readline().rstrip())\nline = input.readline()\n\nwhile len(line) > 0:\n current_value = int(line.rstrip())\n\n if current_value > previous_value:\n increase_count += 1\n\n previous_value = current_value\n line = input.readline()\n\ninput.close()\n\nprint(increase_count)\n\n\"\"\"\n ____ _ _____ \n| _ \\ __ _ _ __| |_ |_ _|_ _____ \n| |_) / _` | '__| __| | | \\ \\ /\\ / / _ \\ \n| __/ (_| | | | |_ | | \\ V V / (_) |\n|_| \\__,_|_| \\__| |_| \\_/\\_/ \\___/ 
\n\n\"\"\"\n\nsliding_input = open(\"input.txt\")\nsum_increase_count = 0\n\nvalue_one = int(sliding_input.readline().rstrip())\nvalue_two = int(sliding_input.readline().rstrip())\nvalue_three = int(sliding_input.readline().rstrip())\n\nnext_line = sliding_input.readline()\n\nwhile len(next_line) > 0:\n next_value = int(next_line.rstrip())\n\n prior_sliding_window = value_one + value_two + value_three\n current_sliding_window = value_two + value_three + next_value\n\n if current_sliding_window > prior_sliding_window:\n sum_increase_count += 1\n\n value_one = value_two\n value_two = value_three\n value_three = next_value\n next_line = sliding_input.readline()\n\nsliding_input.close()\n\nprint(sum_increase_count)","repo_name":"80columns/advent-2021","sub_path":"1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"70124414660","text":"import os, pdb, shutil, pickle, sys, librosa\nfrom tdqm import tdqm\nimport numpy as np\n\n\"\"\"\n Counts all audio files in a 1-tier directory\n collects audio durations and produces the dataset stats for:\n List of files that couldn't be computes\n List of durations\n Number of files\n Average duration\n Median duration\n\n Requires src and dst directories as arguments\n\"\"\"\n\nsrc_dir = sys.argv[1]\ndst_dir = sys.argv[2]\ndir_dict = {}\n\n_, dirs, _ = next(os.walk(src_dir))\n\nlength_list = []\nerror_list = []\n\nfor i, dir in tdqm(enumerate(dirs)):\n _,_, files = next(os.walk(dir))\n num_files = len(files)\n dir_dict[dir] = num_files\n for j, f in enumerate(files):\n if f.endswith('.m4a') and not f.startswith('.'):\n # if num == 432 or num == 821:\n # pdb.set_trace()\n try:\n wav, sr = librosa.load(os.path.join(src_dir, dir, f), sr=None)\n wav_dur = len(wav) / sr\n print(f, wav_dur)\n length_list.append(wav_dur)\n except:\n error_list.append(os.path.join(src_dir, dir, f))\n\nlength_arr = np.asarray(length_list)\n\nwith open(os.path.join(dst_dir, 'dataset_durations.pkl'), 'wb') as handle:\n pickle.dump({'error_list':error_list, 'length_list':length_list, 'num_files':len(length_arr), 'average_dur':np.average(length_arr), 'mediam_dur':np.median(length_arr)}, handle)\n\nprint(f'Average is {np.average(length_arr)}')\nsorted_dict = sorted(dir_dict.items(), key=lambda item: item[1])\nprint('Play with dictionary \\'sorted_dict\\' to get an idea of the distribution of directory sizes (directories names are 8 digit long)'.upper())\n","repo_name":"Trebolium/my_utils","sub_path":"count_dir_files.py","file_name":"count_dir_files.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"30638276340","text":"from datetime import datetime\n\nimport pytz\n\nBINANCE_API_KEY = \"BINANCE_API_KEY\"\nBINANCE_API_SECRET = \"BINANCE_API_SECRET\"\n\nBINANCE_API_KEY_TEST = \"BINANCE_API_KEY_TEST\"\nBINANCE_API_SECRET_TEST = \"BINANCE_API_SECRET_TEST\"\n\nBINANCE_SPOT_TRADING = \"SPOT\"\nBINANCE_MARGIN_TRADING = \"MARGIN\"\n\n\nCANDLE_SIZES_MAPPER = {\n '1m': '1T',\n '5m': '5T',\n '10m': '10T',\n '15m': '15T',\n '30m': '30T',\n '1h': '1H',\n '1d': '1D',\n}\n\nCANDLE_SIZES_ORDERED = [\n '1m',\n '5m',\n '10m',\n '15m',\n '30m',\n '1h',\n '1d',\n]\n\n\nCOUNT_MAPPER = {\n '5m': {'5m': 1},\n '10m': {'5m': 2, '10m': 1},\n '15m': {'5m': 3, '15m': 1},\n '30m': {'5m': 6, '10m': 3, '15m': 2, '30m': 1},\n '1h': {'5m': 12, '10m': 6, '15m': 4, '30m': 2, '1h': 1},\n 
'1d': {'5m': 288},\n}\n\n\nCANDLE_SIZE_TIMEDELTA = {\n '1m': {\"seconds\": 59.9999},\n '5m': {\"minutes\": 4.9999},\n '10m': {\"minutes\": 9.9999},\n '15m': {\"minutes\": 14.9999},\n '30m': {\"minutes\": 29.9999},\n '1h': {\"minutes\": 59.9999},\n '1d': {\"hours\": 23.9999},\n}\n\n\nCOLUMNS_AGGREGATION = {\n \"close_time\": 'last',\n \"open\": \"first\",\n \"high\": \"max\",\n \"low\": \"min\",\n \"close\": \"last\",\n \"volume\": 'sum',\n \"quote_volume\": \"sum\",\n \"trades\": \"sum\",\n \"taker_buy_asset_volume\": \"sum\",\n \"taker_buy_quote_volume\": \"sum\"\n}\n\nCOLUMNS_AGGREGATION_WEBSOCKET = {\n \"close_time\": 'last',\n \"open\": \"last\",\n \"high\": \"last\",\n \"low\": \"last\",\n \"close\": \"last\",\n \"volume\": 'last',\n \"quote_volume\": \"last\",\n \"trades\": \"last\",\n \"taker_buy_asset_volume\": \"last\",\n \"taker_buy_quote_volume\": \"last\"\n}\n\nNAME_MAPPER = {\n \"t\": \"open_time\",\n \"T\": \"close_time\",\n \"o\": \"open\",\n \"c\": \"close\",\n \"h\": \"high\",\n \"l\": \"low\",\n \"v\": \"volume\",\n \"n\": \"trades\",\n \"q\": \"quote_volume\",\n \"V\": \"taker_buy_asset_volume\",\n \"Q\": \"taker_buy_quote_volume\",\n}\n\nFUNCTION_MAPPER = {\n \"t\": lambda x: datetime.fromtimestamp(x / 1000).astimezone(pytz.utc),\n \"T\": lambda x: datetime.fromtimestamp(x / 1000).astimezone(pytz.utc),\n \"o\": lambda x: float(x),\n \"c\": lambda x: float(x),\n \"h\": lambda x: float(x),\n \"l\": lambda x: float(x),\n \"v\": lambda x: float(x),\n \"n\": lambda x: float(x),\n \"q\": lambda x: float(x),\n \"V\": lambda x: float(x),\n \"Q\": lambda x: float(x),\n}\n\n\nBINANCE_KEY = {\n \"open_time\": lambda x: datetime.fromtimestamp(x[0] / 1000).astimezone(pytz.timezone('UTC')),\n \"close_time\": lambda x: datetime.fromtimestamp(x[6] / 1000).astimezone(pytz.timezone('UTC')),\n \"open\": lambda x: float(x[1]),\n \"high\": lambda x: float(x[2]),\n \"low\": lambda x: float(x[3]),\n \"close\": lambda x: float(x[4]),\n \"volume\": lambda x: float(x[5]),\n \"quote_volume\": lambda x: float(x[7]),\n \"trades\": lambda x: int(x[8]),\n \"taker_buy_asset_volume\": lambda x: float(x[9]),\n \"taker_buy_quote_volume\": lambda x: float(x[10]),\n}\n","repo_name":"diogomatoschaves/MyCryptoBot","sub_path":"shared/exchanges/binance/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"80"} +{"seq_id":"70246022339","text":"import torch\n\n\nclass DataSampler(object):\n \"\"\"Samples training points.\"\"\"\n def __init__(self, nr_of_points=1000, p_surface=0.3, p_offset=0.3, p_grid=0.4):\n assert p_surface + p_offset + p_grid == 1\n self.nr_of_points = nr_of_points\n self.p_surface = p_surface\n self.p_offset = p_offset\n self.p_grid = p_grid\n\n def __call__(self, data):\n surface_points = int(self.nr_of_points * self.p_surface)\n offset_points = int(self.nr_of_points * self.p_offset)\n grid_points = int(self.nr_of_points * self.p_grid)\n\n perm = torch.randperm(data[\"surface_points\"].shape[0])\n idx = perm[:surface_points]\n surface_samples = data[\"surface_points\"][idx]\n surface_samples_sdf = data[\"surface_sdf\"][idx]\n\n perm = torch.randperm(data[\"offset_points\"].shape[0])\n idx = perm[:offset_points]\n offset_samples = data[\"offset_points\"][idx]\n offset_samples_sdf = data[\"offset_sdf\"][idx]\n\n perm = torch.randperm(data[\"grid_points\"].shape[0])\n idx = perm[:grid_points]\n grid_samples = data[\"grid_points\"][idx]\n grid_samples_sdf = 
data[\"grid_sdf\"][idx]\n\n coords = torch.cat((surface_samples, offset_samples, grid_samples), dim=0).float()\n sdf = torch.cat((surface_samples_sdf, offset_samples_sdf, grid_samples_sdf), dim=0).float()\n\n sampled_data = {\n \"surface_points\": data[\"surface_points\"],\n \"surface_normals\": data[\"surface_normals\"],\n \"sampled_points\": coords,\n \"sampled_sdf\": sdf,\n \"centroid\": data.get(\"centroid\"),\n }\n\n return sampled_data\n","repo_name":"LucasKre/teeth_learning","sub_path":"dataset/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"73929267457","text":"import numpy as np\t\t#import numpy lib\n\ndef main():\t\t\t\n\ti = 0\t\t\t\t#declare integer\n\tn = 10\t\t\t\t#another integer\n\tx = 119.0\t\t\t#this is a float \".\"\n\t\n\t#numpy can be used to declare arrays\n\t\t\n\ty = np.zeros(n,dtype=float)\t\t#10 zeros as floats\n\t\n\t#use for loop to iterate with a variable\n\t\n\tfor i in range(n):\t\t#i in range [0,n01]\n\t\ty[i] = 2.0 * float(i) + 1.\t#set y = 2i+1 as float\n\t\t\n\t#iterate through a variable\n\t\t\n\tfor y_element in y:\n\t\tprint(y_element)\n\n#execute the function\n\t\nif __name__ == \"__main__\":\n\tmain()","repo_name":"ddouty119/astr-119-hw-1","sub_path":"variables_and_loops.py","file_name":"variables_and_loops.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"30670247077","text":"from random import randint\n\n\nclass CellData:\n data = []\n rows = 0\n cols = 0\n\n def __init__(self, rows: int, cols: int):\n self.rows = rows\n self.cols = cols\n self.clear()\n\n def clear(self):\n data2 = []\n for row in range(0, self.rows):\n data2.append([False for col in range(0, self.cols)])\n self.data = data2\n\n def create(self):\n data2 = []\n for row in range(0, self.rows):\n data2.append([(randint(0, 9) <= 2)\n for col in range(0, self.cols)])\n self.data = data2\n\n def count_livingcells_around(self, row: int, col: int):\n # 周囲の生存セルを数える\n cnt = 0\n tbl = [(-1, -1), (0, -1), (1, -1), (1, 0),\n (1, 1), (0, 1), (-1, 1), (-1, 0)]\n for t in tbl:\n check_col, check_row = [col + t[0], row + t[1]]\n if 0 <= check_col < self.cols and 0 <= check_row < self.rows:\n if self.data[check_row][check_col]:\n cnt += 1\n return cnt\n\n def is_next_live(self, cell: bool, count: int):\n if count == 3:\n return True\n if cell:\n if 2 <= count <= 3:\n return True\n return False\n return cell\n\n def check(self, row, col):\n count = self.count_livingcells_around(row=row, col=col)\n return self.is_next_live(self.data[row][col], count)\n\n def next_turn(self):\n data2 = []\n for row in range(0, self.rows):\n data2.append([self.check(row, col) for col in range(0, self.cols)])\n self.data = data2\n","repo_name":"h-ueno2/lifegame_py","sub_path":"lifegame/cell_data.py","file_name":"cell_data.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"25604976986","text":"import random\nclass Person:\n def __init__(self, name):\n self.name = name\n self.myday = MyDay()\n def morning(self):\n self.myday.event1()\n def breakfast(self):\n self.myday.event2()\n def way_to_work(self):\n self.myday.event3()\n def work(self):\n self.myday.event4()\n def evening(self):\n self.myday.event5()\n def mood(self):\n global x\n global y\n if self.myday.mood in 
range(-23, -12):\n            x = \"terrible\"\n        elif self.myday.mood in range(-12, -5):\n            x = \"bad\"\n        elif self.myday.mood in range(-5, 5):\n            x = \"normal\"\n        elif self.myday.mood in range(5, 12):\n            x = \"good\"\n        elif self.myday.mood in range(12, 23):\n            x = \"great\"\n        if self.myday.health in range(-12, -4):\n            y = \"not the best\"\n        elif self.myday.health in range(-4, 4):\n            y = \"normal\"\n        elif self.myday.health in range(4, 12):\n            y = \"good\"\n        print(self.name, \" now has a \", x, \" mood and \", y, \" well-being\")\n        print(self.name, \"currently has\", self.myday.money, \"UAH on hand\")\nclass MyDay:\n    def __init__(self):\n        self.mood = 0\n        self.health = 0\n        self.money = 10000\n        self.__action1 = [\"Woke up after a sweet 8-hour sleep\",\n                          \"Partied all night at a nightclub\", \"Pretended to be in a bad mood after nightmares\"]\n        self.__action2 = [\"Had oatmeal with fresh berries for breakfast\", \"Hastily ate a toast with jam\",\n                          \"Was running late for work and had no time to eat in the morning\"]\n        self.__action3 = [\"Had a tasty coffee at Starbucks on the way to work\",\n                          \"Missed the bus and had to order a taxi\",\n                          \"Rode his bike to work and enjoyed the fresh air\"]\n        self.__action4 = [\"Completed the day's plan at work and received praise from the boss in the form of a bonus\",\n                          \"Couldn't concentrate and the workday was unproductive\",\n                          \"The boss was angry at you for being late and issued a fine\"]\n        self.__action5 = [\"Arriving home, ordered a pizza and watched his favorite series\",\n                          \"Was very tired after work, so went to bed without dinner\",\n                          \"Had his wallet stolen in the subway on the way home\"]\n    def event1(self):\n        print(\"The day has begun):\")\n        x = random.randint(0, 2)\n        if x == 0:\n            print(self.__action1[0])\n            self.mood = self.mood + 3\n            self.health = self.health + 4\n        elif x == 1:\n            print(self.__action1[1])\n            self.mood = self.mood + 4\n            self.health = self.health - 3\n        else:\n            print(self.__action1[2])\n            self.mood = self.mood - 4\n            self.health = self.health - 2\n    def event2(self):\n        x = random.randint(0, 2)\n        if x == 0:\n            print(self.__action2[0])\n            self.mood = self.mood + 2\n            self.health = self.health + 3\n        elif x == 1:\n            print(self.__action2[1])\n            self.mood = self.mood + 1\n            self.health = self.health - 1\n        else:\n            print(self.__action2[2])\n            self.mood = self.mood - 3\n            self.health = self.health - 3\n    def event3(self):\n        x = random.randint(0, 2)\n        if x == 0:\n            print(self.__action3[0])\n            self.mood = self.mood + 4\n            self.health = self.health - 1\n            self.money = self.money - 100\n            print(\"Money = \", self.money, \"UAH\")\n        elif x == 1:\n            print(self.__action3[1])\n            self.mood = self.mood - 2\n            self.money = self.money - 500\n            print(\"Money = \", self.money, \"UAH\")\n        else:\n            print(self.__action3[2])\n            self.mood = self.mood + 2\n            self.health = self.health + 4\n    def event4(self):\n        x = random.randint(0, 2)\n        if x == 0:\n            print(self.__action4[0])\n            self.mood = self.mood + 3\n            self.money = self.money + 3000\n            print(\"Money = \", self.money, \"UAH\")\n        elif x == 1:\n            print(self.__action4[1])\n            self.mood = self.mood - 3\n        else:\n            print(self.__action4[2])\n            self.mood = self.mood - 5\n            self.money = self.money - 1500\n            print(\"Money = \", self.money, \"UAH\")\n    def event5(self):\n        x = random.randint(0, 2)\n        if x == 0:\n            print(self.__action5[0])\n            self.mood = self.mood + 3\n            self.health = self.health - 1\n            self.money = self.money - 200\n            print(\"Money = \", self.money, \"UAH\")\n        elif x == 1:\n            print(self.__action5[1])\n            self.mood = self.mood - 1\n            self.health = 
self.health - 1\n        else:\n            print(self.__action5[2])\n            self.mood = self.mood - 5\n            self.money = self.money - 7000\n            print(\"Money = \", self.money, \"UAH\")\n        print(\"The day is over(\")\nDan = Person(\"Даня\")\nDan.mood()\nDan.morning()\nDan.breakfast()\nDan.way_to_work()\nDan.work()\nDan.evening()\nDan.mood()","repo_name":"DanPolishchuk/Uni_tasks","sub_path":"Dan_Polishchuk_1.7_project/Групове завдання.py","file_name":"Групове завдання.py","file_ext":"py","file_size_in_byte":6093,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}{"seq_id":"22786144818","text":"from turtle import Turtle\n\n\nclass Paddle(Turtle):\n    def __init__(self, position: tuple = (0, 0)):\n        super().__init__()\n        self.shape(\"square\")\n        self.shapesize(stretch_wid=5, stretch_len=1)\n        self.color(\"white\")\n        self.penup()\n        self.speed(0)\n        self.goto(position)\n\n    def move_up(self):\n        pos = self.ycor() + 20\n        self.setpos(self.xcor(), pos)\n\n    def move_down(self):\n        pos = self.ycor() - 20\n        self.setpos(self.xcor(), pos)\n","repo_name":"Zik-Tech/100-Days-Of-Python","sub_path":"Day-22/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"80"}{"seq_id":"26846885428","text":"\"\"\"\nYou are given a doubly linked list which in addition to the next and previous pointers, \nit could have a child pointer, which may or may not point to a separate doubly linked list. \nThese child lists may have one or more children of their own, and so on, \nto produce a multilevel data structure, as shown in the example below.\n\nFlatten the list so that all the nodes appear in a single-level, \ndoubly linked list. You are given the head of the first level of the list.\n\nExample 1:\n\nInput: head = [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]\nOutput: [1,2,3,7,8,11,12,9,10,4,5,6]\n\nHow multilevel linked list is represented in test case:\n\nWe use the multilevel linked list from Example 1 above:\n\n 1---2---3---4---5---6--NULL\n         |\n         7---8---9---10--NULL\n                 |\n                 11--12--NULL\nThe serialization of each level is as follows:\n\n[1,2,3,4,5,6,null]\n[7,8,9,10,null]\n[11,12,null]\nTo serialize all levels together we will add nulls in each level \nto signify no node connects to the upper node of the previous level. The serialization becomes:\n\n[1,2,3,4,5,6,null]\n[null,null,7,8,9,10,null]\n[null,11,12,null]\nMerging the serialization of each level and removing trailing nulls we obtain:\n\n[1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]\n\"\"\"\n\n\n# Definition for a Node.\nclass Node:\n    def __init__(self, val, prev, next, child):\n        self.val = val\n        self.prev = prev\n        self.next = next\n        self.child = child\n\n\nclass Solution:\n    # Approach:\n    # 1. start from the head, move one step each time to the next node\n    #    when p.child == None, p = p.next\n    # 2. 
当 p 有child节点,则转向child chain,一直遍历到child chain的end,将child chain的tail node\n # 链接回到原本的p.next\n # 通过这样做,我们就将child chain merge back to the main thread\n # 然后返回到p,继续move,当找到了有child节点的node,重复上述步骤,直到终点\n def flatten(self, head: Node) -> Node:\n if not head:\n return head\n \n p = head\n while p:\n if not p.child:\n p = p.next\n else:\n tmp = p.child\n # find the tail of the child chain\n while tmp.next:\n tmp = tmp.next\n # connect tail node to teh p.next\n tmp.next = p.next\n if p.next:\n p.next.prev = tmp\n # connect p.next with child chain, and remove p.child\n # to merge child chain back to the main thread\n p.next = p.child\n p.child.prev = p\n p.child = None\n p = p.next\n \n return head \n\n # 思路:利用stack进行DFS\n def flatten2(self, head: Node) -> Node:\n if not head:\n return head\n \n stk = [head]\n pre = Node(-1)\n while stk:\n root = stk.pop()\n root.prev = pre\n pre.next = root\n pre = root\n\n if root.next:\n stk.append(root.next)\n\n if root.child:\n stk.append(root.child)\n root.child = None\n \n head.prev = None\n return head\n\n # 思路:递归进行DFS\n def flatten3(self, head: Node) -> Node:\n cur = head\n self.dfs(cur)\n return head\n\n def dfs(self, cur: Node):\n prev = cur\n while cur:\n prev = cur\n if not cur.child:\n cur = cur.next\n continue\n # deal with node with child\n tmp = cur.next # save the original cur's next node, will be used, when back track\n cur.next = cur.child\n cur.child.prev = cur\n\n # recurse over the child node\n ret = self.dfs(cur.child) # ret is childtail\n cur.child = None\n \n if tmp:\n ret.next = tmp\n tmp.prev = ret\n cur = tmp\n else:\n cur = ret\n \n return prev","repo_name":"PRKKILLER/Algorithm_Practice","sub_path":"LeetCode/0430-Flatten a Multilevel Doubly Linked List/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"23150881822","text":"#! 
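# A minimal usage sketch for the flatten() implementations above (a sketch, not
# part of the original file; it assumes the Node and Solution definitions from
# that record): build the three-level list from Example 1 and check the order.
def _build_level(vals):
    nodes = [Node(v, None, None, None) for v in vals]
    for a, b in zip(nodes, nodes[1:]):
        a.next, b.prev = b, a
    return nodes

def _demo_flatten():
    l1 = _build_level([1, 2, 3, 4, 5, 6])
    l2 = _build_level([7, 8, 9, 10])
    l3 = _build_level([11, 12])
    l1[2].child = l2[0]   # 3 -> 7
    l2[1].child = l3[0]   # 8 -> 11
    head = Solution().flatten(l1[0])
    out = []
    while head:
        out.append(head.val)
        head = head.next
    # Matches Example 1: [1,2,3,7,8,11,12,9,10,4,5,6]
    assert out == [1, 2, 3, 7, 8, 11, 12, 9, 10, 4, 5, 6]

# Note: flatten2() constructs Node(-1) with a single argument, which does not
# match the four-argument Node constructor above; Node(-1, None, None, None)
# would be needed for it to run.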
/usr/bin/env python\n# vim: set fileencoding=utf-8\n\"\"\"Run experiments in parallel.\"\"\"\nimport os\nimport sys\n\nimport convert_experiment as cexp\n\nsys.path.append(os.path.expanduser('~/venvs/34/lib/python3.4/site-packages/'))\n\nif __name__ == '__main__':\n # pylint: disable=C0103\n from multiprocessing import Pool\n NUM_THREADS = 14\n cexp.NUM_THREADS = NUM_THREADS\n pool = Pool(NUM_THREADS)\n kind, n = int(sys.argv[1]), int(sys.argv[2])\n strategies = [cexp.redensify.PivotSelection.Uniform,\n cexp.redensify.PivotSelection.Preferential,\n cexp.redensify.PivotSelection.ByDegree]\n for s in strategies:\n if kind == 0:\n cexp.run_rings_experiment(n*n, n, pivot=s, shared_sign=True,\n rigged=False, one_at_a_time=True,\n n_rep=4*NUM_THREADS, pool=pool)\n if kind == 1:\n cexp.run_rings_experiment(2+2*n, n, pivot=s, shared_sign=True,\n rigged=False, one_at_a_time=True,\n n_rep=4*NUM_THREADS, pool=pool)\n if kind == 2:\n cexp.run_circle_experiment(n, one_at_a_time=True, rigged=False,\n n_rep=4*NUM_THREADS, pivot=s, pool=pool)\n if kind == 3:\n params = [(15, 5), (6, 30)][n]\n cexp.run_planted_experiment(params[0], params[1],\n one_at_a_time=True, pool=pool,\n n_rep=4*NUM_THREADS, pivot=s)\n pool.close()\n pool.join()\n","repo_name":"daureg/magnet","sub_path":"veverica/square_exp.py","file_name":"square_exp.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"23320392356","text":"'''\nhttps://www.acmicpc.net/problem/1991\n'''\nimport sys\n\ninput_ = sys.stdin.readline\n\nn = int(input_())\nbinary_tree = {}\n\nfor _ in range(n):\n parent, left, right = input_().split()\n binary_tree[parent] = (left, right)\n\npreorder_path = []\ninorder_path = []\npostorder_path = []\n\ndef preorder_traversal(node):\n if node == '.':\n return\n left, right = binary_tree[node]\n preorder_path.append(node)\n preorder_traversal(left)\n preorder_traversal(right)\n\ndef inorder_traversal(node):\n if node == '.':\n return\n left, right = binary_tree[node]\n inorder_traversal(left)\n inorder_path.append(node)\n inorder_traversal(right)\n\ndef postorder_traversal(node):\n if node == '.':\n return\n left, right = binary_tree[node]\n postorder_traversal(left)\n postorder_traversal(right)\n postorder_path.append(node)\n\npreorder_traversal('A')\ninorder_traversal('A')\npostorder_traversal('A')\n\nprint(''.join(preorder_path))\nprint(''.join(inorder_path))\nprint(''.join(postorder_path))\n","repo_name":"younghch/Algorithm","sub_path":"Python/Baekjoon/Tree/1991_traversal_tree.py","file_name":"1991_traversal_tree.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"7065935459","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as image\nimport networkx as nx\nimport pydot\n\n\nclass Tree:\n\n def __init__(self, root):\n self.root = root\n self.G = pydot.Dot(graph_type='digraph')\n\n def draw_graph(self):\n if len(self.root.next_node) is 0:\n self.G.add_node(pydot.Node(str(self.root.node_number), label=self.root.value))\n else:\n self.G.add_node(pydot.Node(str(self.root.node_number), label=self.root.attribute))\n self.add_nodes(self.root)\n\n self.G.write(\"DecisionTree.png\", format='png')\n img = image.imread(\"DecisionTree.png\", format='png')\n plt.imshow(img, aspect='equal')\n plt.axis('off')\n plt.show()\n\n def add_nodes(self, node):\n for key in node.next_node.keys():\n if 
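# A small self-check for the traversal script above (hypothetical input, not
# taken from the BOJ 1991 statement): with A -> (B, C) and leaf nodes B and C,
# the expected outputs are ABC / BAC / BCA.
def _check_traversals():
    binary_tree.clear()
    binary_tree.update({'A': ('B', 'C'), 'B': ('.', '.'), 'C': ('.', '.')})
    for path in (preorder_path, inorder_path, postorder_path):
        path.clear()
    preorder_traversal('A')
    inorder_traversal('A')
    postorder_traversal('A')
    assert ''.join(preorder_path) == 'ABC'
    assert ''.join(inorder_path) == 'BAC'
    assert ''.join(postorder_path) == 'BCA'
# For deep trees a higher recursion limit may be needed, e.g.
# sys.setrecursionlimit(10**6).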
len(node.next_node[key].next_node) is 0:\n self.G.add_node(pydot.Node(str(node.next_node[key].node_number), label=node.next_node[key].value))\n else:\n self.G.add_node(pydot.Node(str(node.next_node[key].node_number), label=node.next_node[key].attribute))\n self.add_nodes(node.next_node[key])\n self.G.add_edge(pydot.Edge(str(node.node_number), str(node.next_node[key].node_number), label=key))\n\n","repo_name":"DeKuczma/Decision-tree","sub_path":"Code/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"20094087055","text":"\"\"\"\r\nFaça um programa que peça o tamanho de um arquivo para download (em MB) e a velocidade de um link de Internet (em Mbps),\r\ncalcule e informe o tempo aproximado de download do arquivo usando este link (em minutos).\r\n\"\"\"\r\ntamanhoArquivo = float(input(\"Digite o tamanho do arquivo para download: \"))\r\nvelocidade = float(input(\"Digite a velocidade de um link de internet: \"))\r\nsegundos = tamanhoArquivo/velocidade\r\nminutos = int(segundos / 60)\r\nsegundos = segundos % 60\r\nprint(\"Tempo aproximado para download: \" , (minutos) ,\" minutos e \" , str(segundos) , \" segundos\")","repo_name":"Janerson-Alves/Exercicios-Estrutura-Sequencial-Python","sub_path":"ex18.py","file_name":"ex18.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"70374606660","text":"# pylint: disable=W0401,W0611\nimport logging\nimport re\nimport struct\nimport uuid\nfrom datetime import datetime\n\nfrom twisted.cred import error\nfrom twisted.internet import defer, reactor\n\nfrom smpp.pdu.constants import data_coding_default_value_map\nfrom smpp.pdu.error import (SMPPClientConnectionCorruptedError, SMPPRequestTimoutError,\n SMPPSessionInitTimoutError, SMPPProtocolError,\n SMPPGenericNackTransactionError, SMPPTransactionError,\n SMPPClientError, SessionStateError)\nfrom smpp.pdu.operations import SubmitSM, GenericNack\nfrom smpp.pdu.pdu_types import (CommandId, CommandStatus, DataCoding,\n DataCodingDefault, PDURequest, PDUResponse, EsmClassGsmFeatures)\nfrom smpp.twisted.protocol import SMPPClientProtocol as twistedSMPPClientProtocol\nfrom smpp.twisted.protocol import SMPPServerProtocol as twistedSMPPServerProtocol\nfrom smpp.twisted.protocol import (SMPPSessionStates, SMPPOutboundTxn,\n SMPPOutboundTxnResult)\nfrom .error import *\n\n# @todo: LOG_CATEGORY seems to be unused, check before removing it\nLOG_CATEGORY = \"smpp.twisted.protocol\"\n\n\nclass SMPPClientProtocol(twistedSMPPClientProtocol):\n def __init__(self):\n twistedSMPPClientProtocol.__init__(self)\n\n self.longSubmitSmTxns = {}\n\n def PDUReceived(self, pdu):\n self.log.debug(\"SMPP Client received PDU [command: %s, seq_number: %s, command_status: %s]\",\n pdu.commandId, pdu.seqNum, pdu.status)\n self.log.debug(\"Complete PDU dump: %s\", pdu)\n self.factory.stats.set('last_received_pdu_at', datetime.now())\n\n # A better version than vendor's PDUReceived method:\n # - Dont re-encode pdu !\n # if self.log.isEnabledFor(logging.DEBUG):\n # encoded = self.encoder.encode(pdu)\n # self.log.debug(\"Receiving data [%s]\" % _safelylogOutPdu(encoded))\n\n # Signal SMPP operation\n self.onSMPPOperation()\n\n if isinstance(pdu, PDURequest):\n self.PDURequestReceived(pdu)\n elif isinstance(pdu, PDUResponse):\n self.PDUResponseReceived(pdu)\n else:\n getattr(self, \"onPDU_%s\" % 
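# Two side notes on the records above. First, the Tree class compares lengths
# with `is 0`; identity comparison against an int literal is unreliable, and
# `== 0` (or simply `if not node.next_node:`) is the correct idiom. Second, a
# tidier divmod version of the download-time exercise (a sketch; identifiers
# are English translations of the Portuguese originals):
def download_time(size_mb: float, speed_mbps: float) -> tuple:
    # The exercise treats size/speed as seconds directly; multiplying the size
    # by 8 (megabytes -> megabits) would give the physically correct figure.
    seconds = size_mb / speed_mbps
    minutes, seconds = divmod(seconds, 60)
    return int(minutes), seconds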
pdu.commandId.name)(pdu)\n\n def connectionMade(self):\n twistedSMPPClientProtocol.connectionMade(self)\n self.factory.stats.set('connected_at', datetime.now())\n self.factory.stats.inc('connected_count')\n\n self.log.info(\"Connection made to %s:%s\", self.config().host, self.config().port)\n\n self.factory.connectDeferred.callback(self)\n\n def connectionLost(self, reason):\n twistedSMPPClientProtocol.connectionLost(self, reason)\n\n self.factory.stats.set('disconnected_at', datetime.now())\n self.factory.stats.inc('disconnected_count')\n\n def doPDURequest(self, reqPDU, handler):\n twistedSMPPClientProtocol.doPDURequest(self, reqPDU, handler)\n\n # Stats\n if reqPDU.commandId == CommandId.enquire_link:\n self.factory.stats.set('last_received_elink_at', datetime.now())\n elif reqPDU.commandId == CommandId.deliver_sm:\n self.factory.stats.inc('deliver_sm_count')\n elif reqPDU.commandId == CommandId.data_sm:\n self.factory.stats.inc('data_sm_count')\n\n def PDUResponseReceived(self, pdu):\n twistedSMPPClientProtocol.PDUResponseReceived(self, pdu)\n\n if pdu.commandId == CommandId.submit_sm_resp:\n if pdu.status == CommandStatus.ESME_RTHROTTLED:\n self.factory.stats.inc('throttling_error_count')\n elif pdu.status != CommandStatus.ESME_ROK:\n self.factory.stats.inc('other_submit_error_count')\n else:\n # We got a ESME_ROK\n self.factory.stats.inc('submit_sm_count')\n\n def sendPDU(self, pdu):\n twistedSMPPClientProtocol.sendPDU(self, pdu)\n\n # Stats:\n self.factory.stats.set('last_sent_pdu_at', datetime.now())\n if pdu.commandId == CommandId.enquire_link:\n self.factory.stats.set('last_sent_elink_at', datetime.now())\n self.factory.stats.inc('elink_count')\n elif pdu.commandId == CommandId.submit_sm:\n self.factory.stats.inc('submit_sm_request_count')\n\n def claimSeqNum(self):\n seqNum = twistedSMPPClientProtocol.claimSeqNum(self)\n\n self.factory.stats.set('last_seqNum_at', datetime.now())\n self.factory.stats.set('last_seqNum', seqNum)\n\n return seqNum\n\n def bindSucceeded(self, result, nextState):\n self.factory.stats.set('bound_at', datetime.now())\n self.factory.stats.inc('bound_count')\n\n return twistedSMPPClientProtocol.bindSucceeded(self, result, nextState)\n\n def bindAsReceiver(self):\n \"\"\"This is a different signature where msgHandler is taken from factory\n \"\"\"\n return twistedSMPPClientProtocol.bindAsReceiver(self, self.factory.msgHandler)\n\n def bindAsTransceiver(self):\n \"\"\"This is a different signature where msgHandler is taken from factory\n \"\"\"\n return twistedSMPPClientProtocol.bindAsTransceiver(self, self.factory.msgHandler)\n\n def bindFailed(self, reason):\n self.log.error(\"Bind failed [%s]. 
Disconnecting...\", reason)\n self.disconnect()\n if reason.check(SMPPRequestTimoutError):\n raise SMPPSessionInitTimoutError(str(reason))\n\n def endOutboundTransaction(self, respPDU):\n txn = self.closeOutboundTransaction(respPDU.seqNum)\n\n if txn is not None:\n # Any status of a SubmitSMResp must be handled as a normal status\n if isinstance(txn.request, SubmitSM) or respPDU.status == CommandStatus.ESME_ROK:\n if not isinstance(respPDU, txn.request.requireAck):\n txn.ackDeferred.errback(\n SMPPProtocolError, \"Invalid PDU response type [%s] returned for request type [%s]\" % (\n type(respPDU), type(txn.request)))\n return\n # Do callback\n txn.ackDeferred.callback(SMPPOutboundTxnResult(self, txn.request, respPDU))\n return\n\n if isinstance(respPDU, GenericNack):\n txn.ackDeferred.errback(SMPPGenericNackTransactionError(respPDU, txn.request))\n return\n\n txn.ackDeferred.errback(SMPPTransactionError(respPDU, txn.request))\n\n def cancelOutboundTransactions(self, err):\n \"\"\"Cancels LongSubmitSmTransactions when cancelling OutboundTransactions\n \"\"\"\n twistedSMPPClientProtocol.cancelOutboundTransactions(self, err)\n self.cancelLongSubmitSmTransactions(err)\n\n def cancelLongSubmitSmTransactions(self, err):\n for item in list(self.longSubmitSmTxns.values()):\n reqPDU = item['txn'].request\n\n self.log.exception(err)\n txn = self.closeLongSubmitSmTransaction(reqPDU.LongSubmitSm['msg_ref_num'])\n # Do errback\n txn.ackDeferred.errback(err)\n\n def startLongSubmitSmTransaction(self, reqPDU, timeout):\n if reqPDU.LongSubmitSm['msg_ref_num'] in self.longSubmitSmTxns:\n self.log.error(\n 'Transaction with msg_ref_num [%s] is already in progress, open longSubmitSmTxns count: %s',\n reqPDU.LongSubmitSm['msg_ref_num'],\n len(self.longSubmitSmTxns))\n raise LongSubmitSmTransactionError(\n 'Transaction with msg_ref_num [%s] already in progress.' % reqPDU.LongSubmitSm['msg_ref_num'])\n\n # Create callback deferred\n ackDeferred = defer.Deferred()\n # Create response timer\n timer = reactor.callLater(timeout, self.onResponseTimeout, reqPDU, timeout)\n # Save transaction\n self.longSubmitSmTxns[reqPDU.LongSubmitSm['msg_ref_num']] = {\n 'txn': SMPPOutboundTxn(reqPDU, timer, ackDeferred),\n 'nack_count': reqPDU.LongSubmitSm['total_segments']}\n self.log.debug(\"Long submit_sm transaction started with msg_ref_num %s\",\n reqPDU.LongSubmitSm['msg_ref_num'])\n return ackDeferred\n\n def closeLongSubmitSmTransaction(self, msg_ref_num):\n self.log.debug(\"Long submit_sm transaction finished with msg_ref_num %s\", msg_ref_num)\n\n txn = self.longSubmitSmTxns[msg_ref_num]['txn']\n # Remove txn\n del self.longSubmitSmTxns[msg_ref_num]\n # Cancel response timer\n if txn.timer.active():\n txn.timer.cancel()\n\n return txn\n\n def endLongSubmitSmTransaction(self, _SMPPOutboundTxnResult):\n reqPDU = _SMPPOutboundTxnResult.request\n respPDU = _SMPPOutboundTxnResult.response\n\n # Do we have txn with the given ref ?\n if reqPDU.LongSubmitSm['msg_ref_num'] not in self.longSubmitSmTxns:\n self.log.error(\n 'Received a submit_sm_resp in a unknown transaction with msg_ref_num [%s], open longSubmitSmTxns count: %s',\n reqPDU.LongSubmitSm['msg_ref_num'],\n len(self.longSubmitSmTxns)\n )\n raise LongSubmitSmTransactionError(\n 'Received a submit_sm_resp in a unknown transaction with msg_ref_num [%s].' 
% reqPDU.LongSubmitSm[\n 'msg_ref_num'])\n\n # Decrement pending ACKs\n if self.longSubmitSmTxns[reqPDU.LongSubmitSm['msg_ref_num']]['nack_count'] > 0:\n self.longSubmitSmTxns[reqPDU.LongSubmitSm['msg_ref_num']]['nack_count'] -= 1\n self.log.debug(\n \"Long submit_sm transaction with msg_ref_num %s has been updated, nack_count: %s\",\n reqPDU.LongSubmitSm['msg_ref_num'],\n self.longSubmitSmTxns[reqPDU.LongSubmitSm['msg_ref_num']]['nack_count'])\n\n # End the transaction if no more pending ACKs\n if self.longSubmitSmTxns[reqPDU.LongSubmitSm['msg_ref_num']]['nack_count'] == 0:\n txn = self.closeLongSubmitSmTransaction(reqPDU.LongSubmitSm['msg_ref_num'])\n\n # Do callback\n txn.ackDeferred.callback(SMPPOutboundTxnResult(self, txn.request, respPDU))\n\n def endLongSubmitSmTransactionErr(self, failure):\n # Return on generic NACK\n try:\n failure.raiseException()\n except SMPPClientConnectionCorruptedError as _:\n return\n\n def preSubmitSm(self, pdu):\n \"\"\"Will:\n - Make validation steps\n - Transform unparseable data (because SubmitSm may come from http-api through PB)\n \"\"\"\n # Convert data_coding from int to DataCoding object\n if 'data_coding' in pdu.params and isinstance(pdu.params['data_coding'], int):\n intVal = pdu.params['data_coding']\n if intVal in data_coding_default_value_map:\n name = data_coding_default_value_map[intVal]\n pdu.params['data_coding'] = DataCoding(schemeData=getattr(DataCodingDefault, name))\n else:\n pdu.params['data_coding'] = None\n\n # Set default source_addr if not defined\n if pdu.params['source_addr'] is None and self.config().source_addr is not None:\n pdu.params['source_addr'] = self.config().source_addr\n\n def doSendRequest(self, pdu, timeout):\n if self.connectionCorrupted:\n raise SMPPClientConnectionCorruptedError()\n if not isinstance(pdu, PDURequest) or pdu.requireAck is None:\n raise SMPPClientError(\"Invalid PDU to send: %s\" % pdu)\n\n if pdu.commandId == CommandId.submit_sm:\n # Start a LongSubmitSmTransaction if pdu is a long submit_sm and send multiple\n # pdus, each with an OutboundTransaction\n # - Every OutboundTransaction is closed upon receiving the correct submit_sm_resp\n # - Every LongSubmitSmTransaction is closed upong closing all included OutboundTransactions\n #\n # Update 20150709 #234:\n # If the pdu has no nextPdu attribute then it may be a part of a long message not managed\n # by Jasmin: it may come from SMPPs already parted, in this case Jasmin must pass the\n # message as is without starting LongSubmitSmTransaction.\n # The downside of this behaviour is that each part of the message will be logged in a single\n # line in messages.log\n\n # UDH is set ?\n UDHI_INDICATOR_SET = False\n if hasattr(pdu.params['esm_class'], 'gsmFeatures'):\n for gsmFeature in pdu.params['esm_class'].gsmFeatures:\n if gsmFeature == EsmClassGsmFeatures.UDHI_INDICATOR_SET:\n UDHI_INDICATOR_SET = True\n break\n\n # Discover any splitting method, otherwise, it is a single SubmitSm\n if 'sar_msg_ref_num' in pdu.params:\n splitMethod = 'sar'\n elif UDHI_INDICATOR_SET and pdu.params['short_message'][:3] == b'\\x05\\x00\\x03':\n splitMethod = 'udh'\n else:\n splitMethod = None\n\n if splitMethod is not None and hasattr(pdu, 'nextPdu'):\n partedSmPdu = pdu\n first = True\n\n # Iterate through parted PDUs\n while True:\n partedSmPdu.seqNum = self.claimSeqNum()\n\n # Set LongSubmitSm tracking flags in pdu:\n partedSmPdu.LongSubmitSm = {'msg_ref_num': None, 'total_segments': None,\n 'segment_seqnum': None}\n if splitMethod == 'sar':\n # Using SAR 
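                        # For reference, the standard GSM 03.40 concatenation
                        # UDH (8-bit reference) matched by the b'\x05\x00\x03'
                        # check above is laid out as:
                        #   byte 0: UDHL = 0x05  (header length)
                        #   byte 1: IEI  = 0x00  (concatenated SM, 8-bit ref)
                        #   byte 2: IEDL = 0x03  (three information bytes follow)
                        #   byte 3: msg_ref_num, byte 4: total_segments,
                        #   byte 5: segment_seqnum
                        # which is why the UDH branch reads short_message[3],
                        # [4] and [5].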
options:\n partedSmPdu.LongSubmitSm['msg_ref_num'] = partedSmPdu.params['sar_msg_ref_num']\n partedSmPdu.LongSubmitSm['total_segments'] = partedSmPdu.params['sar_total_segments']\n partedSmPdu.LongSubmitSm['segment_seqnum'] = partedSmPdu.params['sar_segment_seqnum']\n elif splitMethod == 'udh':\n # Using UDH options:\n partedSmPdu.LongSubmitSm['msg_ref_num'] = pdu.params['short_message'][3]\n partedSmPdu.LongSubmitSm['total_segments'] = pdu.params['short_message'][4]\n partedSmPdu.LongSubmitSm['segment_seqnum'] = pdu.params['short_message'][5]\n\n self.preSubmitSm(partedSmPdu)\n self.sendPDU(partedSmPdu)\n # Unlike parent protocol's sendPDU, we don't return per pdu\n # deferred, we'll return per transaction deferred instead\n self.startOutboundTransaction(\n partedSmPdu, timeout).addCallbacks(self.endLongSubmitSmTransaction,\n self.endLongSubmitSmTransactionErr)\n\n # Start a transaction using the first parted PDU\n if first:\n first = False\n txn = self.startLongSubmitSmTransaction(partedSmPdu, timeout)\n\n try:\n # There still another PDU to go for\n partedSmPdu = partedSmPdu.nextPdu\n except AttributeError:\n break\n\n return txn\n else:\n self.preSubmitSm(pdu)\n\n return twistedSMPPClientProtocol.doSendRequest(self, pdu, timeout)\n\n def sendDataRequest(self, pdu):\n \"\"\"If pdu has a 'vendor_specific_bypass' tag, it will be deleted before sending it\n\n This is a workaround to let Jasmin accepts messages with vendor TLVs but not forwarding them\n to upstream connectors.\n\n Related to #325\n \"\"\"\n if pdu.commandId == CommandId.submit_sm and 'vendor_specific_bypass' in pdu.params:\n del pdu.params['vendor_specific_bypass']\n\n return twistedSMPPClientProtocol.sendDataRequest(self, pdu)\n\n\nclass SMPPServerProtocol(twistedSMPPServerProtocol):\n def __init__(self):\n twistedSMPPServerProtocol.__init__(self)\n\n # Divert received messages to the handler defined in the config\n # Note:\n # twistedSMPPServerProtocol is using a msgHandler from self.config(), this\n # SMPPServerProtocol is using self.factory's msgHandler just like SMPPClientProtocol\n self.dataRequestHandler = lambda *args: self.factory.msgHandler(self.system_id, *args)\n self.system_id = None\n self.user = None\n self.bind_type = None\n self.session_id = str(uuid.uuid4())\n self.log = logging.getLogger(LOG_CATEGORY)\n\n def PDUReceived(self, pdu):\n self.log.debug(\n \"SMPP Server received PDU from system '%s' [command: %s, seq_number: %s, command_status: %s]\",\n self.system_id, pdu.commandId, pdu.seqNum, pdu.status)\n self.log.debug(\"Complete PDU dump: %s\", pdu)\n self.factory.stats.set('last_received_pdu_at', datetime.now())\n\n # A better version than vendor's PDUReceived method:\n # - Dont re-encode pdu !\n # if self.log.isEnabledFor(logging.DEBUG):\n # encoded = self.encoder.encode(pdu)\n # self.log.debug(\"Receiving data [%s]\" % _safelylogOutPdu(encoded))\n\n # Signal SMPP operation\n self.onSMPPOperation()\n\n if isinstance(pdu, PDURequest):\n self.PDURequestReceived(pdu)\n elif isinstance(pdu, PDUResponse):\n self.PDUResponseReceived(pdu)\n else:\n getattr(self, \"onPDU_%s\" % pdu.commandId.name)(pdu)\n\n def connectionMade(self):\n twistedSMPPServerProtocol.connectionMade(self)\n self.factory.stats.inc('connect_count')\n self.factory.stats.inc('connected_count')\n\n def connectionLost(self, reason):\n twistedSMPPServerProtocol.connectionLost(self, reason)\n\n self.factory.stats.inc('disconnect_count')\n self.factory.stats.dec('connected_count')\n if self.sessionState in [SMPPSessionStates.BOUND_RX,\n 
SMPPSessionStates.BOUND_TX,\n SMPPSessionStates.BOUND_TRX]:\n if self.bind_type == CommandId.bind_transceiver:\n self.factory.stats.dec('bound_trx_count')\n elif self.bind_type == CommandId.bind_receiver:\n self.factory.stats.dec('bound_rx_count')\n elif self.bind_type == CommandId.bind_transmitter:\n self.factory.stats.dec('bound_tx_count')\n\n def onPDURequest_enquire_link(self, reqPDU):\n twistedSMPPServerProtocol.onPDURequest_enquire_link(self, reqPDU)\n\n self.factory.stats.set('last_received_elink_at', datetime.now())\n self.factory.stats.inc('elink_count')\n if self.user is not None:\n self.user.getCnxStatus().smpps['elink_count'] += 1\n\n def doPDURequest(self, reqPDU, handler):\n twistedSMPPServerProtocol.doPDURequest(self, reqPDU, handler)\n\n # Stats\n if reqPDU.commandId == CommandId.enquire_link:\n self.factory.stats.set('last_received_elink_at', datetime.now())\n elif reqPDU.commandId == CommandId.submit_sm:\n self.factory.stats.inc('submit_sm_request_count')\n\n def sendPDU(self, pdu):\n twistedSMPPServerProtocol.sendPDU(self, pdu)\n\n # Prepare for logging\n if pdu.commandId in [CommandId.deliver_sm, CommandId.data_sm]:\n message_content = pdu.params.get('short_message', None)\n if message_content is None:\n message_content = pdu.params.get('message_payload', '')\n\n # Do not log text for privacy reasons\n # Added in #691\n if self.config().log_privacy:\n logged_content = '** %s byte content **' % len(message_content)\n else:\n logged_content = '%r' % re.sub(rb'[^\\x20-\\x7E]+', b'.', message_content)\n\n # Stats:\n self.factory.stats.set('last_sent_pdu_at', datetime.now())\n if pdu.commandId == CommandId.deliver_sm:\n self.factory.stats.inc('deliver_sm_count')\n if self.user is not None:\n self.log.info(\n 'DELIVER_SM [uid:%s] [from:%s] [to:%s] [content:%s]',\n self.user.uid,\n pdu.params['source_addr'],\n pdu.params['destination_addr'],\n logged_content)\n self.user.getCnxStatus().smpps['deliver_sm_count'] += 1\n elif pdu.commandId == CommandId.data_sm:\n self.factory.stats.inc('data_sm_count')\n if self.user is not None:\n self.log.info('DATA_SM [uid:%s] [from:%s] [to:%s] [content:%s]',\n self.user.uid,\n pdu.params['source_addr'],\n pdu.params['destination_addr'],\n logged_content)\n self.user.getCnxStatus().smpps['data_sm_count'] += 1\n elif pdu.commandId == CommandId.submit_sm_resp:\n if pdu.status == CommandStatus.ESME_RTHROTTLED:\n self.factory.stats.inc('throttling_error_count')\n if self.user is not None:\n self.user.getCnxStatus().smpps['throttling_error_count'] += 1\n elif pdu.status != CommandStatus.ESME_ROK:\n self.factory.stats.inc('other_submit_error_count')\n if self.user is not None:\n self.user.getCnxStatus().smpps['other_submit_error_count'] += 1\n else:\n # We got a ESME_ROK\n self.factory.stats.inc('submit_sm_count')\n if self.user is not None:\n self.user.getCnxStatus().smpps['submit_sm_count'] += 1\n\n def onPDURequest_unbind(self, reqPDU):\n twistedSMPPServerProtocol.onPDURequest_unbind(self, reqPDU)\n\n self.factory.stats.inc('unbind_count')\n if self.bind_type == CommandId.bind_transceiver:\n self.factory.stats.dec('bound_trx_count')\n elif self.bind_type == CommandId.bind_receiver:\n self.factory.stats.dec('bound_rx_count')\n elif self.bind_type == CommandId.bind_transmitter:\n self.factory.stats.dec('bound_tx_count')\n\n def PDUDataRequestReceived(self, reqPDU):\n if self.sessionState == SMPPSessionStates.BOUND_RX:\n # Don't accept submit_sm PDUs when BOUND_RX\n errMsg = 'Received submit_sm when BOUND_RX %s' % reqPDU\n 
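            # For context (SMPP 3.4 bind semantics, summarised): a receiver-bound
            # session may only receive deliver_sm/data_sm; submit_sm is legal only
            # on BOUND_TX/BOUND_TRX sessions, and anything else is answered with
            # ESME_RINVBNDSTS, which is exactly what this branch returns.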
self.cancelOutboundTransactions(SessionStateError(errMsg, CommandStatus.ESME_RINVBNDSTS))\n return self.fatalErrorOnRequest(reqPDU, errMsg, CommandStatus.ESME_RINVBNDSTS)\n\n return twistedSMPPServerProtocol.PDUDataRequestReceived(self, reqPDU)\n\n def PDURequestReceived(self, reqPDU):\n # Handle only accepted command ids\n acceptedPDUs = [CommandId.submit_sm, CommandId.bind_transmitter,\n CommandId.bind_receiver, CommandId.bind_transceiver,\n CommandId.unbind, CommandId.unbind_resp,\n CommandId.enquire_link, CommandId.data_sm]\n if reqPDU.commandId not in acceptedPDUs:\n errMsg = 'Received unsupported pdu type: %s' % reqPDU.commandId\n self.cancelOutboundTransactions(SessionStateError(errMsg, CommandStatus.ESME_RSYSERR))\n return self.fatalErrorOnRequest(reqPDU, errMsg, CommandStatus.ESME_RSYSERR)\n\n twistedSMPPServerProtocol.PDURequestReceived(self, reqPDU)\n\n # Update CnxStatus\n if self.user is not None:\n self.user.getCnxStatus().smpps['last_activity_at'] = datetime.now()\n\n @defer.inlineCallbacks\n def doBindRequest(self, reqPDU, sessionState):\n bind_type = reqPDU.commandId\n\n # Update stats\n if bind_type == CommandId.bind_transceiver:\n self.factory.stats.inc('bind_trx_count')\n elif bind_type == CommandId.bind_receiver:\n self.factory.stats.inc('bind_rx_count')\n elif bind_type == CommandId.bind_transmitter:\n self.factory.stats.inc('bind_tx_count')\n\n # Check the authentication\n username = reqPDU.params['system_id'].decode()\n password = reqPDU.params['password'].decode()\n\n # Authenticate username and password\n try:\n iface, auth_avatar, logout = yield self.factory.login(\n username,\n password,\n self.transport.getPeer().host)\n except error.UnauthorizedLogin as e:\n self.log.debug('From host %s and using password: %s', self.transport.getPeer().host, password)\n self.log.warning('SMPP Bind request failed for username: \"%s\", reason: %s', username, str(e))\n self.sendErrorResponse(reqPDU, CommandStatus.ESME_RINVPASWD, username)\n return\n\n # Check we're not already bound, and are open to being bound\n if self.sessionState != SMPPSessionStates.OPEN:\n self.log.warning('Duplicate SMPP bind request received from: %s', username)\n self.sendErrorResponse(reqPDU, CommandStatus.ESME_RALYBND, username)\n return\n\n # Check that username hasn't exceeded number of allowed binds\n if not self.factory.canOpenNewConnection(auth_avatar, bind_type):\n self.log.warning('SMPP System %s has exceeded maximum number of %s bindings',\n username, bind_type)\n self.sendErrorResponse(reqPDU, CommandStatus.ESME_RBINDFAIL, username)\n return\n\n # If we get to here, bind successfully\n self.user = auth_avatar\n self.system_id = username\n self.sessionState = sessionState\n self.bind_type = bind_type\n\n self.factory.addBoundConnection(self, self.user)\n bound_cnxns = self.factory.getBoundConnections(self.system_id)\n self.log.debug('Bind request succeeded for %s in session [%s]. 
%d active binds',\n                       username, self.session_id, bound_cnxns.getBindingCount() if bound_cnxns else 0)\n        self.sendResponse(reqPDU, system_id=self.system_id)\n\n        # Update stats\n        if bind_type == CommandId.bind_transceiver:\n            self.factory.stats.inc('bound_trx_count')\n        elif bind_type == CommandId.bind_receiver:\n            self.factory.stats.inc('bound_rx_count')\n        elif bind_type == CommandId.bind_transmitter:\n            self.factory.stats.inc('bound_tx_count')\n\n    def sendDataRequest(self, pdu):\n        \"\"\"If pdu has a 'vendor_specific_bypass' tag, it will be deleted before sending it\n\n        This is a workaround that lets Jasmin accept messages with vendor TLVs without forwarding them\n        to downstream users.\n\n        Related to #325\n        \"\"\"\n        if pdu.commandId == CommandId.deliver_sm and 'vendor_specific_bypass' in pdu.params:\n            del pdu.params['vendor_specific_bypass']\n\n        return twistedSMPPServerProtocol.sendDataRequest(self, pdu)\n","repo_name":"jookies/jasmin","sub_path":"jasmin/protocols/smpp/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":26823,"program_lang":"python","lang":"en","doc_type":"code","stars":907,"dataset":"github-code","pt":"80"} +{"seq_id":"69950146820","text":"from .single_stage_rbbox import SingleStageDetectorRbbox\nfrom ..registry import DETECTORS\nfrom mmdet.core.bbox import dbbox2result\n\n@DETECTORS.register_module\nclass R3Det(SingleStageDetectorRbbox):\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head=None,\n                 rbbox_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None):\n        super(R3Det, self).__init__(backbone, neck, bbox_head, rbbox_head,\n                                    train_cfg, test_cfg, pretrained)\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_hbboxes,\n                      gt_labels,\n                      gt_masks=None,\n                      gt_bboxes_ignore=None,\n                      _gt_labels=None,\n                      _gt_bboxes=None,\n                      _gt_masks=None):\n        x = self.backbone(img)\n        x = self.neck(x)\n        # The rotated-box head returns predictions plus a dict of training losses\n        box_pred, labels, loss_dict = self.rbbox_head(x, img_metas, gt_bboxes, gt_hbboxes, gt_labels, True, train_cfg=self.train_cfg)\n        return loss_dict\n\n    def simple_test(self, img, img_meta, rescale=False):\n        x = self.backbone(img)\n        x = self.neck(x)\n        # Inference mirrors forward_train, with the ground-truth arguments left as None\n        box_pred, cls_prob, labels = self.rbbox_head(x, img_meta, None, None, None, False, test_cfg=self.test_cfg)\n        rbbox_results = dbbox2result(box_pred, labels, self.rbbox_head.num_classes)\n        return rbbox_results\n","repo_name":"ch-ho00/FCOS_obb","sub_path":"mmdet/models/detectors/r3det.py","file_name":"r3det.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"72244643458","text":"import sys\nfrom sys import argv\nfrom additions.echo import Echo\n\nfrom btrfs_share_checker import CheckShares\nfrom btrfs_share_converter import ConvertShares\nfrom create_snapshot import CreateSnapshot\nfrom get_snapshots import ListSnapshots\nfrom remove_snapshot import RemoveSnapshot\nimport mount_snapshot\nimport unmount_snapshot\n\ndef Usage():\n    Echo(\"Usage:\")\n    Echo(\"snapshot [arguments]...\")\n    Echo(\"  check   | This will check each share to see if snapshots can\")\n    Echo(\"          | be created for it.\")\n    Echo(\"          |\")\n    Echo(\"  convert | This will convert a share to a btrfs sub volume.\")\n    Echo(\"          | This is required in order to create snapshots for a share.\")\n    Echo(\"          | Use '-all' to convert all existing shares.\")\n    Echo(\"          |\")\n    Echo(\"  create  | This will create a snapshot for the share specified.\")\n    Echo(\"          |\")\n    Echo(\"  list 
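# A hypothetical mmdetection-style config fragment showing how a detector such
# as R3Det above is typically assembled (the names below are illustrative only
# and are not taken from this repository's configs):
model = dict(
    type='R3Det',
    backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    rbbox_head=dict(type='...'),  # the rotated-box head consumed by forward_train/simple_test
)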
| This will return a list of all the existing snapshots.\")\n Echo(\" |\")\n Echo(\" remove | This will remove the snapshot and it's data based on the\")\n Echo(\" | ID passed through. Run 'list' to find out what the IDs are.\")\n Echo(\" | If you use '-y' at the end it won't ask for confirmation.\")\n Echo(\" |\")\n # Echo(\" restore | This doesn't work right now\")\n# Echo(\" restore | This will restore the data from a snapshot to it's original\")\n# Echo(\" | share.\\\\e[31;1m This will overwrite the entire share, not just\\\\e[0m\")\n# Echo(\" | \\\\e[31;1mthe files that are affected. You may want to create an\\\\e[0m\")\n# Echo(\" | \\\\e[31;1madditional snapshot of the share if you are unsure.\\\\e[0m\")\n # Echo(\" |\")\n Echo(\" mount | This will mount a copy of a snapshot as a new share.\")\n Echo(\" |\")\n Echo(\" unmount | Unmount a previously mounted snapshot.\")\n sys.exit()\n\n\nif len(argv) == 1:\n Usage()\n\nargs = [None,None,None,None]\n\nfor a in range(1, len(argv)):\n args[a-1] = argv[a]\n\ncommand = args[0].lower()\n\nif command == \"check\":\n CheckShares()\nelif command == \"convert\":\n ConvertShares(args[1])\nelif command == \"create\":\n CreateSnapshot(args[1], args[2], args[3])\nelif command == \"list\":\n ListSnapshots()\nelif command == \"remove\":\n RemoveSnapshot(args[1], args[2])\nelif command == \"mount\":\n if args[1] == \"list\":\n mount_snapshot.ListMounts()\n sys.exit()\n try:\n snapID = int(args[1])\n except: mount_snapshot.Usage()\n\n readOnly = not (args[2] == \"-w\" or args[3] == \"-w\")\n mount_snapshot.MountSnapshot(snapID, args[2], readOnly)\n pass\nelif command == \"unmount\":\n if args[1] == None or not args[1].isdigit():\n unmount_snapshot.Usage()\n\n unmount_snapshot.UnmountSnapshot(int(args[1]))\n pass\nelse:\n Usage()","repo_name":"RBootsGames/Unraid-Snapshots","sub_path":"snapshot files/snapshot_scripts/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"3826578281","text":"from flask import Flask, jsonify, request, redirect\nfrom flask_cors import CORS\nimport json\nfrom facturas import Factura, Errores, Autorizacion\nfrom functions import Functions\nfrom archivoSalida import Salida\n\napp = Flask(__name__)\nCORS(app)\n\nlistaAutorizaciones = []\n\n@app.before_first_request\ndef leerArchivoSalida():\n salida = Salida()\n global listaAutorizaciones\n listaAutorizaciones = salida.leerSalida()\n\n@app.after_request\ndef actualizarArchivoSalida(response):\n salida = Salida()\n salida.generarSalida(listaAutorizaciones)\n return response\n\n@app.route(\"/entrada\",methods=[\"POST\"])\ndef leerEntrada():\n funcion = Functions()\n texto = request.json['archivo']\n # print(texto)\n global listaAutorizaciones\n listaAutorizaciones = funcion.analizarEntrada(texto, listaAutorizaciones)\n return jsonify({\n \"Mensaje\":\"Se ha leido el archivo\"\n })\n\n@app.route(\"/getSalida\", methods=[\"GET\"])\ndef getSalidaStr():\n filename = \"Flask/autorizaciones.xml\"\n archivo = open(filename, \"r\")\n contenido = archivo.read()\n print(\"salida\")\n objeto = {\n \"xml\":contenido\n }\n\n return jsonify(objeto)\n\n\n@app.route(\"/getTablaIva\", methods=[\"POST\"])\ndef getTablaIva():\n global listaAutorizaciones\n \n tabla = Functions()\n\n fecha = request.json['fecha']\n \n datos = tabla.tablaIva(fecha,listaAutorizaciones)\n \n objeto={\"mensaje\":\"ERROR\"}\n \n if datos is not None:\n\n objeto = {\n 
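# The manual argv handling in the snapshot CLI above pads a fixed-size list
# (args = [None, None, None, None]) and raises IndexError once more than four
# arguments are passed. A sketch of the same dispatch with argparse
# (hypothetical, keeping the original sub-commands):
import argparse

def parse_cli():
    parser = argparse.ArgumentParser(prog='snapshot')
    parser.add_argument('command', choices=['check', 'convert', 'create', 'list',
                                            'remove', 'mount', 'unmount'])
    parser.add_argument('extra', nargs='*', help='command-specific arguments')
    return parser.parse_args()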
\"titulos\":datos[0],\n \"emitido\":datos[1],\n \"recibido\":datos[2],\n \"mensaje\":\"Correcto\",\n \"titulo\":str(\"IVA emitido y recibido el \" + fecha),\n }\n return jsonify(objeto)\n\n\n@app.route(\"/getTablaIva2\", methods=[\"POST\"])\ndef getTablaIva2():\n global listaAutorizaciones\n \n tabla = Functions()\n\n nit = request.json['nit']\n fecha = request.json['fecha']\n \n datos = tabla.tablaIva2(nit,fecha,listaAutorizaciones)\n \n objeto={\"mensaje\":\"ERROR\"}\n \n if datos is not None:\n\n objeto = {\n \"xValues\":datos[1],\n \"yValues\":datos[0],\n \"titulo\":str(\"IVA emitido y recibido el \" + fecha),\n \"mensaje\":\"Correcto\"\n }\n return jsonify(objeto)\n\n\n@app.route(\"/getTablaFecha\", methods=[\"POST\"])\ndef getTablaFecha():\n global listaAutorizaciones\n \n tabla = Functions()\n\n iva = request.json['iva']\n desde = request.json['desde']\n hasta = request.json['hasta']\n \n datos = tabla.tablaFecha(iva,desde,hasta,listaAutorizaciones)\n \n objeto={\"mensaje\":\"ERROR\"}\n \n if datos is not None:\n \n objeto = {\n \"xValues\":datos[0],\n \"total\":datos[1],\n \"mensaje\":\"Correcto\"\n }\n\n return jsonify(objeto)\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef getHome():\n \n objeto = {\n \"Index\":\"Hola\"\n }\n\n return jsonify(objeto)\n\n\n@app.route(\"/reset\", methods=[\"GET\"])\ndef reset():\n global listaAutorizaciones\n listaAutorizaciones = []\n \n filename = \"Flask/autorizaciones.xml\"\n archivo = open(filename, \"w+\")\n archivo.write(\"\")\n archivo.close()\n \n objeto = {\"Mensaje\":\"Borrado\"}\n \n return jsonify(objeto)\n\n\n@app.route(\"/getNits\", methods=[\"GET\"])\ndef getNits():\n global listaAutorizaciones\n function = Functions()\n lista_nits = function.getNits(listaAutorizaciones)\n \n objeto = {\"Lista\":lista_nits}\n \n return jsonify(objeto)\n\n\n@app.route(\"/salidaPDF\", methods=[\"GET\"])\ndef salidaPDF():\n import base64\n import os\n from reportlab.pdfgen import canvas\n \n filenamePDF = \"Flask/IPC2_Proyecto3_202010055_Documentacion.pdf\"\n \n with open(filenamePDF, \"rb\") as pdf_file:\n encoded_string = base64.b64encode(pdf_file.read())\n \n print(\"salida\")\n objeto = {\n \"archivo\":str(encoded_string)\n }\n\n return jsonify(objeto)\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(threaded=True, debug=True)","repo_name":"Desquivel501/IPC2_Proyecto3_202010055","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"27112183900","text":"import discord\nfrom discord import app_commands\nfrom discord.ext import commands, tasks\nfrom discord import Embed\nimport env\nimport aiohttp\nimport re\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom twitch import checkIfLive\nimport asyncio\nfrom tictactoe import TicTacToe\nimport json\nimport random\nimport time\nimport os\nfrom typing import Optional\nfrom asyncio import sleep\nimport requests\nfrom googleapiclient.discovery import build\n\n# Initialize variables\nintents = discord.Intents.all()\n\nuser_message_times = {}\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\n\n# Change the current working directory to the script's directory\nos.chdir(script_dir)\n\nANTI_SPAM_SECONDS = 5\nBASE_EXP = 100\nEXP_MULTIPLIER = 2\n\nisLive = False\n\ncounting_channel_id = 892158631603228672\ntwitch_announcement_id = 793635730826985523\nwelcome_id = 793633905252106250\npolls_id 
= 832213669764923413\nlog_channel_id = 812313840666542184\nyoutube_id = 812313840666542184\nlevel_up_id = 898895899923714118\n\nlast_user_id = None\n\nyoutube = build('youtube', 'v3', developerKey='AIzaSyBN4ki9nTmY-CNYy2YRuWMwIdRWoZHNoeo')\nchannel_id = 'UCX4nLphiA84NuMw5lMZlI6A'\nlast_video_id = None\n\nGIPHY_API_KEY = env.GIPHY_API_KEY\n\nbot = commands.Bot(command_prefix='!', intents=intents)\n\nwith open('polls_questions.txt', 'r') as f:\n text = f.read()\n\ndef read_count():\n with open(\"count.txt\", \"r\") as f:\n count_data = f.read().strip().split(',')\n count = int(count_data[0])\n last_user = int(count_data[1]) if len(count_data) > 1 else None\n return count, last_user\n\ndef write_count(count, last_user):\n with open(\"count.txt\", \"w\") as f:\n f.write(f\"{count},{last_user}\")\n\ndef load_reaction_roles_data():\n try:\n with open(\"reaction_roles.json\", \"r\") as f:\n return json.load(f)\n except FileNotFoundError:\n with open(\"reaction_roles.json\", \"w\") as f:\n json.dump({}, f)\n return {}\n\ndef save_reaction_roles_data(data):\n with open(\"reaction_roles.json\", \"w\") as f:\n json.dump(data, f, indent=4)\n\n\ndef calculate_new_level(exp):\n level = 1\n required_exp = BASE_EXP\n\n while exp >= required_exp:\n exp -= required_exp\n level += 1\n required_exp = int(BASE_EXP * (EXP_MULTIPLIER ** (level - 1)))\n\n return level\n\nasync def is_spam(user_id):\n current_time = time.time()\n if user_id not in user_message_times:\n user_message_times[user_id] = current_time\n return False\n\n last_message_time = user_message_times[user_id]\n user_message_times[user_id] = current_time\n return current_time - last_message_time < ANTI_SPAM_SECONDS\n\n@bot.event\nasync def on_message(message):\n global counter_data\n exp_to_add = random.randint(10, 20)\n\n if message.author == bot.user:\n return\n \n global count, last_user_id\n\n # Ignore messages from the bot itself\n if message.author == bot.user:\n return\n\n # Check if the message is in the counting channel\n if message.channel.id == counting_channel_id:\n try:\n # Check if the message is the correct number and the user didn't count twice\n if int(message.content) == count + 1 and message.author.id != last_user_id:\n count += 1\n last_user_id = message.author.id\n write_count(count, last_user_id)\n last_user_id = message.author.id\n else:\n await message.delete()\n except ValueError: # If the message is not a number, delete it\n await message.delete()\n \n with open('bad words.txt', 'r') as f:\n bad_words = f.read()\n\n banned_words = bad_words.split(',')\n\n for word in banned_words:\n if word.lower() in message.content.lower():\n await message.delete()\n await message.channel.send(f'{message.author.mention}, please do not use offensive language.')\n return\n \n user_id = str(message.author.id)\n if user_id not in exp_data:\n exp_data[user_id] = {\n \"username\": message.author.name,\n \"exp\": 0,\n \"level\": 1,\n }\n \n if await is_spam(message.author.id):\n return\n\n exp_data[user_id][\"exp\"] += exp_to_add\n save_exp_data(exp_data)\n new_level = calculate_new_level(exp_data[user_id][\"exp\"])\n\n if new_level > exp_data[user_id][\"level\"]:\n save_exp_data(exp_data)\n exp_data[user_id][\"level\"] = new_level\n role_names = [\"Zombie\", \"Skeleton\", \"Creeper\", \"Wither\", \"Ender Dragon\"]\n role_name = role_names[min(new_level - 1, len(role_names) - 1)]\n\n role = discord.utils.get(message.guild.roles, name=role_name)\n if not role:\n role = await message.guild.create_role(name=role_name)\n\n member = 
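    # For reference, with BASE_EXP = 100 and EXP_MULTIPLIER = 2 the loop in
    # calculate_new_level() charges 100 * 2**(level-1) exp per level, so the
    # cumulative cost of reaching level n is 100 * (2**(n-1) - 1):
    # level 2 at 100 total exp, level 3 at 300, level 4 at 700, level 5 at 1500.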
message.author\n await member.add_roles(role)\n\n # Create an embed for the level-up message\n embed = discord.Embed(title=\"Level Up!\", description=f\"{message.author.mention} leveled up to {role_name}!\", color=16739179)\n\n # Send the embed to a specific channel\n level_up_channel = bot.get_channel(level_up_id)\n await level_up_channel.send(embed=embed)\n\n await bot.process_commands(message)\n \n\ndef find_duplicate_questions(text):\n questions = re.findall(r\"(^.*\\?$)\", text, re.MULTILINE)\n seen_questions = set()\n duplicates = set()\n\n for question in questions:\n if question in seen_questions:\n duplicates.add(question)\n else:\n seen_questions.add(question)\n\n return duplicates\n\n\ndef remove_duplicates(text, duplicates):\n lines = text.split('\\n')\n unique_questions = []\n i = 0\n\n included_duplicates = set()\n\n while i < len(lines):\n line = lines[i]\n if line.endswith('?') and (line not in duplicates or line not in included_duplicates):\n question_block = [line]\n if line in duplicates:\n included_duplicates.add(line)\n for j in range(1, 5):\n if i + j < len(lines) and not lines[i + j].endswith('?'):\n question_block.append(lines[i + j])\n else:\n break\n unique_questions.append('\\n'.join(question_block))\n i += 4\n else:\n i += 1\n\n return unique_questions\n\nduplicates = find_duplicate_questions(text)\nunique_questions = remove_duplicates(text, duplicates)\n\n\nasync def get_random_greeting_gif():\n async with aiohttp.ClientSession() as session:\n async with session.get(f'https://api.giphy.com/v1/gifs/random?api_key={GIPHY_API_KEY}&tag=waving') as response:\n data = await response.json()\n return data['data']['images']['original']['url']\n\n\nasync def get_random_meme_gif():\n async with aiohttp.ClientSession() as session:\n async with session.get(f'https://api.giphy.com/v1/gifs/random?api_key={GIPHY_API_KEY}&tag=memes&rating=G') as response:\n data = await response.json()\n return data['data']['images']['original']['url']\n\n# Loads PBs from file\ndef load_pbs():\n with open('pbs.json', 'r') as file:\n pbs = json.load(file)\n return pbs\n\n# Saves PBs to file\ndef save_pbs(pbs):\n with open('pbs.json', 'w') as file:\n json.dump(pbs, file)\n\n\n@bot.event\nasync def on_ready():\n checkforvideos()\n global count, last_user_id\n count_data = read_count()\n count = count_data[0]\n last_user_id = count_data[1]\n print(f'{bot.user.name} has connected to Discord!')\n \n try:\n synced = await bot.tree.sync()\n print(f\"Synced {len(synced)} command(s)\")\n except Exception as e:\n print(e)\n \n load_reaction_roles.start()\n \n twitchNotifications.start()\n\n scheduler = AsyncIOScheduler()\n scheduler.add_job(daily_polls, CronTrigger(hour=23, minute=0, second=0), misfire_grace_time=60)\n scheduler.start()\n\ndef get_last_video_id_from_file():\n try:\n with open('last_video_id.txt', 'r') as file:\n return file.read().strip()\n except FileNotFoundError:\n return None\n \ndef update_last_video_id_in_file(video_id):\n with open('last_video_id.txt', 'w') as file:\n file.write(video_id)\n \n@tasks.loop(minutes=30)\nasync def checkforvideos():\n channel = bot.get_channel(890908593241612288)\n\n request = youtube.search().list(\n part='snippet',\n channelId='UCX4nLphiA84NuMw5lMZlI6A',\n maxResults=1,\n type='video',\n order='date'\n )\n response = request.execute()\n video_id = response['items'][0]['id']['videoId']\n\n last_video_id = get_last_video_id_from_file()\n\n if last_video_id != video_id:\n update_last_video_id_in_file(video_id)\n await 
channel.send(f'<@&814797615358803968> A new video has been uploaded! https://www.youtube.com/watch?v={video_id}')\n\n\n@bot.tree.command(description=\"See my current PBs\", name=\"pb\")\nasync def pb(interaction: discord.Interaction):\n pbs = load_pbs()\n embed = discord.Embed(title=\"Minecraft Speedrunning PBs\", color=discord.Color.red())\n for category, time in pbs.items():\n embed.add_field(name=category, value=time, inline=False)\n await interaction.response.send_message(embed=embed)\n\n@bot.tree.command(description=\"Edit the pbs\", name=\"editpb\")\n@commands.has_permissions(administrator=True)\nasync def editpb(ctx, category: str, new_time: str):\n pbs = load_pbs()\n if category in pbs:\n pbs[category] = new_time\n save_pbs(pbs)\n \n@bot.tree.command(description=\"Get all the commands for the server!\", name=\"help\")\nasync def help(interaction: discord.Interaction):\n member = interaction.user\n is_admin = member.guild_permissions.administrator\n\n general_embed = discord.Embed(\n title=\"General Commands\",\n description=\"Here are the general commands:\",\n color=discord.Color.red()\n )\n general_embed.add_field(name=\"/tictactoe {user}\", value=\"Play a game of Tic Tac Toe with another member in the server!\", inline=False)\n general_embed.add_field(name=\"/meme\", value=\"Sends a random meme GIF\", inline=False)\n general_embed.add_field(name=\"/coinflip\", value=\"Flip a coin\", inline=False)\n general_embed.add_field(name=\"/8ball {question}\", value=\"Ask the magic 8 ball a question\", inline=False)\n general_embed.add_field(name=\"/dadjoke\", value=\"Sends a random dadjoke\", inline=False)\n general_embed.add_field(name=\"/wouldyourather\", value=\"Sends a random would you rather question\", inline=False)\n general_embed.add_field(name=\"/level\", value=\"Shows your current level and role\", inline=False)\n general_embed.add_field(name=\"/rockpaperscissor\", value=\"Play a game of Rock Paper or Scissors against the AI\", inline=False)\n general_embed.add_field(name=\"/pb\", value=\"See my current PBs\", inline=False)\n\n if is_admin: \n admin_embed = discord.Embed(\n title=\"Admin Commands\",\n description=\"React with 🔒 to view admin commands.\",\n color=discord.Color.red()\n )\n general_msg = await interaction.channel.send(embed=general_embed)\n admin_help_msg = await interaction.channel.send(embed=admin_embed)\n await admin_help_msg.add_reaction(\"🔒\")\n\n def check(reaction, user):\n return user == member and str(reaction.emoji) == \"🔒\" and reaction.message.id == admin_help_msg.id\n\n try:\n reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check)\n\n except asyncio.TimeoutError:\n await admin_help_msg.clear_reactions()\n else:\n admin_embed = discord.Embed(\n title=\"Admin Commands\",\n description=\"Here are the admin commands:\",\n color=discord.Color.red()\n )\n admin_embed.add_field(name=\"/Kick {user} {reason}\", value=\"Kick a member, remember to add a reason and a log sends automatically to the log channel with the latest 10 messages of the user\", inline=False)\n admin_embed.add_field(name=\"/Bank {user} {reason}\", value=\"Ban a member and sends a log to the log channel\", inline=False)\n admin_embed.add_field(name=\"/timout {user} {time}\", value=\"Timeout a member with specified time\", inline=False)\n admin_embed.add_field(name=\"/editpb {category} {time}\", value=\"Edit a pb\", inline=False)\n await admin_help_msg.edit(embed=admin_embed)\n await admin_help_msg.clear_reactions()\n else:\n await 
interaction.channel.send(embed=general_embed)\n \ndef count_options(question):\n options = re.findall(r'^\\d+:', question, re.MULTILINE)\n return len(options)\n\ndef save_question_index(index):\n with open('question_index.txt', 'w') as file:\n file.write(str(index))\n\ndef load_question_index():\n try:\n with open('question_index.txt', 'r') as file:\n content = file.read().strip()\n if content:\n index = int(content) \n return index\n else:\n return 0\n except FileNotFoundError:\n return 0\n\nasync def daily_polls():\n question_index = load_question_index()\n channel = bot.get_channel(polls_id)\n if 0 <= question_index < len(unique_questions):\n question = unique_questions[question_index]\n\n # Create an Embed object\n embed = Embed(title=f\"Daily Poll nr: {question_index} \\u2b50\",\n description=f\"{question}\",\n color=16739179)\n \n embed.set_footer(text=\"React to vote!\")\n\n sent_message = await channel.send(embed=embed)\n \n num_options = count_options(question)\n for i in range(1, num_options + 1):\n await sent_message.add_reaction(f\"{i}\\N{COMBINING ENCLOSING KEYCAP}\")\n \n question_index += 1\n save_question_index(question_index)\n question_index += 1\n \n@bot.tree.command(description=\"Check the bot's responsiveness\", name=\"ping\")\nasync def ping(interaction: discord.Interaction):\n await interaction.response.send_message(\"Pong!\")\n \n@bot.tree.command(description=\"Play a game of rock paper scissor agains the AI\", name=\"rockpaperscissor\")\n@app_commands.describe(user_choice = \"Rock Paper or Scissor?\")\nasync def rockpaperscissor(interaction: discord.Interaction, user_choice: str):\n user_choice = user_choice.lower()\n valid_choices = [\"rock\", \"paper\", \"scissors\"]\n\n if user_choice not in valid_choices:\n await interaction.response.send_message(\"Invalid choice! Please choose rock, paper, or scissors.\")\n return\n\n ai_choice = random.choice(valid_choices)\n result = \"\"\n\n if user_choice == ai_choice:\n result = \"It's a tie!\"\n elif (user_choice == \"rock\" and ai_choice == \"scissors\") or \\\n (user_choice == \"paper\" and ai_choice == \"rock\") or \\\n (user_choice == \"scissors\" and ai_choice == \"paper\"):\n result = \"You win!\"\n else:\n result = \"You lose!\"\n await interaction.response.send_message(f\"You chose {user_choice.capitalize()}, the AI chose {ai_choice.capitalize()}. 
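# Two notes on the commands above. In daily_polls(), the index is incremented
# and persisted once via save_question_index(question_index); the trailing
# second `question_index += 1` only touches the local variable and is dead
# code, since the next run reloads the saved value. Also, the admin help embed
# labels "/Bank" and "/timout" look like typos for "/Ban" and "/timeout".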
{result}\")\n \n@bot.tree.command(description=\"Check the bot's responsiveness\", name=\"8ball\")\nasync def ball(interaction: discord.Interaction, *, question: str):\n answers = [\"It is certain\",\"It is decidedly so\",\"Without a doubt\",\"Yes - definitely\",\"You may rely on it\",\"As I see it, yes\",\"Most likely\",\"Outlook good\",\"Yes\",\"Signs point to yes\",\"Reply hazy, try again\",\"Ask again later\",\"Better not tell you now\",\"Cannot predict now\",\"Concentrate and ask again\",\"Don't count on it\",\"My reply is no\",\"My sources say no\",\"Outlook not so good\",\"Very doubtful\"]\n await interaction.response.send_message(f\"Question: {question}\\nAnswer: {answers[random.randint(0, len(answers) - 1)]}\")\n\n@bot.tree.command(description=\"Get all the commands for the server!\", name=\"dadjoke\")\nasync def dadjoke(interaction: discord.Interaction): \n async with aiohttp.ClientSession() as session:\n async with session.get('https://icanhazdadjoke.com/', headers={\"Accept\": \"application/json\"}) as response:\n data = await response.json()\n await interaction.channel.send(data['joke'])\n\n@bot.tree.command(description=\"Send a random Meme!\", name=\"meme\")\nasync def meme(interaction: discord.Interaction):\n gif_url = await get_random_meme_gif()\n embed = Embed()\n embed.set_image(url=gif_url)\n await interaction.response.send_message(embed=embed)\n\n@bot.tree.command(description=\"Send a random Would You Rather question!\", name=\"wouldyourather\")\nasync def wouldyourather(interaction: discord.Interaction):\n with open('current-would-you-rather.txt', 'r') as file:\n current_line = int(file.readline())\n\n # Read questions from the 'would-you-rather.txt' file\n with open('would-you-rather.txt', 'r') as questions:\n data = questions.readlines()\n question = data[current_line].strip()\n\n await interaction.response.send_message(question)\n\n # Update the current line in the file\n with open('current-would-you-rather.txt', 'w') as file:\n file.write(str(current_line))\n \n current_line += 1\n if current_line >= len(data):\n current_line = 0\n\n # Update the current line in the file\n with open('current-would-you-rather.txt', 'w') as file:\n file.write(str(current_line))\n\n\n@app_commands.checks.has_permissions(administrator=True)\n@bot.tree.command(description=\"Announce Something!\", name=\"announce\")\n@app_commands.describe(what_to_announce=\"What should I announce\", to_which_channel=\"To which channel?\", embed_message=\"Embed the message? 
(True/False)\")\nasync def announce(interaction: discord.Interaction, what_to_announce: str, to_which_channel: discord.TextChannel, embed_message: bool = False):\n if embed_message:\n embed = Embed(description=what_to_announce, color=discord.Color.red())\n await to_which_channel.send(embed=embed)\n else:\n await to_which_channel.send(what_to_announce)\n \n@bot.tree.command(description=\"Flip a coin.\", name=\"coinflip\")\nasync def coinflip(interaction: discord.Interaction):\n await interaction.response.send_message(random.choice([\"Heads!\", \"Tails!\"]))\n\n@bot.tree.command(description=\"Show your current level and role\", name=\"level\")\nasync def level(interaction: discord.Interaction):\n user_id = str(interaction.user.id)\n if user_id not in exp_data:\n await interaction.response.send_message(\"You don't have any experience points yet.\")\n return\n\n exp = exp_data[user_id][\"exp\"]\n level = exp_data[user_id][\"level\"]\n role_names = [\"Creeper\", \"Zombie\", \"Skeleton\", \"Wither\", \"Ender Dragon\"]\n role_name = role_names[min(level - 1, len(role_names) - 1)]\n\n await interaction.response.send_message(f\"Your current level is {level} and your role is {role_name} with an exp of {exp}.\")\n\ndef load_exp_data():\n try:\n with open(\"exp_data.json\", \"r\") as f:\n return json.load(f)\n except FileNotFoundError:\n return {}\n\nexp_data = load_exp_data()\n\ndef save_exp_data(exp_data):\n with open(\"exp_data.json\", \"w\") as f:\n json.dump(exp_data, f)\n\nisLive = False\n\n@tasks.loop(seconds=10)\nasync def twitchNotifications():\n global isLive\n stream = checkIfLive(\"ZodiSP\")\n if stream != \"OFFLINE\":\n if isLive == False:\n isLive = True\n\n title = stream.title\n thumbnail_url = stream.thumbnail_url.format(width=1080, height=608)\n url = f\"https://www.twitch.tv/ZodiSP\"\n\n embed = discord.Embed(\n title=f\"{title}\",\n description=f\"<@&814797613395476503> [ZodiSP is live! Click here to watch the stream.]({url})\",\n color=16739179,\n )\n\n embed.set_author(name=\"ZodiSP\", url=url, icon_url=\"https://i.imgur.com/OVsAABd.jpg\")\n embed.set_thumbnail(url=thumbnail_url)\n embed.set_footer(text=\"Twitch Notifications\")\n\n await bot.get_channel(twitch_announcement_id).send(embed=embed)\n else:\n isLive = False\n\n\n\ndef tictactoe_accept_check(reaction, user, players):\n return (\n user in players\n and str(reaction.emoji) == '✅'\n and reaction.message.author == bot.user\n )\n\n\n@bot.tree.command(description=\"Play a game of Tic Tac Toe towards an opponent\", name=\"tictactoe\")\n@app_commands.describe(opponent = \"Who do you want to play against?\")\nasync def tictactoe(interaction: discord.Interaction, opponent: discord.Member):\n author = interaction.user\n if author == opponent:\n await interaction.response.send_message(\"You can't play against yourself!\")\n return\n \n\n players = [author, opponent]\n game = TicTacToe(*players)\n accept_message = await interaction.channel.send(\n f\"{opponent.mention}, do you accept the challenge from {author.mention}? (React with ✅ to accept)\"\n )\n await accept_message.add_reaction('✅')\n\n try:\n reaction, user = await bot.wait_for(\n 'reaction_add', check=lambda r, u: tictactoe_accept_check(r, u, players), timeout=60\n )\n\n except asyncio.TimeoutError:\n await interaction.channel.send(\"Challenge not accepted in time.\")\n return\n await interaction.channel.send(\"Challenge accepted! The game board is 3x3. Enter the row and column numbers (1-3) separated by a space to make a move. 
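# Caution for the /level command above: its role ladder (["Creeper", "Zombie",
# "Skeleton", "Wither", "Ender Dragon"]) is ordered differently from the one
# used when levelling up in on_message (["Zombie", "Skeleton", "Creeper",
# "Wither", "Ender Dragon"]), so /level can report a different role than the
# one actually granted; both should read from a single module-level constant.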
Example: '1 3'\")\n while True:\n await interaction.channel.send(f\"Current board:\\n```\\n{game}\\n```{players[game.current_turn].mention}'s turn.\")\n\n try:\n move_message = await bot.wait_for(\n 'message', check=lambda m: m.author == players[game.current_turn], timeout=60\n )\n except asyncio.TimeoutError:\n await interaction.channel.send(\"Turn not played in time. The game has ended.\")\n break\n\n try:\n x, y = map(int, move_message.content.split())\n if x not in range(1, 4) or y not in range(1, 4):\n raise ValueError\n result, valid_move = game.make_move(x, y)\n if result:\n await interaction.channel.send(f\"Current board:\\n```\\n{game}\\n```{result}\")\n if valid_move:\n break\n except ValueError:\n await interaction.channel.send(\"Invalid input. Please enter the row and column separated by a space, like '1 3'.\")\n \n@bot.tree.command(description=\"Choose a message to turn into a reaction role!\", name = \"reactionrole\")\n@app_commands.checks.has_permissions(administrator=True)\n@app_commands.describe(message_id = \"The ID of the message to add the reaction role to\", emoji = \"The emoji to react with\", role = \"The role to give when the user reacts\")\nasync def setup_reaction_role(ctx, message_id: str, emoji: str, role: discord.Role):\n try:\n message_id = int(message_id)\n except ValueError:\n await ctx.send(\"Invalid message ID provided. Please make sure it's a valid integer.\")\n return\n\n data = load_reaction_roles_data()\n try:\n message = await ctx.channel.fetch_message(message_id)\n except discord.NotFound:\n await ctx.send(\"Message not found. Please make sure the message ID is correct and the bot can access the message.\")\n return\n \n await message.add_reaction(emoji)\n\n if str(ctx.guild.id) not in data:\n data[str(ctx.guild.id)] = []\n\n data[str(ctx.guild.id)].append({\n \"channel_id\": ctx.channel.id,\n \"message_id\": message_id,\n \"emoji\": emoji,\n \"role_id\": role.id\n })\n\n save_reaction_roles_data(data)\n\n@bot.event\nasync def on_raw_reaction_add(payload):\n if payload.member.bot:\n return\n\n data = load_reaction_roles_data()\n\n for item in data[str(payload.guild_id)]:\n if payload.message_id == item[\"message_id\"] and str(payload.emoji) == item[\"emoji\"]:\n role = discord.utils.get(payload.member.guild.roles, id=item[\"role_id\"])\n await payload.member.add_roles(role)\n break\n \n@bot.event\nasync def on_raw_reaction_remove(payload):\n guild = bot.get_guild(payload.guild_id)\n member = guild.get_member(payload.user_id)\n if member.bot:\n return\n\n data = load_reaction_roles_data()\n\n for item in data[str(payload.guild_id)]:\n if payload.message_id == item[\"message_id\"] and str(payload.emoji) == item[\"emoji\"]:\n role = discord.utils.get(guild.roles, id=item[\"role_id\"])\n await member.remove_roles(role)\n break\n\n\n@tasks.loop(seconds=60)\nasync def load_reaction_roles():\n data = load_reaction_roles_data()\n for guild_id, guild_data in data.items():\n for reaction_role in guild_data:\n channel_id = reaction_role.get(\"channel_id\")\n message_id = reaction_role.get(\"message_id\")\n emoji = reaction_role.get(\"emoji\")\n\n guild = bot.get_guild(int(guild_id))\n channel = bot.get_channel(int(channel_id))\n message = await channel.fetch_message(message_id)\n\n await message.add_reaction(emoji)\n\n@bot.event\nasync def on_member_join(member):\n role = discord.utils.get(member.guild.roles, name='Member')\n await member.add_roles(role)\n print(f'Role \"{role}\" assigned to {member}.')\n \n gif_url = await get_random_greeting_gif()\n\n embed = 
Embed()\n embed.set_image(url=gif_url)\n channel = bot.get_channel(welcome_id)\n \n await channel.send(f\"Welcome {member.name}!\", embed=embed) \n\nload_reaction_roles.before_loop(bot.wait_until_ready)\n\nTOKEN = env.TOKEN\n\nbot.run(TOKEN)\n","repo_name":"ZodiDev/discord-bot-temp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"21591720168","text":"from typing import DefaultDict\nfrom collections import defaultdict\n\ndef palindromePermutation(a:str) ->bool:\n a=a.lower()\n count: int =0\n b: DefaultDict(str,int) = defaultdict(int)\n for i in a:\n if i == \" \":\n continue\n b[i]+=1\n for i in b.values():\n if i%2 ==0:\n continue\n else:\n count+=1\n if count>1:\n return False\n else:\n return True\n print(b)\n\nc = palindromePermutation(\"Tact Coa\")\nprint(c)\nc = palindromePermutation(\"itnin\")\nprint(c)","repo_name":"JeetPatel301095/CTCI","sub_path":"Arrays and Strings/1.4.py","file_name":"1.4.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"9642831719","text":"ACL_HEADER_KEY = 'acl_header'\nAUTH_HEADER_KEY = 'auth_header'\nAWS_HEADER_PREFIX = 'x-amz-'\nCOPY_SOURCE_HEADER_KEY = 'copy_source_header'\nDATE_HEADER_KEY = 'date_header'\nGOOG_HEADER_PREFIX = 'x-goog-'\nHEADER_PREFIX_KEY = 'header_prefix'\nMETADATA_DIRECTIVE_HEADER_KEY = 'metadata_directive_header'\nMETADATA_PREFIX_KEY = 'metadata_prefix'\nSECURITY_TOKEN_KEY = 'security-token'\n\nclass ProviderHeaders:\n\n ProviderHeaderInfoMap = {\n 'aws' : {\n HEADER_PREFIX_KEY : AWS_HEADER_PREFIX,\n METADATA_PREFIX_KEY : AWS_HEADER_PREFIX + 'meta-',\n DATE_HEADER_KEY : AWS_HEADER_PREFIX + 'date',\n ACL_HEADER_KEY : AWS_HEADER_PREFIX + 'acl',\n AUTH_HEADER_KEY : 'AWS',\n COPY_SOURCE_HEADER_KEY : AWS_HEADER_PREFIX + 'copy-source',\n METADATA_DIRECTIVE_HEADER_KEY : AWS_HEADER_PREFIX +\n 'metadata-directive',\n SECURITY_TOKEN_KEY : AWS_HEADER_PREFIX + 'security-token'\n },\n 'google' : {\n HEADER_PREFIX_KEY : GOOG_HEADER_PREFIX,\n METADATA_PREFIX_KEY : GOOG_HEADER_PREFIX + 'meta-',\n DATE_HEADER_KEY : GOOG_HEADER_PREFIX + 'date',\n ACL_HEADER_KEY : GOOG_HEADER_PREFIX + 'acl',\n AUTH_HEADER_KEY : 'GOOG1',\n COPY_SOURCE_HEADER_KEY : GOOG_HEADER_PREFIX + 'copy-source',\n METADATA_DIRECTIVE_HEADER_KEY : GOOG_HEADER_PREFIX +\n 'metadata-directive',\n SECURITY_TOKEN_KEY : GOOG_HEADER_PREFIX + 'security-token'\n }\n }\n\n def __init__(self, provider):\n self.provider = provider\n header_info_map = self.ProviderHeaderInfoMap[self.provider]\n self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]\n self.header_prefix = header_info_map[HEADER_PREFIX_KEY]\n self.date_header = header_info_map[DATE_HEADER_KEY]\n self.acl_header = header_info_map[ACL_HEADER_KEY]\n self.auth_header = header_info_map[AUTH_HEADER_KEY]\n self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]\n self.metadata_directive_header = (\n header_info_map[METADATA_DIRECTIVE_HEADER_KEY])\n self.security_token = header_info_map[SECURITY_TOKEN_KEY]\n\n# Static utility method for getting default ProviderHeaders.\ndef get_default():\n return ProviderHeaders('aws')\n","repo_name":"murphy2712/gsutil","sub_path":"src/boto/provider_headers.py","file_name":"provider_headers.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} 
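# Editor's note (hedged sketch, not part of the dataset): the CTCI 1.4 record
# above annotates its counter as `DefaultDict(str,int)` — a call where the
# subscript `DefaultDict[str, int]` was meant — and its trailing `print(b)`
# sits after the `return` statements, so it can never run. A minimal corrected
# sketch of the same check, assuming the intended rule is "a string permutes
# to a palindrome iff at most one character occurs an odd number of times":
from collections import Counter

def palindrome_permutation(s: str) -> bool:
    # Count characters case-insensitively, ignoring spaces.
    counts = Counter(s.lower().replace(" ", ""))
    # At most one character may have an odd count.
    return sum(c % 2 for c in counts.values()) <= 1

assert palindrome_permutation("Tact Coa")  # e.g. "taco cat"
assert palindrome_permutation("itnin")     # e.g. "nitin"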
+{"seq_id":"42535844963","text":"from rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom .models import User, Doctor, Appointment\nfrom .serializers import UserSerializer, DoctorSerializer, AppointmentSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n authentication_classes = (TokenAuthentication,)\n permission_classes = (AllowAny,)\n\n\nclass AppointmentViewSet(viewsets.ModelViewSet):\n queryset = Appointment.objects.all()\n serializer_class = AppointmentSerializer\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n def create(self, request, *args, **kwargs):\n user = User.objects.get(username=request.user)\n doctor = Doctor.objects.all()\n doctor = doctor.order_by('has_appointments')[0]\n\n def create(self, request, *args, **kwargs):\n user = User.objects.get(username=request.user)\n doctor = Doctor.objects.all()\n doctor = doctor.order_by('has_appointments')[0]\n appointment = Appointment.objects.create(patient=user, doctor=doctor)\n\n appointment.doctor = doctor\n appointment.save()\n serializer = AppointmentSerializer(appointment, many=False)\n response = {'Appointments': serializer.data}\n return Response(response, status=status.HTTP_200_OK)\n\n def retrieve(self, request, *args, **kwargs):\n user = request.user\n user = User.objects.get(patient=user)\n doctor = Doctor.objects.get(user=user)\n if user.is_doctor:\n appointments = Appointment.objects.filter(doctor=doctor)\n serializer = AppointmentSerializer(appointments, many=True)\n response = {'doctor': serializer.data}\n return Response(response, status=status.HTTP_200_OK)\n else:\n appointments = Appointment.objects.filter(user=user)\n serializer = AppointmentSerializer(appointments, many=True)\n response = {'doctor': serializer.data}\n return Response(response, status=status.HTTP_200_OK)\n\nclass DoctorViewSet(viewsets.ModelViewSet):\n queryset = Doctor.objects.all()\n serializer_class = DoctorSerializer\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n def create(self, request, *args, **kwargs):\n print(request.data)\n user = User.objects.get(username=request.user)\n if user.is_doctor:\n doc_mobile = request.data['doc_mobile']\n doc_spec = request.data['doc_spec']\n doctor = Doctor.objects.create(user=user, doc_mobile=doc_mobile, doc_spec=doc_spec)\n print(doctor)\n\n serializer = DoctorSerializer(doctor, many=False)\n response = {'doctor': serializer.data}\n return Response(response, status=status.HTTP_200_OK)\n else:\n response = {'Message': \"you are not assigned as doctor\"}\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"Sesuraj-git/healthApp","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"74029024579","text":"def logger(\n messaging,\n level,\n filename=None,\n filemode=None,\n encoding=None,\n formating=None,\n handlers=None,\n):\n # import logging resources\n from logging import (\n CRITICAL,\n DEBUG,\n ERROR,\n INFO,\n WARNING,\n basicConfig,\n critical,\n debug,\n error,\n info,\n warning,\n )\n\n # define level\n level = level.upper()\n\n # define basic 
config\n basicConfig(\n level=level,\n filename=filename,\n filemode=filemode,\n encoding=encoding,\n format=formating,\n )\n\n # run logging command\n if level == 'DEBUG':\n debug(messaging)\n elif level == 'INFO':\n info(messaging)\n elif level == 'WARNING':\n warning(messaging)\n elif level == 'ERROR':\n error(messaging)\n elif level == 'CRITICAL':\n critical(messaging)\n else:\n return 'Level parameter invalid. Please, Inform this correctly.'\n\n return (messaging, level)\n\n\ndef criar_pasta(caminho):\n from pathlib import Path\n\n caminho_interno = Path(caminho)\n caminho_interno.mkdir(parents=True)\n return True\n\n\ndef excluir_pasta(caminho, vazia: bool = True):\n if vazia == True:\n from pathlib import Path\n\n caminho_interno = Path(caminho)\n if caminho_interno.exists():\n caminho_interno.rmdir()\n elif vazia == False:\n from shutil import rmtree\n\n rmtree(caminho)\n return True\n\n\ndef excluir_arquivo(caminho):\n from pathlib import Path\n\n arquivo = Path(caminho)\n if arquivo.exists():\n arquivo.unlink()\n\n return True\n\n\ndef pasta_existente(caminho):\n from pathlib import Path\n\n return Path(caminho).exists()\n\n\ndef pasta_esta_vazia(caminho):\n from pathlib import Path\n\n lista_arquivos_pastas = list(Path(caminho).glob('**/*'))\n if len(lista_arquivos_pastas) == 0:\n return True\n else:\n return False\n\n\ndef arquivo_existente(caminho):\n from pathlib import Path\n\n return Path(caminho).exists()\n\n\ndef abrir_arquivo_texto(caminho, encoding='utf8'):\n from pathlib import Path\n\n arquivo = Path(caminho).read_text(encoding=encoding)\n return arquivo\n\n\ndef abrir_arquivo_em_bytes(caminho):\n from pathlib import Path\n\n arquivo = Path(caminho).read_bytes()\n return arquivo\n\n\ndef criar_arquivo_texto(caminho, data='', encoding='utf8'):\n from pathlib import Path\n\n arquivo = Path(caminho).write_text(encoding=encoding, data=data)\n return True\n\n\ndef coletar_nome_arquivo(caminho):\n from pathlib import Path\n\n if Path(caminho).exists() == True:\n arquivo = Path(caminho).stem\n return arquivo\n\n\ndef coletar_extensao_arquivo(caminho):\n from pathlib import Path\n\n if Path(caminho).exists() == True:\n arquivo = Path(caminho).suffix\n return arquivo\n\n\ndef retornar_arquivos_em_pasta(caminho, filtro='**/*'):\n from pathlib import Path\n\n arquivo = list(Path(caminho).glob(filtro))\n return arquivo\n\n\ndef renomear(caminho, nome_atual, novo_nome):\n from pathlib import Path\n\n nome_atual = Path(caminho) / nome_atual\n novo_nome = Path(caminho) / novo_nome\n if not novo_nome.exists() == True:\n novo_nome = nome_atual.rename(novo_nome)\n else:\n return False\n return novo_nome\n\n\ndef recortar(caminho_atual, caminho_novo):\n from pathlib import Path\n\n caminho_atual = Path(caminho_atual)\n caminho_novo = Path(caminho_novo)\n if not caminho_novo.exists() == True:\n caminho_novo = caminho_atual.rename(caminho_novo)\n else:\n return False\n return caminho_novo\n\n\ndef copiar_arquivo(arquivo, caminho_destino):\n from pathlib import Path\n\n arquivo = Path(arquivo)\n if arquivo.exists() == True:\n arquivo = arquivo.absolute()\n else:\n return False\n caminho_destino = Path(caminho_destino)\n if caminho_destino.exists() == True:\n from shutil import copy2\n\n caminho_destino = copy2(arquivo, caminho_destino)\n # caminho_destino = copytree(arquivo, caminho_destino) # para pastas\n else:\n return False\n return caminho_destino\n\n\ndef copiar_pasta(pasta, caminho_destino):\n from pathlib import Path\n\n pasta_var_interna = Path(pasta)\n if 
pasta_var_interna.exists() == True:\n caminho_destino_var_interna = Path(caminho_destino)\n if caminho_destino_var_interna.exists() == True:\n from shutil import copytree\n\n caminho_destino = copytree(\n pasta, caminho_destino_var_interna / pasta\n )\n else:\n return False\n else:\n return False\n\n return caminho_destino\n\n\ndef ler_variavel_ambiente(\n arquivo_config='config.ini',\n nome_bloco_config='padrao',\n nome_variavel=None,\n variavel_systema: bool = False,\n):\n import os\n from configparser import ConfigParser\n\n if variavel_systema == False:\n config = ConfigParser()\n config.read(arquivo_config)\n if not nome_variavel == None:\n bloco = dict(config[nome_bloco_config])\n return bloco[nome_variavel]\n else:\n return dict(config[nome_bloco_config])\n else:\n return os.environ.get(nome_variavel)\n\n\ndef formatar_log(*args, delimitador=';'):\n lista = list(args)\n lista_montada = ''\n lista.reverse()\n while len(lista) > 0:\n item = lista.pop()\n if len(lista) == 0:\n lista_montada += item\n else:\n lista_montada += item + delimitador\n return '%(levelname)s;' + lista_montada\n\n\ndef retornar_data_hora_atual(parametro):\n import datetime\n\n return datetime.datetime.now().strftime(parametro)\n","repo_name":"aranseiki/desktop_automation_with_test","sub_path":"lib/python_utils.py","file_name":"python_utils.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"40021184639","text":"kids, q = list(map(int, input().split(' ')))\nget_down = list()\nfor _ in range(q):\n s = input().split(' ')\n if s[0] == 'M':\n get_down.append(list(map(int, s[1:])))\n elif s[0] == 'D':\n station, min_age = list(map(int, s[1:]))\n temp = [kid[1] for kid in get_down if kid[0] <= station and kid[1] >= min_age]\n print(min(temp) if len(temp) != 0 else -1)","repo_name":"AdamZhouSE/pythonHomework","sub_path":"Code/CodeRecords/2710/58610/277767.py","file_name":"277767.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"73842880258","text":"import getopt\nimport json\nimport sys\n\n\ndef die_with_usage(err=\"Usage: \", code=0):\n print(\"\"\"ERROR: %s\n%s: where available are:\n -h/--help : print this message\n -i/--indent : set pretty-print indent (def. 
2)\n -l/--line : pretty-print individual lines\n\nPretty print JSON specified, or stdin if none.\n \"\"\" % (err, sys.argv[0]))\n sys.exit(code)\n\n\nif __name__ == '__main__':\n\n ## option parsing\n pairs = [\"h/help\", \"l/line\",\n \"i:/indent=\", ]\n shortopts = \"\".join([pair.split(\"/\")[0] for pair in pairs])\n longopts = [pair.split(\"/\")[1] for pair in pairs]\n try:\n opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)\n except getopt.GetoptError as err:\n die_with_usage(err, 2)\n\n indent = 2\n per_line = False\n try:\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n die_with_usage()\n elif o in (\"-i\", \"--indent\"):\n indent = int(a)\n elif o in (\"-l\", \"--line\"):\n per_line = True\n else:\n raise Exception(\"unhandled option\")\n except Exception as err:\n die_with_usage(err, 3)\n\n if len(args) == 0:\n args = [sys.stdin]\n else:\n args = map(lambda f: open(f), args)\n\n ## pretty print\n for a in args:\n if per_line:\n for line in a:\n print(json.dumps(json.loads(line), indent=indent))\n else:\n print(json.dumps(json.loads(a.read()), indent=indent))\n","repo_name":"fundor333/StuffWithScript","sub_path":"Python/python-scripts/jsonpretty.py","file_name":"jsonpretty.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"27802887308","text":"import json\nimport re\nimport shutil\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom itertools import cycle\nfrom pathlib import Path\nfrom time import time\nfrom typing import Literal, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport tyro\nfrom nerfstudio.process_data.colmap_utils import CameraModel, read_images_binary, run_colmap\nfrom nerfstudio.process_data.hloc_utils import run_hloc\n\nfrom nerfuser.utils.utils import avg_trans, complete_trans, compute_trans_diff, decompose_sim3, extract_colmap_pose, gen_hemispheric_poses\nfrom nerfuser.utils.visualizer import Visualizer\nfrom nerfuser.view_renderer import ViewRenderer\n\n\n@dataclass\nclass Registration:\n \"\"\"Register multiple NeRF models to a common coordinate system.\"\"\"\n\n model_dirs: list[Path]\n \"\"\"model checkpoint directories\"\"\"\n output_dir: Path = Path('outputs/registration')\n \"\"\"output directory\"\"\"\n name: Optional[str] = None\n \"\"\"if present, will continue with the existing named experiment\"\"\"\n model_method: Literal['nerfacto'] = 'nerfacto'\n \"\"\"model method\"\"\"\n model_names: Optional[list[str]] = None\n \"\"\"names of models to register\"\"\"\n model_gt_trans: Optional[Path] = None\n \"\"\"path to npy containing ground-truth transforms from the common world coordinate system to each model's local one; can be \"identity\" \"\"\"\n step: Optional[int] = None\n \"\"\"model step to load\"\"\"\n cam_info: Union[Path, list[float]] = field(default_factory=lambda: [400.0, 400.0, 400.0, 300.0, 800, 600])\n \"\"\"either path to json or cam params (fx fy cx cy w h)\"\"\"\n downscale_factor: Optional[float] = None\n \"\"\"downscale factor for NeRF rendering\"\"\"\n training_poses: Optional[list[Path]] = None\n \"\"\"paths to json containing training poses; if present, will be used to render training views and to determine the number of hemispheric poses\"\"\"\n n_hemi_poses: int = 30\n \"\"\"number of hemispheric poses; only applicable when training-poses is not present\"\"\"\n hemi_gamma_lo: float = 0\n \"\"\"lower bound of 
elevation angle for hemispheric pose sampling\"\"\"\n hemi_gamma_hi: float = np.pi / 6\n \"\"\"upper bound of elevation angle for hemispheric pose sampling\"\"\"\n render_hemi_views: bool = False\n \"\"\"use 1.3x hemispheric poses for rendering\"\"\"\n chunk_size: Optional[int] = None\n \"\"\"number of rays to process at a time\"\"\"\n filter_poses_acc_dist: Optional[float] = None\n \"\"\"starting distance of accumulation for filtering pose samples\"\"\"\n filter_poses_acc_th: Optional[float] = None\n \"\"\"threshold of distant accumulation for filtering poses samples; only applicable when filter-poses-acc-dist is present\"\"\"\n save_extras: bool = False\n \"\"\"whether to save extra outputs (distant accumulation maps)\"\"\"\n fps: Optional[int] = None\n \"\"\"if present, will use this frame rate for video output\"\"\"\n sfm_tool: Literal['hloc', 'colmap'] = 'hloc'\n \"\"\"SfM tool to use\"\"\"\n sfm_w_training_views: bool = True\n \"\"\"when render-hemi-views, set this to False to only use hemispheric views for SfM\"\"\"\n sfm_w_hemi_views: float = 1\n \"\"\"ratio of #hemi-views vs. #training-views or n-hemi-poses for SfM, within range [0, 1.3]\"\"\"\n device: str = 'cuda:0'\n \"\"\"device to use\"\"\"\n render_views: bool = False\n \"\"\"whether to render views\"\"\"\n run_sfm: bool = False\n \"\"\"whether to run SfM\"\"\"\n compute_trans: bool = False\n \"\"\"whether to compute transforms\"\"\"\n vis: bool = False\n \"\"\"whether to visualize the registration\"\"\"\n profiling: bool = False\n \"\"\"whether to enable profiling\"\"\"\n\n def main(self):\n if self.profiling:\n ts = [time()]\n profiling_dict = {}\n\n if not self.name:\n self.name = datetime.now().strftime('%m.%d_%H:%M:%S')\n output_dir = self.output_dir / self.name\n output_dir.mkdir(parents=True, exist_ok=True)\n n_models = len(self.model_dirs)\n if not self.model_names:\n self.model_names = [f'nerf{i}' for i in range(n_models)]\n if self.render_hemi_views:\n cfg = f'hemi{self.sfm_w_hemi_views:.2f}'\n if self.training_poses:\n cfg += f'_train{int(self.sfm_w_training_views)}'\n else:\n cfg = 'train' if self.training_poses else 'hemi'\n if self.filter_poses_acc_dist is not None:\n cfg += f'_acc-dist{self.filter_poses_acc_dist:.2f}'\n if self.filter_poses_acc_th is not None:\n cfg += f'_acc-th{self.filter_poses_acc_th:.4f}'\n sfm_dir = output_dir / f'{self.sfm_tool}~{cfg}'\n log_dict = {}\n for attr in ('model_dirs', 'model_method', 'model_gt_trans', 'step', 'cam_info', 'downscale_factor', 'training_poses', 'n_hemi_poses', 'hemi_gamma_lo', 'hemi_gamma_hi', 'filter_poses_acc_dist', 'filter_poses_acc_th', 'sfm_tool'):\n val = getattr(self, attr)\n if attr in {'model_dirs', 'training_poses'}:\n if val:\n val = dict(zip(self.model_names, [str(item) for item in val]))\n elif isinstance(val, Path):\n val = str(val)\n log_dict[attr] = val\n with (output_dir / f'{cfg}.json').open(mode='w') as f:\n json.dump(log_dict, f, indent=2)\n\n # nerf-to-nerf_norm transforms\n Ts_nerf_norm = []\n Ss_norm_nerf = []\n for model_dir in self.model_dirs:\n with (model_dir.parent / 'dataparser_transforms.json').open() as f:\n transforms = json.load(f)\n s = transforms['scale']\n S_nerf_norm = np.diag((s, s, s, 1)).astype(np.float32)\n Ss_norm_nerf.append(np.linalg.inv(S_nerf_norm))\n Ts_nerf_norm.append(S_nerf_norm @ complete_trans(np.array(transforms['transform'], dtype=np.float32)))\n Ts_nerf_norm = np.array(Ts_nerf_norm)\n Ss_norm_nerf = np.array(Ss_norm_nerf)\n if self.model_gt_trans:\n # gt world-to-nerf transforms\n Ts_gt_world_nerf = 
np.broadcast_to(np.identity(4, dtype=np.float32), (n_models, 4, 4)) if str(self.model_gt_trans).lower() in {'i', 'identity'} else np.load(self.model_gt_trans)\n # gt world-to-nerf_norm transforms\n Ts_gt_world_norm = Ts_nerf_norm @ Ts_gt_world_nerf\n Ts_gt_norm_world = np.linalg.inv(Ts_gt_world_norm)\n _, s = decompose_sim3(Ts_gt_world_norm)\n Ss_gt_world_norm = np.zeros((n_models, 4, 4))\n for i in range(3):\n Ss_gt_world_norm[:, i, i] = s\n Ss_gt_world_norm[:, 3, 3] = 1\n if self.training_poses:\n frames = []\n ls = []\n for training_pose in self.training_poses:\n with training_pose.open() as f:\n transforms = json.load(f)\n frames.append(transforms['frames'])\n ls.append(len(frames[-1]))\n ls = np.array(ls)\n ks = np.floor(ls * 1.3).astype(int)\n else:\n ls = np.full(n_models, self.n_hemi_poses)\n ks = np.floor(ls * 1.3).astype(int) if self.render_hemi_views else ls\n hemi_gamma_delta = self.hemi_gamma_hi - self.hemi_gamma_lo\n m = (1 + np.sqrt(1 + 2 * hemi_gamma_delta * ks / np.pi)) / 2\n ms = np.ceil(m).astype(int)\n ns = np.ceil(2 * np.pi * (m - 1) / hemi_gamma_delta).astype(int)\n rng = np.random.default_rng(0)\n\n if self.render_views:\n if self.profiling:\n ts.append(time())\n if isinstance(self.cam_info, Path):\n with self.cam_info.open() as f:\n transforms = json.load(f)\n cam_info = {'fx': transforms['fl_x'], 'fy': transforms['fl_y'], 'cx': transforms['cx'], 'cy': transforms['cy'], 'width': transforms['w'], 'height': transforms['h'], 'distortion_params': np.array((transforms['k1'], transforms['k2'], 0, 0, transforms['p1'], transforms['p2']), dtype=np.float32)}\n else:\n cam_info = dict(zip(('fx', 'fy', 'cx', 'cy', 'width', 'height'), self.cam_info))\n if self.downscale_factor:\n for pname in cam_info:\n if pname != 'distortion_params':\n cam_info[pname] /= self.downscale_factor\n cam_info['height'] = int(cam_info['height'])\n cam_info['width'] = int(cam_info['width'])\n for i in range(n_models):\n poses_norm = complete_trans(np.array(gen_hemispheric_poses(1, self.hemi_gamma_lo, gamma_hi=self.hemi_gamma_hi, m=ms[i], n=ns[i])))[np.sort(rng.permutation(ms[i] * ns[i])[:ks[i]])] if not self.training_poses or self.render_hemi_views else np.empty((0, 4, 4), dtype=np.float32)\n if self.training_poses:\n pose_dict = {frame['file_path']: np.array(frame['transform_matrix'], dtype=np.float32) for frame in frames[i]}\n poses_norm = np.concatenate((poses_norm, Ts_nerf_norm[i] @ np.array([pose_dict[k] for k in sorted(pose_dict.keys())]) @ Ss_norm_nerf[i]))\n with torch.no_grad():\n ViewRenderer(self.model_method, self.model_names[i], self.model_dirs[i], load_step=self.step, chunk_size=self.chunk_size, device=self.device).render_views(poses_norm, cam_info, output_dir, filter_poses_acc_dist=self.filter_poses_acc_dist, save_extras=self.save_extras, animate=self.fps)\n np.save(output_dir / f'poses~{self.model_names[i]}_norm.npy', poses_norm)\n if self.profiling:\n elapsed_time = time() - ts.pop()\n print(f'Rendering views takes {elapsed_time:.3g}s.')\n profiling_dict['rendering_views'] = elapsed_time\n\n if self.run_sfm:\n if self.profiling:\n ts.append(time())\n shutil.rmtree(sfm_dir, ignore_errors=True)\n sfm_dir.mkdir(parents=True)\n # uses hemi views, maybe training views\n c1 = not self.training_poses or self.render_hemi_views\n # uses hemi + training views\n c2 = self.training_poses and self.render_hemi_views and self.sfm_w_training_views\n # uses training views only\n c3 = self.training_poses and not self.render_hemi_views\n for i, model_name in enumerate(self.model_names):\n if 
self.filter_poses_acc_dist is not None:\n dist_accs = np.load(output_dir / model_name / f'dist_accs_{self.filter_poses_acc_dist:.2f}.npy')\n if c1:\n ids = set(rng.permutation(ks[i])[:np.floor(ls[i] * self.sfm_w_hemi_views).astype(int)])\n for f in (output_dir / model_name / 'imgs').iterdir():\n id = int(f.stem)\n if c1 and id in ids and (self.filter_poses_acc_dist is None or dist_accs[id] >= self.filter_poses_acc_th) or c2 and id >= ks[i] or c3:\n (sfm_dir / f'{model_name}_{f.name}').symlink_to(f.absolute())\n run_func = run_hloc if self.sfm_tool == 'hloc' else run_colmap\n run_func(sfm_dir, sfm_dir, CameraModel.OPENCV)\n if self.profiling:\n elapsed_time = time() - ts.pop()\n print(f'Running SfM takes {elapsed_time:.3g}s.')\n profiling_dict['running_sfm'] = elapsed_time\n\n if self.compute_trans or self.vis:\n images = read_images_binary(sfm_dir / 'sparse/0/images.bin')\n poses_sfm = defaultdict(list)\n for im_data in images.values():\n fname = im_data.name\n r = re.fullmatch(r'(.+)_(\\d+).png', fname)\n model_name = r[1]\n id = int(r[2])\n poses_sfm[model_name].append((id, extract_colmap_pose(im_data)))\n\n if self.compute_trans:\n if self.profiling:\n ts.append(time())\n # sfm-to-nerf_norm transforms\n Ts_sfm_norm = {}\n for model_name in self.model_names:\n n = len(poses_sfm[model_name])\n print(f'Got {n} poses for {model_name} from SfM.')\n T_sfm_norm_path = output_dir / f'T~{cfg}~{model_name}_norm.npy'\n if n < 2:\n T_sfm_norm_path.unlink(missing_ok=True)\n continue\n poses_norm = np.load(output_dir / f'poses~{model_name}_norm.npy')\n s_lst = []\n for i in range(n - 1):\n for j in range(i + 1, n):\n tAi_norm = poses_norm[poses_sfm[model_name][i][0]][:3, 3]\n tAj_norm = poses_norm[poses_sfm[model_name][j][0]][:3, 3]\n tAi_sfm = poses_sfm[model_name][i][1][:3, 3]\n tAj_sfm = poses_sfm[model_name][j][1][:3, 3]\n s_lst.append(np.linalg.norm(tAi_norm - tAj_norm) / np.linalg.norm(tAi_sfm - tAj_sfm))\n s = np.median(s_lst)\n T = avg_trans([poses_norm[id] @ np.diag((s, s, s, 1)).astype(np.float32) @ np.linalg.inv(pose_sfm) for id, pose_sfm in poses_sfm[model_name]], s=s, avg_func=np.median)\n Ts_sfm_norm[model_name] = T\n np.save(T_sfm_norm_path, T)\n if not Ts_sfm_norm:\n print(f'failed to recover any transform')\n exit()\n Ts_norm_sfm = {model_name: np.linalg.inv(T_sfm_norm) for model_name, T_sfm_norm in Ts_sfm_norm.items()}\n if self.model_gt_trans:\n # mean world-to-sfm transform\n T_world_sfm = avg_trans([Ts_norm_sfm[model_name] @ Ts_gt_world_norm[i] for i, model_name in enumerate(Ts_sfm_norm)])\n T_sfm_world = np.linalg.inv(T_world_sfm)\n np.save(output_dir / f'T~{cfg}.npy', T_world_sfm)\n else:\n T_sfm_world = np.identity(4, dtype=np.float32)\n for i, model_name in enumerate(self.model_names):\n if model_name not in Ts_sfm_norm:\n print(f'failed to recover {model_name}_norm-to-world transform')\n continue\n T_pred = T_sfm_world @ Ts_norm_sfm[model_name]\n print(f'\\n{model_name}_norm-to-world:')\n print('pred transform\\n', T_pred)\n if self.model_gt_trans:\n T_gt = Ts_gt_norm_world[i]\n print('gt transform\\n', T_gt)\n r, t, s = compute_trans_diff(T_pred, T_gt)\n print(f'rotation error {r:.3g}')\n print(f'translation error {t:.3g}')\n print(f'scale error {s:.3g}')\n np.savez(output_dir / f'eval~{cfg}~{model_name}_norm.npz', r=r, t=t, s=s, n_sfm_input_imgs=len([p for p in sfm_dir.iterdir() if re.fullmatch(fr'{model_name}_\\d+.png', p.name)]), n_sfm_recovered_poses=sum(len(poses_sfm[model_name]) for model_name in self.model_names))\n if self.profiling:\n elapsed_time = time() - 
ts.pop()\n print(f'Computing transforms takes {elapsed_time:.3g}s.')\n profiling_dict['computing_trans'] = elapsed_time\n\n if self.profiling:\n elapsed_time = time() - ts.pop()\n print(f'In total it takes {elapsed_time:.3g}s.')\n profiling_dict['total'] = elapsed_time\n profiling_dict_ = np.load(output_dir / f'profiling~{cfg}.npz') if (output_dir / f'profiling~{cfg}.npz').exists() else {}\n for k in profiling_dict_:\n if k not in profiling_dict:\n profiling_dict[k] = profiling_dict_[k]\n np.savez(output_dir / f'profiling~{cfg}.npz', **profiling_dict)\n\n if self.vis:\n T_world_sfm = np.load(output_dir / f'T~{cfg}.npy') if self.model_gt_trans else np.identity(4, dtype=np.float32)\n T_sfm_world = np.linalg.inv(T_world_sfm)\n _, s = decompose_sim3(T_world_sfm)\n S_world_sfm = np.diag((s, s, s, 1)).astype(np.float32)\n Ts_norm_sfm = {}\n for model_name in self.model_names:\n T_sfm_norm_path = output_dir / f'T~{cfg}~{model_name}_norm.npy'\n if T_sfm_norm_path.exists():\n Ts_norm_sfm[model_name] = np.linalg.inv(np.load(T_sfm_norm_path))\n colors = cycle(plt.cm.tab20.colors if self.model_gt_trans else plt.cm.tab10.colors)\n vis = Visualizer(show_frame=True)\n vis.add_trajectory([T_sfm_world @ Ts_norm_sfm[model_name] for model_name in Ts_norm_sfm], pose_spec=0, cam_size=0.3, color=next(colors))\n color = next(colors)\n if self.model_gt_trans:\n vis.add_trajectory(Ts_gt_norm_world, pose_spec=0, cam_size=0.28, color=color)\n for i, model_name in enumerate(self.model_names):\n color = next(colors)\n if len(poses_sfm[model_name]):\n vis.add_trajectory(T_sfm_world @ np.array([pose_sfm[1] for pose_sfm in poses_sfm[model_name]]) @ S_world_sfm, cam_size=0.3, color=color)\n color = next(colors)\n if self.model_gt_trans:\n poses_norm = np.load(output_dir / f'poses~{model_name}_norm.npy')\n vis.add_trajectory(Ts_gt_norm_world[i] @ poses_norm @ Ss_gt_world_norm[i], cam_size=0.28, color=color)\n vis.show()\n\n\nif __name__ == '__main__':\n tyro.cli(Registration).main()\n","repo_name":"ripl/nerfuser","sub_path":"nerfuser/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":17370,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"80"} +{"seq_id":"15075682236","text":"from django.shortcuts import render, HttpResponse\nfrom django.http import HttpResponse\nfrom App.models import Familiar\nfrom django.template import Template, context, loader\n\n# Create your views here.\n\ndef familiar(request):\n\n familiar1 = Familiar(nombre=\"Martin Alejandro Gelaf\", parentesco=\"Padre\", dni=\"20430118\", fechaDeNacimiento=\"1969-07-26\")\n familiar1.save()\n\n familiar2 = Familiar(nombre=\"Maria Ines Echague\", parentesco=\"Madre\", dni=\"14386695\", fechaDeNacimiento=\"1963-08-16\")\n familiar2.save()\n\n familiar3 = Familiar(nombre=\"Florencia Cecilia Gelaf\", parentesco=\"Hermana\", dni=\"34997842\", fechaDeNacimiento=\"1989-12-07\")\n familiar3.save()\n\n plantilla = loader.get_template(\"template.html\")\n\n familiares = [familiar1,familiar2,familiar3]\n\n a_mostrar = plantilla.render({\"familiares\":familiares})\n\n return HttpResponse(a_mostrar)\n","repo_name":"MatGelaf/NuestroPrimerMVT","sub_path":"NuestroPrimerMVT/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"23129934800","text":"import threading\nimport subprocess\nimport os\n\ndef runTouch():\n subprocess.call(\"python3 \" + 
os.path.dirname(os.path.realpath(__file__)) + \"/touch.py\", shell=True)\n\ndef runPage():\n subprocess.call(\"chromium-browser --allow-insecure-localhost --start-fullscreen \\\"\" + os.path.dirname(os.path.realpath(__file__)) + \"/index.html\\\"\", shell=True)\n\ntouchThread = threading.Thread(target=runTouch)\ntouchThread.start()\npageThread = threading.Thread(target=runPage)\npageThread.start()\n\ntouchThread.join()\npageThread.join()\n","repo_name":"bram0101/Team-Deurklink-Smart-Mirror","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"40157255506","text":"from django.contrib import admin\nfrom django.conf.urls.defaults import patterns, url\nfrom models import Location, LocationType\nfrom djangocms_store_locator.views import get_lat_long, get_locations\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass LocationAdmin(admin.ModelAdmin):\n list_display = ('name', 'address', 'phone', 'url', 'has_image', 'active')\n list_filter = ('active', 'location_types')\n search_fields = ('name', 'address', 'description')\n \n fieldsets = (\n (None, {\n 'fields': (('name', 'active'),'location_types',)\n }),\n (_('Address'), {\n 'fields': ('address', ('latitude', 'longitude'))\n }),\n (_('Other information'), {\n 'fields': ('phone', 'url', 'description', 'image')\n }),\n )\n class Media:\n js = (\"djangocms_store_locator/js/store_locator_admin.js\",)\n\n def get_urls(self):\n old_urls = super(LocationAdmin, self).get_urls()\n new_urls = patterns('',\n url(r'^get_lat_long/$', get_lat_long, name='get_lat_long_url'),\n url(r'^get_locations/$', get_locations, name='get_locations_url'),\n )\n return new_urls + old_urls\n\nadmin.site.register(Location, LocationAdmin)\nadmin.site.register(LocationType)\n\n","repo_name":"alesdotio/djangocms-store-locator","sub_path":"djangocms_store_locator/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"80"} +{"seq_id":"37711717420","text":"import time\nimport logging\nimport datetime\nimport glob\nfrom google.cloud import pubsub\n\nTOPIC = 'instashop'\n\ndef publish(topic, events):\n numobs = len(events)\n if numobs > 0:\n for event_data in events:\n publisher.publish(topic, event_data)\n\n\ndef simulate(topic):\n \n while True:\n topublish = list()\n files = glob.glob(\"./orders/*.json\")\n\n for file_name in files:\n file = open(file_name, \"r\")\n topublish.append(file.read())\n print(\"published transations count - \" + str(len(topublish)) + \" , Current Timestamp - \" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\n publish(topic, topublish)\n time.sleep(1) \n\nif __name__ == '__main__':\n \n # create Pub/Sub notification topic\n logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n publisher = pubsub.PublisherClient()\n event_type = publisher.topic_path('namita-186919',TOPIC)\n try:\n publisher.get_topic(event_type) \n logging.info('Reusing pub/sub topic {}'.format(TOPIC))\n except:\n publisher.create_topic(event_type)\n logging.info('Reusing pub/sub topic {}'.format(TOPIC))\n \n 
simulate(event_type)\n","repo_name":"emolamol/dsw","sub_path":"dataflow-instashop/src/python/send_transaction_data.py","file_name":"send_transaction_data.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"28999966930","text":"#import numpy as np\n#import pandas as pd\n#import matplotlib.pyplot as plt\n\nfrom glob import glob\nimport os\n\nfrom subprocess import Popen, PIPE, STDOUT\n\nallfiles = glob(\"data/*/*.enc\")\n\nfor filepath in allfiles:\n _, keys, filename = filepath.split(\"/\")\n name, _ = filename.split(\".\")\n file = \"data/%s/%s.csv\"%(keys,name)\n #print(keys, name, os.path.isfile(file))\n if not os.path.isfile(file):\n script = \"target/debug/decode_data %s %s\"%(keys, name)\n print(\"**********\\n\", script)\n #!$script\n with Popen(script, shell=True, stdout=PIPE, stderr=STDOUT, \n bufsize=1, universal_newlines=True) as p:\n for line in p.stdout:\n print(line, end='') # process line here\n ","repo_name":"rickardbrannvall/teewondee","sub_path":"ewma_GVI/run_decoding.py","file_name":"run_decoding.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74368310673","text":"from bisect import bisect_left, bisect\nfrom typing import List\n\n\nclass Solution:\n def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:\n res = []\n nums1.sort()\n nums2.sort()\n for n1 in nums1:\n mys = bisect_left(nums2, n1)\n if mys < len(nums2) and nums2[mys] == n1:\n res.append(n1)\n\n return list(set(res))\n\n\nsol = Solution()\nprint(sol.intersection([4, 9, 5], [9, 4, 9, 8, 4]))\n","repo_name":"JSoi/Algorithm_Python","sub_path":"leetcode/binarysearch/349_intersection-of-two-arrays.py","file_name":"349_intersection-of-two-arrays.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"20219072560","text":"# 切割视频到每一帧用来学习动作!!!!!!!!! 
学动作要进行图片翻转这样方便.\nimport cv2\nimport os\n# 要提取视频的文件名,隐藏后缀\n\n# 在这里把后缀接上\nvideo_path = 's.mp4'\n# print(video_path)\ntimes = 0\n# 提取视频的频率,每1帧提取一个\nframeFrequency = 2 #//设置频率即可\n# 输出图片到当前目录vedio文件夹下\noutPutDirName = \"output\"\n# print(outPutDirName)\nif not os.path.exists(outPutDirName):\n # 如果文件目录不存在则创建目录\n os.makedirs(outPutDirName)\ncamera = cv2.VideoCapture(video_path)\nwhile True:\n times += 1\n res, image = camera.read()\n image=cv2.flip(image,1)\n if not res:\n print('not res , not image')\n break\n if times % frameFrequency == 0:\n cv2.imwrite(outPutDirName + '\\\\' + str(times)+'.jpg', image)\n print(outPutDirName + '\\\\' + str(times)+'.jpg')\nprint('图片提取结束')\ncamera.release()","repo_name":"zhangbo2008/make_mp4_to_frame_data","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"10327396328","text":"import logging\nfrom asyncio import sleep\nfrom datetime import timedelta\n\nfrom aiogram import types as t\n\nfrom bot import client\nfrom libs.command_parser import ParsedArgs\nfrom libs.user import User\nfrom locales import other, text, buttons\nfrom src import filters as f\nfrom src import utils as u\nfrom src.instances import Database\n\n\n@other.parsers.purge(\n f.message.is_chat,\n f.bot.has_permission(\"can_delete_messages\"),\n f.user.has_permission(\"can_delete_messages\"),\n u.write_action,\n u.get_help\n)\nasync def purge(msg: t.Message, parsed: ParsedArgs):\n from_id = msg.reply_to_message.message_id if msg.reply_to_message else msg.message_id - 1\n to_id = from_id - parsed.count\n\n message_ids = list(range(from_id, to_id, -1))\n message_ids.append(msg.message_id)\n\n await process_purge(message_ids)\n await msg.answer(\n text.chat.admin.purge.format(\n count=parsed.count\n ),\n reply_markup=buttons.delete_this.menu\n )\n\n\n@other.parsers.clear_history(\n f.message.is_chat,\n f.bot.has_permission(\"can_delete_messages\"),\n f.user.has_permission(\"can_delete_messages\"),\n u.write_action,\n u.get_help\n)\nasync def clear_history(msg: t.Message, parsed: ParsedArgs):\n parsed.targets: list[User]\n parsed.time: timedelta\n messages = []\n\n await u.raise_permissions_errors(parsed.targets, await msg.chat.get_administrators())\n if parsed.targets:\n for user in parsed.targets:\n messages += [\n m.message_id for m in\n Database.get_messages(user_id=user.id, chat_id=msg.chat.id, delta=parsed.time)\n ]\n\n await process_purge(messages)\n await msg.answer(\n text.chat.admin.purge.format(\n count=len(messages)\n ),\n reply_markup=buttons.delete_this.menu\n )\n\n\nasync def process_purge(message_ids: list[User]):\n if not message_ids:\n return\n chat_id = t.Chat.get_current().id\n\n Database.disable_autocommit()\n for id in message_ids:\n Database.delete_messages(chat_id=chat_id, message_id=id)\n Database.enable_autocommit()\n\n for ids in u.break_list_by_step(message_ids, 100):\n try:\n await client.delete_messages(chat_id, ids)\n await sleep(0.3)\n except Exception as e:\n logging.warning(f\"Purge warning: {e.__class__.__name__}:{e.args[0]}\")\n","repo_name":"ToolKitProject/ToolKitBot","sub_path":"handlers/chat/admin/purge.py","file_name":"purge.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"84"} +{"seq_id":"11746250439","text":"import json\n\nimport yaml\nfrom schema import And, Or, Schema, SchemaError\n\n\nclass Config:\n \"\"\"configuration settings for 
the system with user input validation\"\"\"\n\n def __init__(self):\n config = {}\n config[\"general\"] = self._load_config(\"src/general.yaml\")\n config[\"spelling\"] = self._load_json(\"src/spelling.json\")\n config[\"models\"] = self._load_config(\"src/models.yaml\")\n self.valid_config = self._check(config)\n self.settings = config\n\n def _check(self, config):\n \"\"\"Check that config is not throwing any errors\n\n Parameters\n ----------\n config:dict\n configuration settings for the system\n\n Returns\n -------\n bool\n True if there are no errors in the configuration file\n\n Raises\n ------\n SchemaError\n Error describing what is wrong with the config file\n \"\"\"\n try:\n self._validate_config(config)\n return True\n except SchemaError as exception:\n print(exception)\n return False\n\n def _validate_config(self, config: dict) -> None:\n \"\"\"validates configuration file according to pre-defined schema\n\n Parameters\n ----------\n config:dict\n a dictionary containing the configuration settings\n\n Raises\n ------\n SchemaError\n If configuration file does not match the expected schema\n\n Returns\n -------\n None\n \"\"\"\n schema = Schema(\n {\n \"general\": {\n \"raw_data_path\": str,\n \"additional_stopwords\": list,\n \"lemmatize\": bool,\n },\n \"models\": {\n str: {\n \"max_features\": Or(And(int, self._greater_than_zero), None),\n \"ngram_range\": (\n And(int, self._greater_than_zero),\n And(int, self._greater_than_zero),\n ),\n \"min_df\": Or(\n And(float, self._between_zero_and_one),\n And(int, self._greater_than_zero),\n ),\n \"max_df\": Or(\n And(float, self._between_zero_and_one),\n And(int, self._greater_than_zero),\n ),\n \"n_topics\": And(int, self._greater_than_zero),\n \"n_top_words\": And(int, self._greater_than_zero),\n \"max_iter\": {\n \"lda\": And(int, self._greater_than_zero),\n \"nmf\": And(int, self._greater_than_zero),\n },\n \"lowercase\": bool,\n \"topic_labels\": {\n \"lda\": Or(None, [str]),\n \"nmf\": Or(None, [str]),\n },\n }\n },\n \"spelling\": {str: int},\n }\n )\n\n schema.validate(config)\n return None\n\n def _greater_than_zero(self, n: int):\n \"\"\"function to check if n is greater than zero\n\n Parameters\n ----------\n n:int\n a numeric value to check\n\n Returns\n -------\n bool\n True if n is greater than zero\n \"\"\"\n return n > 0\n\n def _between_zero_and_one(self, n: float):\n \"\"\"function to check if n is between zero and one\n\n Parameters\n ----------\n n:float\n a numeric value to check\n\n Returns\n -------\n bool\n True if n is between zero and one\"\"\"\n return 0.0 <= n <= 1.0\n\n def _load_config(self, filepath: str) -> dict:\n \"\"\"Loads configuration settings from given filepath to\n yaml file\n\n Parameters\n ----------\n filepath : str\n The relative filepath to the yaml file\n\n Returns\n -------\n dict\n the configuration settings with key-value pairs\n \"\"\"\n if type(filepath) is not str:\n raise TypeError(\"filepath must be a string\")\n\n with open(filepath, \"r\") as file:\n config = yaml.load(file, Loader=yaml.Loader)\n return config\n\n def _load_json(self, filepath: str) -> dict:\n \"\"\"Loads json file as dictionary\n Parameters\n ----------\n filepath:str\n the filepath to where the json file is stored\n Returns\n -------\n dict\n the json file in dict format\n \"\"\"\n if type(filepath) is not str:\n raise TypeError(\"filepath must be a string\")\n with open(filepath, \"r\") as file:\n json_data = json.load(file)\n return 
json_data\n","repo_name":"datasciencecampus/consultation_nlp","sub_path":"src/modules/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"84"} +{"seq_id":"27700634899","text":"import unittest\nfrom allcode.controllers.image_converters.SIFT_image_to_vec import SIFTImageToVecConverter\nfrom allcode.controllers.DB_controllers.DB_controller_csv import DBControllerCSV\nimport pickle as pl\nimport numpy as np\n\n\nclass MyTestCase(unittest.TestCase):\n def test_vectorize_image(self):\n siftbowmodel = pl.load(open(\"./stored_models/SIFTmodel.pl\", \"rb\"))\n test_image_loc = \"./data/cat_dog_images/cat.10.jpg\"\n image_converter = SIFTImageToVecConverter()\n image_keypoint_mat = image_converter.get_keypoint_matrix_from_image_loc(test_image_loc)\n res1 = siftbowmodel.get_vector_representation(image_keypoint_mat, np.zeros(image_keypoint_mat.shape[0]))\n res2 = siftbowmodel.classify_images(image_keypoint_mat, [0])\n\n def test_db_controller_CV(self):\n test_image_loc = \"./data/cat_dog_images/cat.10.jpg\"\n K = 5\n image_converter = SIFTImageToVecConverter()\n csv_db_controller = DBControllerCSV(\"./data/cat_dog_images\")\n siftbowmodel = pl.load(open(\"./stored_models/SIFTmodel.pl\", \"rb\"))\n\n image_keypoint_mat = image_converter.get_keypoint_matrix_from_image_loc(test_image_loc)\n image_vec_rep = siftbowmodel.get_vector_representation(image_keypoint_mat, np.zeros(image_keypoint_mat.shape[0]))\n\n knn_res = csv_db_controller.get_knn(image_vec_rep, K)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cornederuijt/CVexperiments","sub_path":"allcode/unit_tests/siftvbowtest.py","file_name":"siftvbowtest.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"14285978974","text":"from collections import Counter\nimport sqlite3\n\nconn = sqlite3.connect(\"duke/duke.db\")\ndb = conn.cursor()\n\n\ndef format(bind, data, title):\n return {'bindto': bind,\n 'data': {'columns': [[k, v] for k, v in data.items()],\n 'type': 'donut'},\n 'donut': {'title': title}}\n\n\ndef get_activities(chem):\n return [act[0] for act in db.execute('''\n SELECT activity\n FROM aggregac\n WHERE chem==\"%s\"\n ''' % chem).fetchall()]\n\n\ndef get_super_activities(act):\n return [sact[0] for sact in db.execute('''\n SELECT superact\n FROM superact\n WHERE activity==\"%s\"\n ''' % act).fetchall()]\n\n\ndef make_summary(f, g, s):\n summary = {\"acts\": {},\n \"sup_acts\": {},\n \"ethnobot\": {},\n \"countries\": {},\n \"cnames\": set(),\n \"taxon\": \"\"}\n \n test = db.execute('''\n SELECT fnfnum, taxon\n FROM fnftax\n WHERE family=\"%s\" AND genus=\"%s\" AND species=\"%s\"\n ''' % (f, g, s)).fetchall()\n # If it fails, it was from ethnobot.\n if test != []:\n fnfnum, summary[\"taxon\"] = test[0]\n \n summary[\"cnames\"] = {name[0] for name in\n db.execute('''\n SELECT cnnam\n FROM common_names\n WHERE fnfnum==\"%s\"\n ''' % fnfnum).fetchall()}\n\n # Don't forget to grab dosages later.\n # The dosages in aggregac and dosages are different.\n chems_classes = db.execute('''\n SELECT chem, chemclass\n FROM farmacy_new\n WHERE fnfnum==\"%s\"\n ''' % fnfnum).fetchall()\n summary[\"chems\"] = [chem[0] for chem in chems_classes]\n\n acts = []\n for c in summary[\"chems\"]:\n acts.extend(get_activities(c))\n acts_sum = {a: acts.count(a) for a in set(acts)}\n summary[\"acts\"] = 
dict(Counter(acts_sum).most_common(10))\n\n super_acts = []\n for a in acts:\n super_acts.extend(get_super_activities(a))\n super_acts_sum = {sa: super_acts.count(sa) for sa in set(super_acts)}\n summary[\"sup_acts\"] = dict(Counter(super_acts_sum).most_common(10))\n\n # summary.update({\"taxon\": taxon,\n # \"cnames\": cnames,\n # # The one with classes was used since the other is only\n # # used as an aggregate.\n # \"chems\": chems_classes,\n # \"acts\": acts_sum,\n # \"sup_acts\": super_acts_sum})\n\n test = db.execute('''\n SELECT activity, cname, country, taxon\n FROM ethnobot\n WHERE family=\"%s\" AND genus=\"%s\" AND species=\"%s\"\n ''' % (f, g, s)).fetchall()\n if test != []:\n ethnobot = [t[0] for t in test]\n summary[\"ethnobot\"] = dict(Counter({e: ethnobot.count(e) for e in set(ethnobot)}).most_common(10))\n summary[\"cnames\"] = summary[\"cnames\"] | {t[1] for t in test if t[1] != \"\"}\n countries = [t[2] for t in test]\n summary[\"countries\"] = dict(Counter({c: countries.count(c) for c in set(countries)}).most_common(10))\n summary[\"taxon\"] = t[3]\n\n return summary\n\n\ndef get_summary(f, g, s):\n temp = make_summary(f, g, s)\n temp['acts'] = format(\"#activities-chart\",\n temp['acts'],\n \"Chemical Activities\")\n temp['sup_acts'] = format(\"#superactivities-chart\",\n temp['sup_acts'],\n \"Chemical Syndromes\")\n temp['ethnobot'] = format(\"#ethno-chart\",\n temp['ethnobot'],\n \"Ethnobotanical Uses\")\n temp['countries'] = format(\"#countries-chart\",\n temp['countries'],\n \"Uses by Country\")\n return temp\n","repo_name":"ebusha/herbfest","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"5673508036","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nclass MaintenanceWindow(models.Model):\n start = models.DateTimeField(auto_now_add=True, db_index=True)\n estimated_end = models.DateTimeField(null=True)\n actual_end = models.DateTimeField(null=True, blank=True, editable=False, db_index=True)\n description = models.TextField(max_length=500)\n responsible = models.ForeignKey(User, related_name='app_maintenances', editable=False)\n\n class Meta():\n ordering = ['-start']\n get_latest_by = 'start'\n\n def ended(self):\n return True if self.actual_end else False\n ended.boolean = True\n\n @classmethod\n def active_maintenance(cls):\n try:\n return cls.objects.latest() if not cls.objects.latest().ended() else None\n except cls.DoesNotExist:\n return None","repo_name":"bireme/opentrials","sub_path":"opentrials/maintenance/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"84"} +{"seq_id":"33262024454","text":"\"\"\"\nImplementation of Travelling Salesman Heuristic Algorithm\nKatie Foster\n\"\"\"\n\nimport numpy as np\nimport time\nimport timeit\n\ndef read_tsp(filename):\n ''' Reads a TSPLIB instance given by filename and returns the corresponding\n distance matrix C. Assumes that the edge weights are given in lower diagonal row\n form. 
'''\n f = open(filename,'r')\n n, C = 0, None\n i, j = -1, 0\n for line in f:\n words = line.split()\n if words[0] == 'DIMENSION:':\n n = int(words[1])\n C = np.zeros(shape=(n,n))\n elif words[0] == 'EDGE_WEIGHT_SECTION':\n i = 0 # Start indexing of edges\n elif i >= 0:\n for k in range(len(words)):\n if words[k] == 'EOF':\n break\n elif words[k] == '0':\n i += 1 # Continue to next row of the matrix\n j = 0\n else:\n C[i,j] = int(words[k])\n C[j,i] = int(words[k])\n j += 1\n return C, n\n\nC, n = read_tsp(\"gr17.tsp\")\n\ndef get_distance(C, path):\n \"\"\" Returns the sum of all the edge lengths in a certain path\n \"\"\"\n dist = 0\n for i in range(len(path)-1):\n dist += C[path[i], path[i+1]]\n dist += C[path[len(path)-1], path[0]]\n return dist\n\n\ndef greedyHeuristic(C, num_nodes):\n \"\"\" Nearest Neighbor:\n starts at the first city, repeatedly adds the closest unvisited city, until\n the path has visited all cities\n This is the greediest of algorithms, because it adds the closest city, which\n will optimize the path in the short term, but does not make any long term\n calculations at all.\n \"\"\"\n path = []\n total_dist = 0\n for i in range(num_nodes):\n dist = float('inf') # set initial distance to infinity\n nearest = None\n for j in range(num_nodes): # add the index of the min of C[i, :]\n if C[i, j] < dist and i != j and j not in path:\n dist = C[i, j]\n nearest = j\n path.append(nearest)\n total_dist += dist\n return path, get_distance(C, path)\n\npath, dist = greedyHeuristic(C, n)\nprint(path, dist)\n# print(len(set(path)) == len(path))\n# print(len(path) == n)\n\ndef localSearch(C, curr_path):\n \"\"\" Starts with potential solution and looks for improvments until it can't\n find any more improvments\n curr_path is a python list of nodes, representing a path\n 2-opt: removes 2 edges then reconnects optimally\n \"\"\"\n\n num_nodes = len(curr_path)\n max_iterations = 100\n curr_dist = get_distance(C, curr_path)\n for i in range(max_iterations):\n # if no changes are made, break loop\n for j in range(num_nodes):\n for k in range(j, num_nodes): # might need j+1\n # swap two edges\n new_path = curr_path[:]\n new_path[k-1] = curr_path[k]\n new_path[k] = curr_path[k-1]\n new_dist = get_distance(C, new_path)\n if new_dist < curr_dist and len(set(new_path)) == len(new_path):\n curr_path = new_path\n curr_dist = new_dist\n return curr_path, int(curr_dist)\n\n\n\n\ndef test():\n \"\"\" Tests runtime and optimality gaps of greedy heuristic and local search\n\n \"\"\"\n optimal_distances = {\n \"gr17\": 2085,\n \"gr21\": 2707,\n \"gr24\": 1272,\n \"gr48\": 5046\n }\n\n filenames = ['gr17.tsp', 'gr21.tsp', 'gr24.tsp', 'gr48.tsp']\n for file in filenames:\n print(file, \":\")\n C, n = read_tsp(file)\n optimal_dist = optimal_distances[file[0:4]]\n path, heuristic_dist = greedyHeuristic(C, n)\n print(\"Distance from Greedy Heuristic:\", heuristic_dist)\n print(\"Greedy Heuristic optimality ratio:\", (heuristic_dist-optimal_dist)/optimal_dist)\n t = timeit.Timer('greedyHeuristic(C, n)', globals=globals())\n print(\"Time of Greedy Heuristic:\", t.timeit(20))\n local_dist = localSearch(C, path)[1]\n print(\"Distance from Local Search:\", local_dist)\n print(\"Local Search optimality ratio:\", (local_dist-optimal_dist)/optimal_dist)\n t = timeit.Timer('localSearch(C, path)', globals=globals())\n print(\"Time of Local Search:\", t.timeit(20))\n print(\"Optimal Distance:\", optimal_dist)\n print(\"\")\n\n# import __builtin__\n# 
__builtin__.__dict__.update(locals())\n\ntest()\n\n\"\"\"\nResults from test:\n\ngr17.tsp :\nDistance from Greedy Heuristic: 4865.0\nGreedy Heuristic optimality ratio: 1.3333333333333333\nTime of Greedy Heuristic: 0.0014523319550789893\nDistance from Local Search: 4220\nLocal Search optimality ratio: 1.023980815347722\nTime of Local Search: 1.370985360990744\nOptimal Distance: 2085\n\ngr21.tsp :\nDistance from Greedy Heuristic: 7320.0\nGreedy Heuristic optimality ratio: 1.7041004802364241\nTime of Greedy Heuristic: 0.0014145320164971054\nDistance from Local Search: 5422\nLocal Search optimality ratio: 1.0029553010712966\nTime of Local Search: 1.4083389870356768\nOptimal Distance: 2707\n\ngr24.tsp :\nDistance from Greedy Heuristic: 3446.0\nGreedy Heuristic optimality ratio: 1.7091194968553458\nTime of Greedy Heuristic: 0.0014453090261667967\nDistance from Local Search: 2784\nLocal Search optimality ratio: 1.1886792452830188\nTime of Local Search: 1.4419725780026056\nOptimal Distance: 1272\n\ngr48.tsp :\nDistance from Greedy Heuristic: 19273.0\nGreedy Heuristic optimality ratio: 2.8194609591755846\nTime of Greedy Heuristic: 0.0015203019720502198\nDistance from Local Search: 15128\nLocal Search optimality ratio: 1.9980182322631788\nTime of Local Search: 1.3918146709911525\nOptimal Distance: 5046\n\n\n\nIt seems like my greedy heurisitc algorithm does a decent job with smaller\ndatasets, but does a really bad job with big sets. Also the local search\nhelps optimize the algorithm in every instance, which is good.The local search\nhowever takes quite a while, while the greedy heuristic is very fast.\n\n\"\"\"\n","repo_name":"katie608/dsa","sub_path":"hw10/hw10.py","file_name":"hw10.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"16206469339","text":"import argparse\nimport multiprocessing as mp\nimport os\nimport utils\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--resize', type=tuple, default=(224, 224), metavar='RES', help=\"Size of the resized frame\")\n args = parser.parse_args()\n\n # create subfolder to store frames if it does not already exist\n if not os.path.exists(utils.images_path): os.makedirs(utils.images_path)\n\n # recover names of all videos\n video_names = utils.get_train_test_video_names()\n all_video_names = video_names['train'] + video_names['test']\n \n print(\"Number of processors: \", mp.cpu_count())\n print(\"Number of videos to process\", len(all_video_names))\n\n pool = mp.Pool(mp.cpu_count())\n # save resized frames of all videos\n for i in range(len(all_video_names)):\n pool.apply_async(utils.save_frames, args=(all_video_names[i], args.resize))\n pool.close()\n pool.join()","repo_name":"anvdn/SurgicalPhaseRecognition","sub_path":"videos-to-images.py","file_name":"videos-to-images.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"26856811654","text":"# -*- coding: UTF-8 -*-\nimport datetime\n\nfrom ..error import QError\nfrom ..command import Command\nfrom ..ticket import Ticket\n\n\nclass CommandLast(Command):\n \"\"\"\n Show the latest started and completed tickets.\n \"\"\"\n def run(self):\n \"\"\"\n usage: q last [--all]\n \"\"\"\n from ..q import Q\n\n if self.opts.get('all'):\n LIMIT = '1900-01-01'\n else:\n LIMIT = str(datetime.date.today() + datetime.timedelta(days=-7))\n\n results = {}\n for code in 
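# In videos-to-images.py above, --resize is declared with type=tuple; argparse
# would then call tuple() on the raw string, so "--resize 224,224" becomes
# ('2', '2', '4', ',', '2', '2', '4'). A sketch of a small converter that
# yields an (int, int) pair instead; the option name mirrors the script's.
import argparse

def size_pair(text):
    """Parse 'WxH' or 'W,H' into a tuple of two ints."""
    parts = text.replace("x", ",").split(",")
    if len(parts) != 2:
        raise argparse.ArgumentTypeError("expected WIDTHxHEIGHT, e.g. 224x224")
    return tuple(int(p) for p in parts)

parser = argparse.ArgumentParser()
parser.add_argument("--resize", type=size_pair, default=(224, 224),
                    metavar="RES", help="Size of the resized frame")
print(parser.parse_args(["--resize", "224x224"]).resize)  # -> (224, 224)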
self.app.all_codes():\n self.load(code)\n if self.ticket['Finished'] is None:\n continue\n started = self.ticket['Started'][0:10]\n finished = None\n if self.ticket['Status'] == 'Done':\n finished = self.ticket['Finished'][0:10]\n\n if finished == started and started >= LIMIT:\n if started not in results:\n results[started] = []\n results[started].append(code + ' ' + self.ticket['Title'] + Q.MAGENTA + ' [Started and Finished]' + Q.END)\n else:\n if finished >= LIMIT:\n if finished not in results:\n results[finished] = []\n else:\n results[finished].append(code + ' ' + self.ticket['Title'] + Q.GREEN + ' [Finished]' + Q.END)\n\n if started >= LIMIT:\n if started not in results:\n results[started] = []\n results[started].append(code + ' ' + self.ticket['Title'] + Q.YELLOW + ' [Started]' + Q.END)\n\n for title in sorted(results.keys()):\n self.wr(Q.TITLE + \"\\n\" + title + Q.END + \"\\n\")\n for subtitle in sorted(results[title]):\n self.wr(' ' + subtitle)\n self.wr('')\n","repo_name":"wigy/q","sub_path":"lib/python/Q/commands/last.py","file_name":"last.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4650242792","text":"#!/usr/bin/env python\n\nimport json\nimport pycurl\nimport sys\nfrom StringIO import StringIO\n\n'''\nsimple zone/record backups called with api key following, and outputs json\n'''\n\nAUTH = 'X-NSONE-Key: ' + sys.argv[1]\nURL = 'https://api.nsone.net/v1/zones'\n\ndef curl_api(url, verb, authhead, *args):\n buffer = StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL, url)\n c.setopt(c.CUSTOMREQUEST, verb)\n c.setopt(c.HTTPHEADER, [authhead])\n for arg in args:\n c.setopt(c.POSTFIELDS, arg)\n c.setopt(c.WRITEDATA, buffer)\n c.perform()\n c.close()\n return buffer.getvalue()\n\nzone_arg = URL + \"/\" + sys.argv[2]\njsons = json.loads(curl_api(zone_arg, 'GET', AUTH))\ndomain_list = []\n\nfor x in jsons['records']:\n domain_list.append([x['domain'], x['type']])\n\nrecord_list = []\n\nfor y in domain_list:\n record_arg = URL + \"/\" + sys.argv[2] + \"/\" + y[0] + \"/\" + y[1]\n record_list.append(json.loads(curl_api(record_arg, \"GET\", AUTH)))\n\nprint(json.dumps(record_list))\n","repo_name":"ponbiki/Py","sub_path":"scripts/zone_detail.py","file_name":"zone_detail.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"39385701693","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 17 17:07:26 2022\n### first: add your rmsd to one file:\npaste <(awk '{print $1, $2}' systems-1.rmsd) <(awk '{print $2}' systems-2.rmsd) > rmsds.dat\n@author: Brunis\n\"\"\"\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef general_canvas(figsize, dpi):\n \"\"\"\n Customization of plots up2you\n \"\"\"\n mpl.rc('figure', figsize=figsize, dpi=dpi)\n mpl.rc('xtick', direction='in', top=False)\n mpl.rc('xtick.major', top=False)\n mpl.rc('xtick.minor', top=False)\n mpl.rc('ytick', direction='in', right=True)\n mpl.rc('ytick.major', right=False)\n mpl.rc('ytick.minor', right=False)\n mpl.rc('axes', labelsize=20)\n plt.rcParams['axes.autolimit_mode'] = 'data'\n mpl.rc('lines', linewidth=2, color='k')\n mpl.rc('font', family='sans-serif', size=20)\n mpl.rc('grid', alpha=0.5, color='gray', linewidth=1, linestyle='--')\n\n return\n\n# Set the desired figsize and dpi values\nfigsize = (8, 6)\ndpi = 300\n\n# Call the general_canvas function with 
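# In last.py above, the Finished branch only appends when the date key already
# exists (the append sits under the else), so the first ticket finishing on a
# given day is dropped. dict.setdefault collapses the create-then-append pair
# and sidesteps that shape of bug; the ticket data here is invented.
results = {}
tickets = [("2024-01-02", "T-1 First ticket"),
           ("2024-01-02", "T-2 Second ticket"),
           ("2024-01-03", "T-3 Third ticket")]
for finished, line in tickets:
    results.setdefault(finished, []).append(line + " [Finished]")
for day in sorted(results):
    print(day, results[day])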
the updated dpi value\ngeneral_canvas(figsize, dpi)\n\n# load the data from the file, skipping the first row (header)\ndata = np.loadtxt(\"rmsds.dat\", skiprows=1)\n\n# extract the x and y values from the data array\nx_values = data[:, 0]\ny_values1 = data[:, 1]\ny_values2 = data[:, 2]\ny_values3 = data[:, 3]\n\n# create the plot with increased line thickness\nplt.plot(x_values, y_values1, color='orange', linewidth=2, label='System-1')\nplt.plot(x_values, y_values2, color='blue', linewidth=2, label='System-2')\nplt.plot(x_values, y_values3, color='red', linewidth=2, label='System-3')\nplt.ylim(0,20)\nplt.xlabel(\"Time (ns)\")\nplt.ylabel(\"RMSD (Å)\")\nplt.title(\"RMSD SYSTEM\")\nplt.legend()\n\n### save figure with the updated DPI\nplt.savefig(\"rmsd-systems.png\", dpi=dpi)\n","repo_name":"BruDiGe/cMD","sub_path":"plot-rmsds.py","file_name":"plot-rmsds.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4618694631","text":"import os\nimport ast\nfrom pathlib import Path\nfrom datetime import date\nfrom tkinter import Tk, filedialog\n\ntoday = date.today().strftime(\"%d-%m-%Y\")\n\nFILENAME = 'take_names.txt'\nCHAR_TO_REPLACE = ['(', ')', '{', '}', '\\n', '\\t']\n\n\ndef get_skus_from_file(filename):\n skus = []\n with open(rf'{filename}', 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n for char in CHAR_TO_REPLACE:\n line = line.replace(char, '')\n\n if '/' in line or line == '':\n continue\n skus.append(line)\n\n return skus\n\n\ndef create_directories_from_sku_list(sku_list, file_path):\n dir_name, basename = os.path.split(file_path)\n\n root = f\"{dir_name}\\\\\"\n try:\n for sku in sku_list:\n create_path = f\"{root}\\\\{sku}\"\n # os.mkdirs(create_path)\n Path(create_path).mkdir(parents=True, exist_ok=True)\n except FileExistsError:\n print(f'{basename} already exists')\n except Exception as ex:\n print(f'{ex}')\n return False\n\n return True\n\n\ndef main():\n root = Tk()\n root.withdraw()\n root.attributes('-topmost', True)\n\n file_path = filedialog.askopenfilename()\n\n skus = get_skus_from_file(file_path)\n success = create_directories_from_sku_list(skus, file_path)\n\n if success:\n print(f'{len(skus)} folders created for skus:')\n for sku in skus:\n print(sku, end=', ')\n\n print('Done')\n\n\nmain()","repo_name":"AluminumPirate/Images-Scraper","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19535659150","text":"import logging\nimport os\nimport ConfigParser\n\nclass Conf:\n\n def __init__(self):\n # [log]\n config = ConfigParser.ConfigParser()\n path = os.path.split(os.path.realpath(__file__))[0]\n config.read(path+\"/agent.conf\")\n log_path = path + \"/log/\" + config.get(\"log\", \"name\")\n self.log_file = log_path\n log_level_dict = {\"NOTSET\": logging.NOTSET, \"DEBUG\": logging.DEBUG, \"INFO\": logging.INFO, \"WARNING\": logging.WARNING, \"ERROR\": logging.ERROR, \"CRITICAL\": logging.CRITICAL}\n log_level = log_level_dict[config.get(\"log\", \"level\")]\n handler = logging.FileHandler(log_path)\n handler.setLevel(log_level)\n formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] [%(filename)s] [%(funcName)s:%(lineno)d] -- %(message)s\")\n handler.setFormatter(formatter)\n self.log = logging.getLogger(\"agent\")\n self.log.addHandler(handler)\n self.log.setLevel(log_level)\n # [zk]\n self.zk_address = 
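# plot-rmsds.py above repeats one plt.plot call per system column; iterating
# over the columns keeps the script in one place when the number of systems
# grows. Synthetic data stands in for rmsds.dat here, and the output filename
# is changed so as not to suggest it is the script's own figure.
import numpy as np
import matplotlib.pyplot as plt

time_ns = np.linspace(0, 100, 200)
rmsd = np.abs(np.random.default_rng(0).normal(3.0, 0.5, size=(200, 3)))
colors = ["orange", "blue", "red"]

for i, color in enumerate(colors):
    plt.plot(time_ns, rmsd[:, i], color=color, linewidth=2, label=f"System-{i + 1}")
plt.xlabel("Time (ns)")
plt.ylabel("RMSD (Å)")
plt.legend()
plt.savefig("rmsd-systems-loop.png", dpi=300)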
config.get(\"zk\", \"address\")\n self.task_path = config.get(\"zk\", \"task\")\n self.host_path = config.get(\"zk\", \"host\")\n # [agent_type]\n self.agent_type = {}\n for option in config.options(\"agent_type\"):\n self.agent_type[option] = config.get(\"agent_type\", option)\n # [data_number]\n self.data_number = {}\n for option in config.options(\"data_number\"):\n self.data_number[option] = config.get(\"data_number\", option)\n # [data]\n self.hdfs = config.get(\"data\", \"hdfs\")\n self.keep_number = config.get(\"data\", \"keep_number\")\n self.ignore_number = []\n for num in config.get(\"data\", \"ignore_number\").split(\",\"):\n self.ignore_number.append(num)\n\nCONF = Conf() \n","repo_name":"alibaba/alimama-common-performance-platform-acp","sub_path":"Agent/agent/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"84"} +{"seq_id":"21799100547","text":"#! /usr/bin/python3\n\nimport os\n\nfrom evolutek.lib.settings import ROBOT\nfrom cellaserv.service import Service\nimport json\nfrom evolutek.lib.interface import Interface\nfrom evolutek.lib.settings import SIMULATION\nfrom evolutek.lib.watchdog import Watchdog\nif SIMULATION:\n\tfrom evolutek.simulation.simulator import read_config\n\nfrom sys import argv\nfrom tkinter import Button, Canvas, Label, ttk\n\n\n# TODO: clean\n\nclass AIInterface(Interface):\n\n\tdef __init__(self):\n\n\t\tsuper().__init__('Ai interface', 3)\n\t\tself.init_robot(ROBOT)\n\t\tself.match_status = None\n\t\tself.bau_status = None\n\t\tself.states_ai = None\n\t\tself.client.add_subscribe_cb(ROBOT + '_infos_interfaces', self.infos_ai)\n\t\tself.match_status_watchdog = Watchdog(3, self.reset_match_status) # float(match_config['refresh']) * 2, self.reset_match_status)\n\t\tif SIMULATION:\n\t\t\tself.init_simulation()\n\n\t\tself.window.after(500, self.update_interface)\n\t\tprint('[AI NTERFACE] Window looping')\n\t\tself.window.mainloop()\n\n\tdef infos_ai(self, states_ai, bau_status):\n\t\tself.bau_status = bau_status\n\t\tself.states_ai = states_ai\n\n\tdef init_simulation(self):\n\t\tenemies = read_config('enemies')\n\n\t\tif enemies is None:\n\t\t\treturn\n\n\t\tfor enemy, config in enemies['robots'].items():\n\t\t\tself.robots[enemy] = {'telemetry': None, 'size': config['config']['robot_size_y'], 'color': 'red'}\n\t\t\tself.client.add_subscribe_cb(enemy + '_telemetry', self.telemetry_handler)\n\t#\n\t# def match_status_handler(self, status):\n\t# \tself.match_status_watchdog.reset()\n\t# \tprint(status, end=\"\\n\")\n\t# \tself.match_status = status\n\n\tdef reset_match_status(self):\n\t self.match_status = None\n\n\tdef reset_match(self):\n\t\ttry:\n\t\t\tself.cs.match.reset_match()\n\t\texcept Exception as e:\n\t\t\tprint('[IA INTERFACE] Failed to reset match : %s' % str(e))\n\n\tdef action_color(self):\n\t\tif self.cs.match.get_color() == self.color1:\n\t\t\tself.cs.match.set_color(self.color2)\n\t\telse:\n\t\t\tself.cs.match.set_color(self.color1)\n\n\tdef action_strategy(self):\n\t\tself.client.publish(ROBOT + \"_strategy\", strategy=self.select_strategy.get(), ai=ROBOT)\n\n\tdef shutdown(self):\n\t\tos.system(\"sudo shutdown now\")\n\t\tprint('Gros je me casse')\n\n\tdef event_recalibration(self):\n\t\tself.client.publish(ROBOT + \"_recalibration\")\n\t\tself.client.publish(ROBOT + \"_reset\")\n\n\tdef event_set_pos(self):\n\t\tself.client.publish(ROBOT + \"_reset\")\n\n\tdef parse_strategy(self, file):\n\t\tdata = 
None\n\t\ttry:\n\t\t\twith open(file, 'r') as goals_file:\n\t\t\t\tdata = goals_file.read()\n\t\texcept Exception as e:\n\t\t\tprint('[GOALS] Failed to read file: %s' % str(e))\n\t\t\treturn False\n\n\t\tgoals = json.loads(data)\n\t\tlist_robot = []\n\n\t\tfor i in goals[\"strategies\"]:\n\t\t\tif ROBOT in i[\"available\"]:\n\t\t\t\tlist_robot.append(i)\n\t\treturn list_robot\n\n\t# Init match interface\n\tdef init_interface(self):\n\n\t\t# button color\n\t\tself.color_button = Button(self.window, text='Change color', command=self.action_color)\n\t\tself.color_button.grid(row=7, column=0)\n\n\t\t# Close button\n\t\tself.close_button = Button(self.window, text='Close', command=self.close)\n\t\tself.close_button.grid(row=9, column=0)\n\n\t\t# Reset Button\n\t\tself.reset_button = Button(self.window, text='Reset Match', command=self.reset_match)\n\t\tself.reset_button.grid(row=8, column=0)\n\n\t\tself.shutdown_button = Button(self.window, text='shutdown', command=self.shutdown)\n\t\tself.shutdown_button.grid(row=10, column=0)\n\n\t\tself.recalibration_button = Button(self.window, text='Recalibration', command=self.event_recalibration)\n\t\tself.recalibration_button.grid(row=11, column=0)\n\n\t\t# Reset Button\n\t\tself.resset_pos = Button(self.window, text='Reset position', command=self.event_set_pos)\n\t\tself.resset_pos.grid(row=13, column=0)\n\n\t\t# select strategy\n\t\tlist_strategy = self.parse_strategy(file='/etc/conf.d/strategies.json')\n\t\tself.select_strategy = ttk.Combobox(self.window, values=list_strategy[0])\n\t\tself.select_strategy.current(0)\n\t\tself.select_strategy.bind(\"<>\", self.action_strategy)\n\t\tself.select_strategy.grid(row=6, column=0)\n\n\t\t# Map\n\t\tself.canvas = Canvas(self.window, width=3000 * self.interface_ratio, height=2000 * self.interface_ratio)\n\t\tself.canvas.grid(row=6, column=1, columnspan=4, rowspan=14)\n\n\t\t# Color\n\t\tself.color_label = Label(self.window)\n\t\tself.color_label.grid(row=0, column=1)\n\t\tself.color_label.config(font=('Arial', 12))\n\n\t\t# Score\n\t\tself.score_label = Label(self.window)\n\t\tself.score_label.grid(row=0, column=0)\n\t\tself.score_label.config(font=('Arial', 25))\n\n\t\t# Match status\n\t\tself.match_status_label = Label(self.window)\n\t\tself.match_status_label.grid(row=0, column=2)\n\t\tself.match_status_label.config(font=('Arial', 12))\n\n\t\t# # BAU STATUS\n\t\t#self.bau_status_label = Label(self.window)\n\t\t#self.bau_status_label.grid(row=0, column=3)\n\t\t#self.bau_status_label.config(font=('Arial', 12))\n\n\t\t# Match time\n\t\tself.match_time_label = Label(self.window)\n\t\tself.match_time_label.grid(row=0, column=4)\n\t\tself.match_time_label.config(font=('Arial', 12))\n\n\t\tself.canvas.create_image(1500 * self.interface_ratio, 1000 * self.interface_ratio, image=self.map)\n\n\tdef update_interface(self):\n\t\tself.canvas.delete('all')\n\t\tself.canvas.create_image((3000 * self.interface_ratio) / 2, (2000 * self.interface_ratio) / 2, image=self.map)\n\t\t#self.bau_status_label.config(text='%s' % ' Bau Status: ON' if self.bau_status else 'Bau Status: OFF')\n\n\t\tif self.cs.match.get_status() is not None:\n\t\t\tself.color_label.config(text=\"Color: %s\" % self.cs.match.get_color(), fg=self.cs.match.get_status()['color'])\n\t\t\tself.score_label.config(text=\"Score: %d\" % self.cs.match.get_status()['score'])\n\t\t\tself.match_status_label.config(text=\"Match status: %s\" % self.cs.match.get_status()['status'])\n\t\t\tself.match_time_label.config(text=\"Match time: %d\" % 
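# parse_strategy above expects /etc/conf.d/strategies.json to hold a
# {"strategies": [...]} list where each entry names the robots it is
# "available" for ('pal' and 'pmi' appear elsewhere in this file). A sketch of
# the same filtering against an in-memory document; the "name" field and the
# strategy names are invented, only the two keys the code touches are real.
import json

ROBOT = "pal"
data = json.loads("""
{"strategies": [
  {"name": "rush",    "available": ["pal"]},
  {"name": "defend",  "available": ["pmi"]},
  {"name": "default", "available": ["pal", "pmi"]}
]}
""")
list_robot = [s for s in data["strategies"] if ROBOT in s["available"]]
print([s["name"] for s in list_robot])  # ['rush', 'default']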
self.cs.match.get_status()['time'])\n\t\t\tself.color_button.config(bg=self.cs.match.get_color())\n\n\t\telse:\n\t\t\tprint(\"[FIX] match not running or problem with this\")\n\n\t\t\tself.color_label.config(text=\"Color: %s\" % 'M.C')\n\t\t\tself.score_label.config(text=\"Score: %s\" % 'M.C')\n\t\t\tself.match_status_label.config(text=\"Match status: %s\" % 'M.C')\n\t\t\tself.match_time_label.config(text=\"Match time: %s\" % 'M.C')\n\n\t\tself.tmp.clear()\n\t\tfor robot in self.robots:\n\t\t\tif robot in ['pal', 'pmi']:\n\t\t\t\tself.print_robot_image(robot, self.robots[robot]['telemetry'])\n\t\t\telse:\n\t\t\t\tself.print_robot(*self.robots[robot].values())\n\n\t\tself.print_path(self.paths[ROBOT], 'yellow', 'violet')\n\n\t\tself.window.after(500, self.update_interface)\n\ndef main():\n\tif len(argv) > 1:\n\t\tglobal ROBOT\n\t\tROBOT = argv[1]\n\tAIInterface()\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"evolutek/archive-services","sub_path":"2020/evolutek/utils/interfaces/ai_interface.py","file_name":"ai_interface.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"35460230579","text":"import tabulate\nfrom tabulate import tabulate\nimport sys\nimport os.path\nfrom os import path\nimport csv\n\nif len(sys.argv) < 2:\n sys.exit(\"Must enter a file name\")\nelif len(sys.argv) > 2:\n sys.exit(\"Too many command-line arguments\")\nelif not path.exists(sys.argv[1]):\n sys.exit(\"File does not exist\")\nelse:\n a,b = sys.argv[1].split(\".\")\n if b != \"csv\":\n sys.exit(\"not a csv file\")\n if b == \"csv\" and len(sys.argv) == 2:\n with open(sys.argv[1]) as file:\n reader = csv.reader(file, delimiter = \",\")\n headers = next(reader)\n mylines = []\n for row in reader:\n mylines.append(row)\n print(tabulate(mylines, headers, tablefmt=\"grid\"))\n\n\n","repo_name":"KariHab/CS50P_Harvard-University","sub_path":"week 6/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"8793351204","text":"'''\nKarobben\nChain behaviors detect\n'''\nimport math\nimport pandas as pd\n# TB example\n# TB = pd.read_csv(\"csv/20210712-C0147-WASH_backgroud-5th-29C_6d_Trim.mp4.csv\", sep=\" \", header = None)\nclass Chain_finder:\n\n Chain_result = []\n\n def __init__(self,TB, Diamiter=.1, Chain_Num = 3, Angle = 15):\n TB = TB[TB[0]==3]\n tmp_result = []\n Flies = [[float(TB.iloc[i, 1]), float(TB.iloc[i,2]), float(TB.iloc[i,3]), float(TB.iloc[i,4])] for i in range(len(TB))]\n for i in Flies:\n for ii in Flies:\n if i != ii:\n dist = self.dist_f(i, ii)\n print(\"\\n\\n\\n\", dist, \"\\n\\n\")\n if dist <=Diamiter:\n #print(dist)\n tmp_result += [[i, ii]]\n self.Chain_result = tmp_result\n\n\n def dist_f(self, F1, F2):\n Dist = math.sqrt((F1[0]-F2[0])**2 + (F1[1]- F2[1])**2)\n return Dist\n","repo_name":"Dengflylab/YoloFly","sub_path":"utils/Chain_detect.py","file_name":"Chain_detect.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"23067957253","text":"import sys\nfrom itertools import combinations\n\nn = list(map(str, sys.stdin.readline().strip()))\nanswer = set()\nstack, pos = [], []\n\nfor idx, word in enumerate(n):\n if word == '(':\n stack.append(idx)\n elif word == ')':\n pos.append((stack.pop(), idx)) # 괄호의 시작점과 끝점 저장\n\nfor i in range(1, len(pos) + 1):\n c = 
combinations(pos, i) # combinations을 통해 모든 경우의 수를 확인\n\n # 반복문을 통해 경우의 수를 확인\n for j in c:\n temp = list(n)\n # 괄호 제거\n for s, e in j:\n temp[s] = ''\n temp[e] = ''\n answer.add(''.join(temp))\n\nfor ans in sorted(list(answer)):\n print(ans)","repo_name":"2022-PACOS/python-codingtest-study","sub_path":"youngmi/week4/boj_2800_괄호제거.py","file_name":"boj_2800_괄호제거.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"37381678663","text":"from bson import ObjectId\n\nfrom core.db import mongo\nfrom core.handlers import BaseAPIView, TemplateHTTPView\n\n\nclass MainView(BaseAPIView):\n template_name = 'admin/main.html'\n\n async def get(self, request, user):\n # CURRENT USER\n user = await mongo.users.find_one({'_id': ObjectId(user.id)})\n\n return self.render_template(\n request=request,\n user=user\n )\n","repo_name":"mirzafar/alert","sub_path":"server/admin/api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"39084230777","text":"import dash_bootstrap_components as dbc\nfrom dash import html\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nimport yfinance as yf\nimport numpy as np\nfrom scipy.stats import norm\nfrom statistics import mean\n\nGREEN = \"#00b51a\"\nRED = \"#ff2d21\"\n\n\ndef prepare_summary_tab_data(ticker_text, period=1):\n \"\"\"Returns data needed for summary tab charts and some stats about price data\"\"\"\n\n price_data = yf.download(\n tickers=ticker_text,\n interval=\"1d\",\n period=f\"{period}y\",\n prepost=False,\n threads=True,\n )\n price_data = price_data.reset_index()\n\n period_change = round(\n (\n (price_data.iloc[-1, 4] - price_data.iloc[0, 4])\n / price_data.iloc[0, 4]\n * 100\n ),\n 2,\n )\n period_max = round(price_data[\"Close\"].max(), 2)\n period_min = round(price_data[\"Close\"].min(), 2)\n price_data[\"Change\"] = (\n (price_data[\"Close\"] - price_data[\"Close\"].shift(1))\n / price_data[\"Close\"].shift(1)\n * 100\n )\n max_gain = round(price_data[\"Change\"].max(), 2)\n max_surge = round(price_data[\"Change\"].min(), 2)\n\n if period_change >= 0:\n line_color = GREEN\n else:\n line_color = RED\n\n return {\n \"Price data\": price_data,\n \"Line color\": line_color,\n \"Period change\": period_change,\n \"Period max\": period_max,\n \"Period min\": period_min,\n \"Max gain\": max_gain,\n \"Max surge\": max_surge,\n }\n\n\ndef add_indicator_button(\n indicator_short,\n indicator_name,\n param_2_name,\n param_1_name,\n param_1_value,\n param_2_value,\n s2_display,\n button_visible,\n):\n \"\"\"Based on of indicator and indicator settings selected by user, returns html.Div container with main button and clear button\"\"\"\n\n return html.Div(\n children=[\n dbc.Button(\n f\"{indicator_name}({param_1_value, param_2_value}\",\n id=f\"{indicator_short}_button\",\n color=\"primary\",\n className=\"indicator-buttons\",\n style={\"display\": button_visible,},\n ),\n dbc.Button(\n \"X\",\n id=f\"{indicator_short}_x_button\",\n color=\"primary\",\n className=\"indicator-buttons\",\n style={\"display\": button_visible,},\n ),\n dbc.Modal(\n [\n dbc.ModalHeader(dbc.ModalTitle(indicator_name)),\n dbc.ModalBody(\n [\n dbc.Label(f\"{param_1_name}:\"),\n dbc.Input(\n id=f\"{indicator_short}_param1\",\n type=\"number\",\n disabled=False,\n ),\n dbc.Label(\n f\"{param_2_name}:\", style={\"display\": s2_display}\n ),\n 
dbc.Input(\n id=f\"{indicator_short}_param2\",\n type=\"number\",\n disabled=False,\n style={\"display\": s2_display},\n ),\n ]\n ),\n dbc.ModalFooter(\n [dbc.Button(\"OK\", color=\"primary\", id=f\"{indicator_short}_ok\"),]\n ),\n ],\n id=f\"{indicator_short}_modal\",\n ),\n ],\n style={\n \"display\": \"flex\",\n \"justify-content\": \"flex-start\",\n \"margin-top\": \"10px\",\n \"background-color\": \"#211F32\",\n \"align-items\": \"center\",\n \"gap\": \"0px\",\n \"margin-bottom\": \"0px\",\n },\n )\n\n\ndef prepare_price_statistics(ticker):\n \"\"\"Returns html.H5 labels with statistics about price data of provided ticker\"\"\"\n\n ticker[\"Perc change\"] = (ticker[\"Open\"] - ticker[\"Close\"]) / ticker[\"Open\"] * 100\n ticker[\"Perc range\"] = (ticker[\"High\"] - ticker[\"Low\"]) / ticker[\"High\"] * 100\n ticker[\"Gap\"] = (ticker[\"Open\"] - ticker[\"Close\"].shift(1)) / ticker[\"Open\"] * 100\n ticker[\"Gap\"] = ticker[\"Gap\"].fillna(0)\n\n min = round(ticker[\"Low\"].min(), 2)\n avg = round(ticker[\"Close\"].mean(), 2)\n max = round(ticker[\"High\"].max(), 2)\n perc_change = round(\n (\n 100\n * (\n (int(ticker.iloc[-1, 4]) - int(ticker.iloc[0, 1]))\n / int(ticker.iloc[0, 1])\n )\n ),\n 2,\n )\n price_range = round((max - min), 2)\n perc_range = round((price_range / max * 100), 2)\n # Gap\n avg_gap = round(ticker[\"Gap\"].mean(), 2)\n min_gap = round(ticker[\"Gap\"].min(), 2)\n max_gap = round(ticker[\"Gap\"].max(), 2)\n # Single candles\n avg_perc_change = round(ticker[\"Perc change\"].mean(), 2)\n min_perc_change = round(ticker[\"Perc change\"].min(), 2)\n max_perc_change = round(ticker[\"Perc change\"].max(), 2)\n avg_perc_range = round(ticker[\"Perc range\"].mean(), 2)\n min_perc_range = round(ticker[\"Perc range\"].min(), 2)\n max_perc_range = round(ticker[\"Perc range\"].max(), 2)\n\n stats = [\n html.H5(f\"Percentage change: {perc_change}%\", className=\"stats_styling\"),\n html.H5(f\"Price range: {price_range}\", className=\"stats_styling\"),\n html.H5(f\"Percentage range: {perc_range}%\", className=\"stats_styling\"),\n html.H5(f\"Min: {min}\", className=\"stats_styling\"),\n html.H5(f\"Avg: {avg}\", className=\"stats_styling\"),\n html.H5(f\"Max: {max}\", className=\"stats_styling\"),\n ]\n\n return stats\n\n\ndef format_table_data(table_data, frequency):\n \"\"\"Formats a financial data to the format matching Dash Table\"\"\"\n\n table_data.reset_index(inplace=True)\n table_data = table_data.transpose()\n table_data.reset_index(inplace=True)\n table_data.drop(0, 0, inplace=True)\n\n table_data.columns = table_data.iloc[0]\n table_data = table_data[1:]\n\n table_data.index = table_data.iloc[:, 0]\n table_data = table_data.iloc[:, 1:]\n\n rows_to_keep = []\n for i in range(len(table_data)):\n if not all(\n str(cell) == \"\" or cell == 0 or cell is None for cell in table_data.iloc[i]\n ):\n rows_to_keep.append(i)\n table_data = table_data.iloc[rows_to_keep]\n table_data.dropna(inplace=True)\n\n table_data.index.name = \"\"\n table_data.columns.name = \"Date\"\n\n columns_to_keep = []\n for x in range(len(table_data.columns)):\n if table_data.iloc[0, x] != \"TTM\":\n columns_to_keep.append(x)\n table_data = table_data.iloc[:, columns_to_keep]\n\n table_data.drop(table_data.index[1], 0, inplace=True)\n table_data.drop(table_data.index[0], 0, inplace=True)\n\n length = len(table_data.index)\n width = len(table_data.columns)\n\n for l in range(length):\n for w in range(width):\n if type(table_data.iloc[l, w]) == float:\n table_data.iloc[l, w] = format(table_data.iloc[l, 
w], \",\")\n\n new_columns = []\n for x in range(len(table_data.columns)):\n if frequency == \"a\":\n new_label = str(table_data.columns[x])[:7]\n elif frequency == \"q\":\n new_label = str(table_data.columns[x])[:7]\n new_columns.append(new_label)\n\n table_data.columns = new_columns\n table_data = table_data.iloc[:, -4:]\n table_data.reset_index(inplace=True)\n\n # Formatting table labels text\n for r in range(len(table_data.index)):\n for c in range(len(table_data.columns)):\n new_x = \"\"\n x = table_data.iloc[r, c]\n for y in range(len(x)):\n if x[y].isupper() and x[y - 1].isupper() == False and y != 0:\n z = f\" {x[y]}\"\n new_x = new_x + z\n else:\n new_x = new_x + x[y]\n table_data.iloc[r, c] = new_x\n\n return table_data\n\n\ndef prepare_distribution_and_price_data(ticker_text, interval, start_date, end_date):\n \"\"\"Downloads price OHLC data for provided ticker, then formats it and calculates values needed for distribution and percentage returns charts\"\"\"\n\n price_data = yf.download(\n tickers=ticker_text,\n interval=interval,\n start=start_date,\n end=end_date,\n prepost=False,\n threads=True,\n )\n\n if price_data.empty:\n return None, True\n\n price_data[\"Daily returns\"] = (\n (price_data[\"Close\"] - price_data[\"Close\"].shift(1))\n / price_data[\"Close\"].shift(1)\n * 100\n )\n average_return = price_data[\"Daily returns\"].mean()\n zeros_after_decimal = count_zeros_after_decimal(average_return)\n multiplier = float(\"0.\" + \"0\" * zeros_after_decimal + \"5\")\n\n price_data[\"Rounded daily returns\"] = (\n round(price_data[\"Daily returns\"] / multiplier) * multiplier\n )\n price_data[\"Counted returns\"] = None\n for i in range(len(price_data.index)):\n price_data[\"Counted returns\"].iloc[i] = price_data[\n price_data[\"Rounded daily returns\"]\n == price_data[\"Rounded daily returns\"].iloc[i]\n ][\"Rounded daily returns\"].count()\n\n distribution_data = price_data.iloc[:, -2:]\n distribution_data.drop_duplicates(subset=[\"Rounded daily returns\"])\n distribution_data = distribution_data.set_index(\"Rounded daily returns\")\n distribution_data.sort_index(inplace=True)\n\n price_data[\"Daily log returns\"] = round(np.log(1 + price_data[\"Daily returns\"]), 2)\n\n if price_data.index.name == \"Datetime\":\n x_values = price_data.index.strftime(\"%d-%m-%Y %H:%M\")\n else:\n x_values = price_data.index.strftime(\"%d-%m-%Y\")\n\n return {\n \"Price data\": price_data,\n \"x values\": x_values,\n \"Distribution data\": distribution_data,\n }\n\n\ndef count_zeros_after_decimal(number):\n \"\"\"Counts number of \"0\" after decimal in selected number\"\"\"\n\n number_after_decimal = str(number).split(\".\")[1]\n count = 1\n for symbol in number_after_decimal:\n if symbol == \"0\":\n count += 1\n else:\n break\n return count\n\n\ndef get_linear_regression_params(ticker, interval, start_date, end_date):\n \"\"\"Calculates and creates linear regression chart with trendline of provided stock\"\"\"\n\n data = yf.download(\n tickers=f\"{ticker} SPY\", interval=interval, start=start_date, end=end_date\n )\n\n data = data[\"Close\"]\n\n returns = np.log(data).diff()\n returns = returns.dropna()\n correlation = returns.corr()\n reg = np.polyfit(returns[\"SPY\"], returns[ticker], deg=1)\n trend = np.polyval(reg, returns[ticker])\n output = {\"Returns\": returns, \"Correlation\": correlation, \"Trend\": trend}\n\n return output\n\n\ndef calculate_streak(data, up=True):\n \"\"\"Calculates longest streak of up or down values in provided DataFrame\"\"\"\n\n max_streak = 0\n 
current_streak = 0\n    if up:\n        for row in range(len(data.index)):\n            if data.iloc[row, 0] > 0:\n                current_streak += 1\n                if current_streak > max_streak:\n                    max_streak = current_streak\n            else:\n                current_streak = 0\n    else:\n        for row in range(len(data.index)):\n            if data.iloc[row, 0] < 0:\n                current_streak += 1\n                if current_streak > max_streak:\n                    max_streak = current_streak\n            else:\n                current_streak = 0\n\n    return max_streak\n\n\ndef get_percentage_returns_statistics(data):\n    """Returns a dictionary containing statistics on the percentage returns of the provided OHLC data."""\n\n    data = data[[\"Close\"]]\n    data[\"Percentage returns\"] = data[\"Close\"].pct_change() * 100\n    data.dropna(inplace=True)\n    data = data[[\"Percentage returns\"]]\n\n    number_of_candles = len(data)\n    avg_candle = data[\"Percentage returns\"].mean()\n    max_candle = data[\"Percentage returns\"].max()\n    min_candle = data[\"Percentage returns\"].min()\n\n    number_of_up_candles = len(data[data[\"Percentage returns\"] > 0])\n    avg_up_candle = (data[\"Percentage returns\"][data[\"Percentage returns\"] > 0]).mean()\n    max_up_candle = (data[\"Percentage returns\"][data[\"Percentage returns\"] > 0]).max()\n    min_up_candle = (data[\"Percentage returns\"][data[\"Percentage returns\"] > 0]).min()\n    longest_up_streak = calculate_streak(data, True)\n\n    number_of_down_candles = len(data[data[\"Percentage returns\"] < 0])\n    avg_down_candle = (\n        data[\"Percentage returns\"][data[\"Percentage returns\"] < 0]\n    ).mean()\n    max_down_candle = (data[\"Percentage returns\"][data[\"Percentage returns\"] < 0]).min()  # most negative down candle\n    min_down_candle = (data[\"Percentage returns\"][data[\"Percentage returns\"] < 0]).max()  # down candle closest to zero\n    longest_down_streak = calculate_streak(data, False)\n\n    output = {\n        \"Number of candles\": number_of_candles,\n        \"Number of up candles\": number_of_up_candles,\n        \"Number of down candles\": number_of_down_candles,\n        \"Longest up streak\": longest_up_streak,\n        \"Longest down streak\": longest_down_streak,\n        \"Average candle\": f\"{round(avg_candle, 2)}%\",\n        \"Biggest candle\": f\"{round(max_candle, 2)}%\",\n        \"Smallest candle\": f\"{round(min_candle, 2)}%\",\n        \"Average up candle\": f\"{round(avg_up_candle, 2)}%\",\n        \"Average down candle\": f\"{round(avg_down_candle, 2)}%\",\n        # \"Biggest up candle\": f\"{round(max_up_candle, 2)}%\",\n        # \"Smallest up candle\": f\"{round(min_up_candle, 2)}%\",\n        # \"Biggest down candle\": f\"{round(max_down_candle, 2)}%\",\n        # \"Smallest down candle\": f\"{round(min_down_candle, 2)}%\",\n    }\n\n    return output\n\n\ndef monte_carlo_simulation(data, number_of_simulations=150, forecast_period=100):\n    """Returns numpy array of simulated prices for provided forecast period and number of simulations"""\n\n    returns = data[\"Close\"].pct_change()\n    initial_price = data[\"Close\"].iloc[-1]\n    average_return = returns.mean()\n    return_std = returns.std()\n\n    simulated_prices = np.zeros((forecast_period, number_of_simulations))\n\n    for simulation in range(number_of_simulations):\n        prices = [initial_price]\n        for x in range(forecast_period):\n            daily_return = np.random.normal(average_return, return_std)\n            price = prices[-1] * (1 + daily_return)\n            prices.append(price)\n        simulated_prices[:, simulation] = prices[1:]\n\n    return simulated_prices\n\n\ndef monte_carlo_statistics(simulated_prices, initial_price):\n    """Returns basic statistics of provided simulated prices"""\n\n    ending_prices = []\n\n    for simulation in simulated_prices.T:  # one simulation per column (see monte_carlo_simulation)\n        ending_prices.append(simulation[-1])\n\n    max_ending_price = 
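# Why monte_carlo_statistics walks simulated_prices.T: monte_carlo_simulation
# stores one simulation per *column* (simulated_prices[:, simulation] = prices),
# so iterating the array directly would yield time steps, not simulations.
# Tiny shape check with a labelled toy array:
import numpy as np

forecast_period, n_sims = 4, 3
simulated = np.arange(forecast_period * n_sims).reshape(forecast_period, n_sims)

per_time_step = [row[-1] for row in simulated]     # 4 values, one per time step
per_simulation = [col[-1] for col in simulated.T]  # 3 values, one per simulation
print(per_time_step)   # [2, 5, 8, 11]
print(per_simulation)  # [9, 10, 11]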
round(max(ending_prices), 2)\n min_ending_price = round(min(ending_prices), 2)\n average_ending_price = round(mean(ending_prices), 2)\n no_ending_price_higher_than_initial = len(\n [price for price in ending_prices if price > initial_price]\n )\n perc_of_ending_price_above_initial = round(\n no_ending_price_higher_than_initial / len(ending_prices) * 100, 2\n )\n perc_of_ending_price_below_initial = 100 - perc_of_ending_price_above_initial\n\n return {\n \"Max ending price\": max_ending_price,\n \"Min ending price\": min_ending_price,\n \"Average ending price\": average_ending_price,\n \"Perc of ending prices above initial price\": f\"{perc_of_ending_price_above_initial}%\",\n \"Perc of ending prices below initial price\": f\"{perc_of_ending_price_below_initial}%\",\n }\n\n\ndef historical_and_parametric_var_and_cvar(data):\n \"\"\"Returns VaR and CVaR for 0.95, 0.99 and 0.999 confidence level both for historical and parametric calculation method\"\"\"\n\n data[\"Returns\"] = data[\"Close\"].pct_change()\n data = data[[\"Returns\"]]\n data = data.dropna()\n std = data[\"Returns\"].std()\n mean_return = data[\"Returns\"].mean()\n\n # HISTORICAL\n\n var95 = data[\"Returns\"].quantile(0.05)\n cvar95 = data.loc[data[\"Returns\"] <= var95, \"Returns\"].mean()\n\n var99 = data[\"Returns\"].quantile(0.01)\n cvar99 = data.loc[data[\"Returns\"] <= var99, \"Returns\"].mean()\n\n var999 = data[\"Returns\"].quantile(0.001)\n cvar999 = data.loc[data[\"Returns\"] <= var999, \"Returns\"].mean()\n\n # PARAMETRIC\n\n p_var95 = mean_return - norm.ppf(0.95) * std\n p_cvar95 = -(mean_return + (1 - 0.95) ** -1 * norm.pdf(norm.ppf(1 - 0.95)) * std)\n\n p_var99 = mean_return - norm.ppf(0.99) * std\n p_cvar99 = -(mean_return + (1 - 0.99) ** -1 * norm.pdf(norm.ppf(1 - 0.99)) * std)\n\n p_var999 = mean_return - norm.ppf(0.999) * std\n p_cvar999 = -(mean_return + (1 - 0.999) ** -1 * norm.pdf(norm.ppf(1 - 0.999)) * std)\n\n output = {\n \"Historical\": {\n \"VaR\": {\n \"95\": round(var95 * 100, 2),\n \"99\": round(var99 * 100, 2),\n \"99.9\": round(var999 * 100, 2),\n },\n \"CVaR\": {\n \"95\": round(cvar95 * 100, 2),\n \"99\": round(cvar99 * 100, 2),\n \"99.9\": round(cvar999 * 100, 2),\n },\n },\n \"Parametric\": {\n \"VaR\": {\n \"95\": round(p_var95 * 100, 2),\n \"99\": round(p_var99 * 100, 2),\n \"99.9\": round(p_var999 * 100, 2),\n },\n \"CVaR\": {\n \"95\": round(p_cvar95 * 100, 2),\n \"99\": round(p_cvar99 * 100, 2),\n \"99.9\": round(p_cvar999 * 100, 2),\n },\n },\n }\n\n return output\n\n\ndef datatable_settings_multiindex(df, flatten_char=\"_\"):\n \"\"\" Plotly dash datatables do not natively handle multiindex dataframes. 
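# historical_and_parametric_var_and_cvar above takes the 5% quantile of
# returns as the 95% VaR and the mean of the tail beyond it as CVaR. The same
# two lines on synthetic normal returns, checked against the parametric closed
# form the function uses; the mean/std/sample-size choices are arbitrary.
import numpy as np
import pandas as pd
from scipy.stats import norm

r = pd.Series(np.random.default_rng(7).normal(0.0005, 0.01, 100_000))

var95 = r.quantile(0.05)                        # historical 95% VaR
cvar95 = r[r <= var95].mean()                   # historical 95% CVaR
p_var95 = r.mean() - norm.ppf(0.95) * r.std()   # parametric counterpart
print(round(var95, 4), round(p_var95, 4))       # close for normal data
print(round(cvar95, 4))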
\n This function generates a flattend column name list for the dataframe, \n while structuring the columns to maintain their original multi-level format.\n\n Function returns the variables datatable_col_list, datatable_data for the columns and data parameters of\n the dash_table.DataTable\"\"\"\n datatable_col_list = []\n\n levels = df.columns.nlevels\n if levels == 1:\n for i in df.columns:\n datatable_col_list.append({\"name\": i, \"id\": i})\n else:\n columns_list = []\n for i in df.columns:\n col_id = flatten_char.join(i)\n datatable_col_list.append({\"name\": i, \"id\": col_id})\n columns_list.append(col_id)\n df.columns = columns_list\n\n datatable_data = df.to_dict(\"records\")\n\n return datatable_col_list, datatable_data\n\n","repo_name":"Ravdar/tickery","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"24293281917","text":"# Mark 0 values as missing and impute with the mean\n\nimport numpy as np\nimport pandas as pd\n#import urllib\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.impute import SimpleImputer\n\n# Load the Pima Indians Diabetes dataset\n#url = \"http://goo.gl/j0Rvxq\"\n#raw_data = urllib.urlopen(url)\n#dataset = np.loadtxt(raw_data, delimiter=\",\")\n\ndataset = pd.read_csv('diabetes.csv')\nprint(dataset.columns)\nprint(dataset.shape)\n\ndataset = dataset.values\n\n# separate the data and target attributes\nX = dataset[:, 0:7]\ny = dataset[:, 8]\nprint(X)\n# Mark all zero values as 0\nX[X == 0] = np.nan\nprint(X)\n\n# Impute all missing values with the mean of the attribute\nimp = Imputer(missing_values='NaN', strategy='mean')\nimputed_X = imp.fit_transform(X)\n\nprint(imputed_X)","repo_name":"prabhathkota/MLPractise","sub_path":"jumpStartScikitLearn/5_impute.py","file_name":"5_impute.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"6053971033","text":"import telebot\r\nfrom functions_student import registration\r\nfrom config import TOKEN\r\nfrom database import print_res\r\n\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start_message(message):\r\n bot.send_message(message.chat.id, f'{message.from_user.first_name}, отправь свое задание.\\n'\r\n f'Формат сообщения должен быть вида: \"ФИО, номер группы, '\r\n f'номер задания, номер варианта, гит репозиторий\"\\n'\r\n f'Пример: \"Иванов Иван Иванович, 212Б, 2, 3, '\r\n f'github.com/ivanov/task\"')\r\n\r\n\r\n@bot.message_handler(commands=['database'])\r\ndef database(message):\r\n bot.send_message(message.chat.id, print_res())\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef student_register(message):\r\n registration(message)\r\n\r\n\r\nif __name__ == '__main__':\r\n bot.polling(none_stop=True)\r\n","repo_name":"Nazim-Danila-Dima/telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"18535363737","text":"import zmq\nfrom threading import Lock, Thread\nfrom time import sleep, time\nimport traceback\nimport socket\nimport pickle\n\n\ndef get_localhost_ip():\n # Get local IP address\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n address = s.getsockname()[0]\n s.close()\n\n return address\n\n\nclass 
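# 5_impute.py above imports both the long-removed sklearn.preprocessing.Imputer
# and SimpleImputer but calls the old one; Imputer was dropped in scikit-learn
# 0.22. The modern equivalent of the same zeros-as-missing mean imputation, on
# a toy array:
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0],
              [0.0, 4.0],
              [5.0, 0.0]])
X[X == 0] = np.nan  # mark zero values as missing, as the script does

imp = SimpleImputer(missing_values=np.nan, strategy="mean")
print(imp.fit_transform(X))  # NaNs replaced by the column means (3.0 and 3.0)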
paired_messenger(object):\n \"\"\"\n This class can exchange messages with another pair_messenger instance running on the same\n or different machine, pointed to the same IP address.\n Bind one of the instances to address='localhost' \n and the other pointing to the corresponding IP address.\n Use add_callback method to add callbacks which will be called when a message\n is received. You can couple arguments with the callback if presented as a list\n or tuple, with callback being the first element.\n NOTE! On slower systems, the initialization process may take some time.\n If messages are expected to be received or sent immediately, a delay may be necessary\n after instantiation of this object. Depending on system, 0.25 to 1.0 seconds.\n \"\"\"\n def __init__(self, address='localhost', port=5884, timeout=0.5, printMessages=False):\n\n # Identify if client or server instance\n if address == 'localhost':\n self.localhost = True\n address = get_localhost_ip()\n else:\n self.localhost = False\n\n # Connect to an address\n self.url = \"tcp://%s:%d\" % (address, port)\n context = zmq.Context()\n self.socket = context.socket(zmq.PAIR)\n if self.localhost:\n self.socket.bind(self.url)\n else:\n self.socket.connect(self.url)\n self.socket.RCVTIMEO = int(timeout * 1000) # in milliseconds\n\n # Set callbacks list\n self.callbacks = []\n \n if printMessages:\n self.add_callback(lambda msg: print(msg))\n\n # Add verification callback\n self.verification_dict = {}\n self.add_callback(self._verification_check)\n\n # Wait a moment to allow connection to be established\n sleep(2)\n \n # Start listening thread\n self.lock = Lock()\n self.is_running = True\n self.thread = Thread(target=self._run)\n self.thread.start()\n\n def sendMessage(self, message, verify=False):\n \"\"\"Sends message to the paired messenger.\n\n :param bytes message: sent to paired device\n :param bool verify: if True, sendMessage() waits until such message is received back\n \"\"\"\n if verify:\n self.verification_dict[message] = False\n self.socket.send(message)\n if verify:\n while not self.verification_dict[message]:\n sleep(0.05)\n del self.verification_dict[message]\n\n def close(self):\n\n if self.thread.is_alive():\n\n self.lock.acquire()\n self.is_running = False\n self.lock.release()\n\n self.thread.join()\n\n self.socket.disconnect(self.url)\n\n def add_callback(self, cb):\n \"\"\"\n The callback function is started in a new thread if a message is received.\n To pass arguments with callback function, input a list or a tuple with\n callback function as the first element and the rest as individual arguments.\n The callback function should be expecting a message input as a string. If additional\n arguments were passed using the list or tuple method, these arguments should be\n expected by the function in the same order after the message string.\n \"\"\"\n self.callbacks.append(cb)\n\n def _send_message_to_callbacks(self, msg):\n\n for cb in self.callbacks:\n if isinstance(cb, list) or isinstance(cb, tuple):\n Thread(target=cb[0], args=(msg,) + tuple(cb[1:])).start()\n else:\n Thread(target=cb, args=(msg,)).start()\n\n def _process_message(self, msg):\n \"\"\"\n Called for each received message.\n Execution blocks reception of new messages. 
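# A single-machine sketch of wiring two paired_messenger endpoints as the
# docstring above describes: the binding side passes address='localhost', the
# connecting side the binder's IP (get_localhost_ip() is defined in this file).
# Assumes the module is importable as ZMQcomms and port 5884 is free; messages
# are bytes, and each constructor sleeps ~2 s while the link comes up.
from time import sleep
from ZMQcomms import paired_messenger, get_localhost_ip

server = paired_messenger(address='localhost', port=5884)         # binds
client = paired_messenger(address=get_localhost_ip(), port=5884)  # connects
client.add_callback(lambda msg: print('client got:', msg))

server.sendMessage(b'hello')
sleep(1)  # callbacks fire on the listener thread
client.close()
server.close()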
Threading recommended.\n \"\"\"\n self._send_message_to_callbacks(msg)\n\n def _run(self):\n\n while True:\n\n self.lock.acquire()\n running = self.is_running\n self.lock.release()\n\n if not running:\n break\n\n try:\n msg = self.socket.recv()\n self._process_message(msg)\n\n except zmq.ZMQError:\n pass\n\n sleep(.01)\n\n def _verification_check(self, msg):\n if msg in list(self.verification_dict.keys()):\n self.verification_dict[msg] = True\n\n\ndef decode_pickled_message(msg):\n return pickle.loads(msg)\n\n\ndef encode_pickled_message(data):\n return pickle.dumps(data)\n\n\nclass remote_controlled_object(paired_messenger):\n \"\"\"\n When instantiated with an object this class executes any incoming commands on that object.\n The incoming commands are expected to be sent using remote_object_controller.\n\n Note! remote_object_controller must be instantiated pair() method called before\n remote_controlled_object is instantiated.\n\n This class and the object must remain in scope of an active process.\n\n Sending a 'close' command will call close command on the object,\n return value if requested and then closes the remote_controlled_object instance.\n \"\"\"\n def __init__(self, obj, *args, **kwargs):\n \"\"\"\n remote_controlled_object must be instantiated with the object as first input argument.\n\n See paired_messenger for other input arguments.\n \"\"\"\n self.obj = obj\n super(remote_controlled_object, self).__init__(*args, **kwargs)\n self.sendMessage('handshake'.encode())\n\n @staticmethod\n def _parse_message(msg):\n # Extract command to call from message string\n command, msg = msg.split(' '.encode(), 1)\n command = command.decode()\n # Extract return_value request from remaining message string\n return_value, msg = msg.split(' '.encode(), 1)\n if return_value == 'True'.encode():\n return_value = True\n elif return_value == 'False'.encode():\n return_value = False\n else:\n raise ValueError('return_value was not as expected.')\n # Extract input arguments from remaining message string\n input_arguments = decode_pickled_message(msg)\n args = input_arguments['args']\n kwargs = input_arguments['kwargs']\n\n return command, return_value, args, kwargs\n\n def _process_command(self, msg):\n # Parse raw message\n command, return_value, args, kwargs = remote_controlled_object._parse_message(msg)\n # Execute command with or without return value\n if return_value:\n self._execute_command_with_return(command, args, kwargs)\n else:\n self._execute_command(command, args, kwargs)\n # If close command sent, this remote_controlled_object is also closed.\n if command == 'close':\n self.close()\n\n def _execute_command(self, command, args, kwargs):\n \"\"\"\n Parses incoming ZMQ message for function name, input arguments and calls that function.\n \"\"\"\n getattr(self.obj, command)(*args, **kwargs)\n \n def _execute_command_with_return(self, command, args, kwargs):\n \"\"\"\n Parses incoming ZMQ message for function name, input arguments and calls that function.\n \"\"\"\n return_value = getattr(self.obj, command)(*args, **kwargs)\n encoded_return_value = encode_pickled_message(return_value)\n self.sendMessage(encoded_return_value)\n\n def _process_message(self, msg):\n Thread(target=self._process_command, args=(msg,)).start()\n\n\nclass remote_controlled_class(remote_controlled_object):\n \"\"\"\n Allows using a class with remote_controlled_object before it is instantiated.\n\n Note! 
remote_object_controller must be instantiated pair() method called before\n remote_controlled_class is instantiated.\n\n remote_controlled_class must remain in scope to function. This can be achieved by calling\n it with block=True (see __init__() method) or by regularly checking isAlive() method\n to see if 'close' command has been received.\n\n On a paired remote_object_controller instance the sendInitCommand method\n must be called to instantiate the class before sendCommand method can be used.\n\n Once the class has been instantiated, remote_controlled_class behaves as remote_controlled_object.\n \"\"\"\n def __init__(self, C, block, *args, **kwargs):\n \"\"\"\n C - class to be used\n block - bool - if True, remote_controlled_class blocks until 'close' command is received.\n\n See paired_messenger for other input arguments.\n \"\"\"\n self.C = C\n self.class_instantiated = False\n self.keep_class_alive = True\n super(remote_controlled_class, self).__init__(None, *args, **kwargs)\n if block:\n while self.keep_class_alive:\n sleep(0.1)\n\n def _init_object(self, args, kwargs):\n self.obj = self.C(*args, **kwargs)\n self.class_instantiated = True\n self.sendMessage('init_confirmation'.encode())\n\n def _process_command(self, msg):\n if self.class_instantiated:\n super(remote_controlled_class, self)._process_command(msg)\n else:\n command, return_value, args, kwargs = self._parse_message(msg)\n if command == '__init__':\n self._init_object(args, kwargs)\n\n def isAlive(self):\n \"\"\"\n Returns boolean whether the remote_controlled_class is still alive.\n Returns False after close command has been received and instantiated class has been closed.\n \"\"\"\n return self.keep_class_alive\n\n def close(self, *args, **kwargs):\n super(remote_controlled_class, self).close(*args, **kwargs)\n self.keep_class_alive = False\n\n\nclass remote_object_controller(paired_messenger):\n \"\"\"\n Provides access to an object on a remote device via ZMQ.\n\n Can be paired with either remote_controlled_object or remote_controlled_class.\n\n Note! remote_object_controller must be instantiated and pair() method called\n before remote_controlled_object or remote_controlled_class is instantiated.\n\n If paired with remote_controlled_class, sendInitCommand must be called once paired\n to use other methods.\n\n See sendCommand() method for how to send commands and receive return values.\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n See paired_messenger for other input arguments.\n \"\"\"\n self.wait_for_handshake = True\n self.new_return_message = False\n self.wait_for_init_confirmation = False\n super(remote_object_controller, self).__init__(*args, **kwargs)\n\n def pair(self, timeout=0):\n \"\"\"\n Returns True if successfully paired. Returns False if timeout reached.\n\n timeout - float - in seconds. If timeout=0 (default), pair() waits indefinitely.\n \"\"\"\n wait_start_time = time()\n while self.wait_for_handshake:\n sleep(0.1)\n if timeout > 0 and (time() - wait_start_time) > timeout:\n break\n \n return not self.wait_for_handshake\n\n @staticmethod\n def encode_input_arguments(args, kwargs):\n input_arguments = {'args': args, \n 'kwargs': kwargs}\n return encode_pickled_message(input_arguments)\n\n def sendInitCommand(self, timeout, *args, **kwargs):\n \"\"\"\n Returns True if class successfully instantiated. Returns False if timeout reached.\n\n timeout - float - in seconds. 
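# The ordering rule stated in the docstrings above, as a controller-side
# sketch: create remote_object_controller and pair() it, then start the
# remote_controlled_object (wrapping your object, in another process or on
# another machine pointed at this host's IP). The 'ping' method is an invented
# example; 'close' is the documented shutdown command.
from ZMQcomms import remote_object_controller

controller = remote_object_controller(address='localhost', port=5884)
if controller.pair(timeout=10):                     # True once the handshake arrives
    result = controller.sendCommand('ping', True)   # True -> block for return value
    print(result)
    controller.sendCommand('close', False)          # also shuts down the remote side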
If timeout=0 (default), sendInitCommand() waits indefinitely.\n\n Any following arguments are passed into the class __init__().\n \"\"\"\n self.wait_for_init_confirmation = True\n encoded_input_arguments = remote_object_controller.encode_input_arguments(args, kwargs)\n self.sendMessage('__init__'.encode() + ' '.encode()\n + 'True'.encode() + ' '.encode()\n + encoded_input_arguments)\n wait_start_time = time()\n while self.wait_for_init_confirmation:\n sleep(0.1)\n if 0 < timeout < (time() - wait_start_time):\n break\n \n return not self.wait_for_init_confirmation\n\n def sendCommand(self, command, return_value, *args, **kwargs):\n \"\"\"\n command - str - Name of the command to call on the object controlled via ZMQ\n return_value - bool - Whether to return value from command call.\n return_value=True blocks until return value is received.\n Any additional input arguments are used as input arguments in command call on controlled object.\n These additional input arguments are pickled, compressed on this end\n and uncompressed, unpickled on the paired device.\n \"\"\"\n encoded_input_arguments = remote_object_controller.encode_input_arguments(args, kwargs)\n self.sendMessage(command.encode() + ' '.encode()\n + str(return_value).encode() + ' '.encode()\n + encoded_input_arguments)\n if return_value:\n return self._wait_for_return_value()\n\n def _wait_for_return_value(self):\n \"\"\"\n Blocks until return value is available.\n Returns the return value.\n \"\"\"\n while not self.new_return_message:\n sleep(0.1)\n return_value = decode_pickled_message(self.return_message)\n self.new_return_message = False\n \n return return_value\n\n def _process_return_message(self, msg):\n assert not self.new_return_message # Assuming return values are always processed before next arrives.\n self.return_message = msg\n self.new_return_message = True\n\n def _process_message(self, msg):\n if self.wait_for_handshake:\n if msg == 'handshake'.encode():\n self.wait_for_handshake = False\n elif self.wait_for_init_confirmation:\n if msg == 'init_confirmation'.encode():\n self.wait_for_init_confirmation = False\n else:\n Thread(target=self._process_return_message, args=(msg,)).start()\n\n\nclass PublishToOpenEphys(object):\n \"\"\"\n This class allows sending messages to Open Ephys GUI over ZMQ.\n When created with defulat inputs, it will connect to Open Ephys GUI.\n Use sendMessage method to send messages to Open Ephys GUI\n \"\"\"\n def __init__(self, address='localhost', port=5556):\n # Set up ZMQ connection to OpenEphysGUI\n url = \"tcp://%s:%d\" % (address, port)\n context = zmq.Context()\n self.socket = context.socket(zmq.REQ)\n self.socket.connect(url)\n\n def sendMessage(self, message):\n \"\"\"Encodes message string into bytes and sends to OpenEphysGUI\n\n :param str message:\n :return:\n \"\"\"\n self.socket.send(message.encode())\n _ = self.socket.recv()\n\n def close(self):\n self.socket.close()\n\n\ndef SendOpenEphysSingleMessage(message):\n \"\"\"\n This function creates ZMQ connection with Open Ephys GUI just to send one message.\n This is sufficiently fast for single messages that are not very time sensitive.\n \"\"\"\n messenger = PublishToOpenEphys()\n messenger.sendMessage(message)\n messenger.close()\n\n\nclass SubscribeToOpenEphys(object):\n \"\"\"subscription-based zmq network event receiver\n\n This can be used to receive events published using the EventPublisher\n plugin.\n \"\"\"\n\n def __init__(self, address='localhost', port=5557, timeout=2,\n message_filter=''.encode(), verbose=True, 
save_messages=False):\n\n self.address = address\n self.port = port\n self.timeout = timeout\n self.message_filter = message_filter\n self.verbose = verbose\n self.save_messages = save_messages\n\n socket = None\n context = None\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.RCVTIMEO = int(timeout * 1000) # in milliseconds\n\n self.socket = socket\n self.context = context\n\n self.current_url = None\n self.thread = None\n self.messages = []\n self.lock = Lock()\n self.is_running = False\n self.callbacks = []\n\n def connect(self):\n\n if self.socket is None:\n return\n\n if self.is_connected():\n self.disconnect()\n\n url = \"tcp://%s:%d\" % (self.address, self.port)\n if self.verbose:\n print(\"Connecting subscriber to:\", url)\n\n self.socket.connect(url)\n self.socket.setsockopt(zmq.SUBSCRIBE, self.message_filter)\n self.current_url = url\n\n self.is_running = True\n self.thread = Thread(target=self._run)\n self.thread.start()\n\n def disconnect(self):\n\n if self.socket is None:\n return\n\n if self.thread is not None and self.thread.is_alive():\n\n self.lock.acquire()\n self.is_running = False\n self.lock.release()\n\n self.thread.join()\n\n if self.is_connected():\n if self.verbose:\n print(\"Disconnecting subscriber from:\", self.current_url)\n self.socket.disconnect(self.current_url)\n self.current_url = None\n\n def __del__(self):\n\n if self.socket is None:\n return\n\n if self.is_connected():\n if self.verbose:\n print(\"Disconnecting network subscriber ...\")\n self.disconnect()\n\n if self.verbose:\n print(\"Terminating network context ...\")\n self.socket.close()\n self.context.term()\n\n def is_connected(self):\n\n if self.socket is None:\n return False\n\n return self.current_url is not None\n\n def get_messages(self, clear=False):\n\n self.lock.acquire()\n msg = [m for m in self.messages]\n if clear:\n del self.messages[:]\n self.lock.release()\n\n return msg\n\n def add_callback(self, cb):\n\n self.callbacks.append(cb)\n\n def remove_callback(self, cb):\n\n if cb in self.callbacks:\n self.callbacks.remove(cb)\n\n def _send_message_to_callbacks(self, msg):\n\n for cb in self.callbacks:\n Thread(target=cb, args=(msg,)).start()\n\n def _run(self):\n\n while True:\n\n self.lock.acquire()\n running = self.is_running\n self.lock.release()\n\n if not running:\n break\n\n try:\n msg = self.socket.recv()\n\n if self.save_messages:\n self.lock.acquire()\n self.messages.append(msg)\n self.lock.release()\n\n self._send_message_to_callbacks(msg.decode())\n\n except zmq.ZMQError:\n pass\n\n sleep(.01)\n\ndef SubscribeToOpenEphys_message_callback(msg):\n print(\"received event:\", msg)\n\ndef SubscribeToOpenEphys_run_example(args):\n \"\"\"\n An example script for using SubscribeToOpenEphys\n \"\"\"\n\n # parse arguments\n address = 'localhost' # address of system on which OE is running\n port = 5557 # port\n T = 10 # how long to listen for messages\n\n if len(args) > 1:\n address = args[1]\n if len(args) > 2:\n port = int(args[2])\n if len(args) > 3:\n T = float(args[3])\n\n try:\n # connect subscriber to event publisher\n sub = SubscribeToOpenEphys(address=address, port=port)\n sub.add_callback(SubscribeToOpenEphys_message_callback)\n sub.connect()\n\n # _run for T seconds\n sleep(T)\n\n except BaseException:\n traceback.print_exc()\n\n finally:\n # make sure the background thread is being stopped before exiting\n 
sub.disconnect()\n","repo_name":"Barry-lab/openEPhys_DACQ","sub_path":"openEPhys_DACQ/ZMQcomms.py","file_name":"ZMQcomms.py","file_ext":"py","file_size_in_byte":20037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"5354888451","text":"# How to train:\n# python3 tools/train.py configs/convnext_nancho_mod.py\n\n\n\"\"\"\npython3 tools/analysis_tools/analyze_logs.py plot_curve work_dirs/hr32/20220914_155157.log.json --keys s1.loss_bbox\npython3 tools/analysis_tools/analyze_logs.py plot_curve work_dirs/hr32/20220914_155157.log.json --keys loss\n\npython3 tools/test.py configs/kuzushiji.py work_dirs/hr32/latest.pth --eval mAP\npython3 tools/test.py configs/kuzushiji.py work_dirs/hr32/latest.pth --out work_dirs/hr32/test_result.pkl\npython3 tools/analysis_tools/analyze_results.py configs/kuzushiji.py work_dirs/hr32/test_result.pkl work_dirs/hr32/show/\n\n# todo:\nchange _base_ to cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py\nfigure out how to add special metrics\n\n\"\"\"\n# Test\n\n# The new config inherits a base config to highlight the necessary modification\n_base_ = 'convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' #'cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py' #cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py\n\n# We also need to change the num_classes in head to match the dataset's annotation\n# model = dict(\n# roi_head=dict(\n# _delete_=True,\n# bbox_head=dict(num_classes=2),) #4789\n\n# norm_cfg = dict(type='BN', requires_grad=True)\nnorm_cfg = dict(type='GN', num_groups=32, requires_grad=True) #nas_fpn (best performance) https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn\n# norm_cfg = dict(type='BN', requires_grad=True) # FPG\nmodel = dict(\n # backbone=dict(\n # norm_cfg=norm_cfg,\n # ),\n # ------------\n neck=dict( # FPG BEST PERFORMANCE https://github.com/open-mmlab/mmdetection/blob/master/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py\n # type='FPN',\n # in_channels=[96, 192, 384, 768], #[256, 512, 1024, 2048],\n # out_channels=256,\n # relu_before_extra_convs=True,\n # no_norm_on_lateral=True,\n norm_cfg=norm_cfg),\n # num_outs=5),\n roi_head=dict(\n bbox_roi_extractor=dict( #groie https://github.com/open-mmlab/mmdetection/blob/master/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py\n type='GenericRoIExtractor',\n aggregation='sum',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32],\n pre_cfg=dict(\n type='ConvModule',\n in_channels=256,\n out_channels=256,\n kernel_size=5,\n padding=2,\n inplace=False,\n ),\n post_cfg=dict(\n type='GeneralizedAttention',\n in_channels=256,\n spatial_range=-1,\n num_heads=6,\n attention_type='0100',\n kv_stride=2)),\n bbox_head=[\n dict(\n norm_cfg=norm_cfg, # FPG\n type='Shared2FCBBoxHead',\n # explicitly over-write the `num_classes` field, from the default 80 down to 2\n num_classes=2),\n dict(\n norm_cfg=norm_cfg, # FPG\n type='Shared2FCBBoxHead',\n # explicitly over-write the `num_classes` field, from the default 80 down to 2\n num_classes=2),\n dict(\n norm_cfg=norm_cfg, # FPG\n type='Shared2FCBBoxHead',\n # explicitly over-write the `num_classes` field, from the default 80 down to 2\n num_classes=2)\n ]),\n train_cfg=dict(\n rpn_proposal=dict(\n nms=dict(type='nms', iou_threshold=0.25))),\n test_cfg=dict(\n rpn=dict(\n nms_pre=1000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.25),\n min_bbox_size=0),\n 
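# Note: iou_threshold=0.25 in both stages is far stricter than the usual\n # 0.5-0.7; presumably tightly packed character boxes rarely overlap, so\n # duplicate detections can be suppressed this aggressively.\n 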
rcnn=dict(\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.25),\n max_per_img=1000))\n)\n\n# Modify dataset related settings\ndataset_type = 'CustomDataset'\ndata_root = '/home/mauricio/Documents/Pytorch/mmdetection/mmdetection_mau/data/Nancho_dataset/' # kuzushiji_morpho2_section\nclasses = ('background', 'c',)\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\nalbu_train_transforms = [\n dict(\n type='OneOf',\n transforms=[\n dict(\n type='HueSaturationValue',\n hue_shift_limit=7,\n sat_shift_limit=10,\n val_shift_limit=10,\n )\n ]\n ),\n dict(\n type='RandomBrightnessContrast'\n ),\n dict(\n type='RandomGamma'\n ),\n dict(\n type='RandomContrast',\n limit=0.05,\n p=0.75\n ),\n dict(\n type='RandomBrightness',\n limit=0.05,\n p=0.75\n ),\n]\n\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True, with_mask=False),\n dict(\n type='Resize',\n img_scale=[(576, 1024), (544, 1024), #(480, 1024), (512, 1024),\n (608, 1024), (640, 1024), (672, 1024), (704, 1024),\n (736, 1024), (768, 1024), (800, 1024),\n (924, 1024), (1024, 1024)],\n # img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),\n # (608, 1024), (640, 1024)], ##[(size, size) for size in range(540, 1180 + 1, 128)], #(640, 1280 + 1, 128)\n keep_ratio=True,\n multiscale_mode='value'),\n dict(\n type='Albu',\n transforms=albu_train_transforms\n ),\n dict(type='RandomFlip', flip_ratio=float(0)), #commented\n # dict(type='PhotoMetricDistortion'),\n dict(type='Normalize', **img_norm_cfg),\n # dict(type='RandomCrop', crop_size=(640, 640)), #fpg\n # dict(type='Pad', size=(640, 640)),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\nval_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug', #'MultiScaleAug',\n img_scale=[(576, 1024), (544, 1024), #(480, 1024), (512, 1024),\n (608, 1024), (640, 1024), (672, 1024), (704, 1024),\n (736, 1024), (768, 1024), (800, 1024),\n (924, 1024), (1024, 1024)],\n # img_scale=[(640, 640)],\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n # dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n # dict(type='Pad', size=(640, 640)),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=[(576, 1024), (544, 1024), #(480, 1024), (512, 1024),\n (608, 1024), (640, 1024), (672, 1024), (704, 1024),\n (736, 1024), (768, 1024), (800, 1024),\n (924, 1024), (1024, 1024)], # [(size, size) for size in range(768, 1280 + 1, 128)], ##[(size / 1024) for size in range(768, 1280 + 1, 128)], [0.75, 0.875, 1.0, 1.125, 1.25]\n # img_scale=(640, 640),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n # dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n # dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\n\ndata = dict(\n samples_per_gpu=1,\n workers_per_gpu=8,\n # workers_per_gpu=1,\n train=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 'dtrain_crop.pkl', #dtrainval_crop #modified\n img_prefix=data_root + 'train_crops1/',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 
'dval.pkl', #modified\n img_prefix=data_root + 'train_images/',\n pipeline=val_pipeline),\n test=dict(\n type=dataset_type,\n classes=classes,\n ann_file=data_root + 'dtest.pkl',\n img_prefix=data_root + 'test_images/',\n pipeline=test_pipeline))\n\nevaluation = dict(interval=1, metric='mAP', save_best='mAP') # ,save_best='mAP', dynamic_intervals=[(max_epochs - num_last_epochs, 1)]\n# optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,\n# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),\n# 'relative_position_bias_table': dict(decay_mult=0.),\n# 'norm': dict(decay_mult=0.)}))\n\noptimizer = dict( ## original optimizer\n _delete_=True,\n constructor='LearningRateDecayOptimizerConstructor',\n type='AdamW',\n lr=0.0004, #0.0004\n betas=(0.9, 0.999),\n weight_decay=0.05,\n paramwise_cfg={\n 'decay_rate': 0.7, #0.7\n 'decay_type': 'layer_wise',\n 'num_layers': 6\n })\n\n# # 1 GPU * 4 samples_per_gpu * 4 cumulative_iters\n# # to simulate 4 GPUs * 4 samples_per_gpu\n# optimizer_config = dict(cumulative_iters=4)\n# lr_config = dict(warmup_iters=2000) # 500 * cumulative_iters\n\n# optimizer_config = dict(max_norm=35, norm_type=2) # dict(grad_clip=None) #dict(max_norm=35, norm_type=2)\n# learning policy\nlr_config = dict(\n warmup_iters=100,\n # policy='step',\n # warmup='linear',\n # warmup_ratio=1.0 / 3,\n min_lr=3.294e-06, #added\n step=[8, 17, 26, 35, 42], #[8, 17, 26, 35, 42], [8,14,20]\n)\n# We can set the checkpoint saving interval to reduce the storage cost\ncheckpoint_config = dict(interval=25) #10\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\nepochs = 50\n# total_epochs = epochs #12\nrunner = dict(type='EpochBasedRunner', max_epochs=epochs)\nfp16 = None\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/convnext_nancho_necks_2/news'\n# We can use the pre-trained Mask RCNN model to obtain higher performance\nload_from = './checkpoints/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth' # mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth\n# load_from = './work_dirs/convnext_nancho_kuzh_section/best_mAP_epoch_13.pth'\n# resume_from = './work_dirs/convnext_nancho_pre_kuzh_PAFPN/epoch_10.pth'\nworkflow = [('train', 1), ('val', 1)]\n# https://github.com/open-mmlab/mmdetection/blob/master/demo/MMDet_Tutorial.ipynb","repo_name":"Shanoa00/mmdetection_mau","sub_path":"code/mod_models/convnext_nancho_mod_2.py","file_name":"convnext_nancho_mod_2.py","file_ext":"py","file_size_in_byte":11105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"2599981280","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nfrom tkinter.ttk import *\n\n'''\nimport sqlite3\nfrom database import add_book\nfrom database import update_price\nfrom database import delete_book\nfrom database import list_books\n'''\n\nimport database\nfrom objects import Book\n\ndef display_menu():\n print(\"COMMAND MENU\")\n print(\"list - List the books in the database\")\n print(\"add - Add a book to the database\")\n print(\"update - Update price of a book using book name\")\n print(\"del - Delete a book using Book ID\")\n print(\"end - Exit the application\")\n print()\n print(\"Enter a command:\")\n\n \ndef add():\n \n root = 
tk.Tk()\n root.title(\"Add Book UI\")\n root.geometry(\"900x600\")\n\n frame = ttk.Frame(root, padding=\"10 10 10 10\")\n frame.pack(fill=tk.BOTH, expand=True)\n\n def add_books():\n publisher_id = publisherEntry.get()\n book_name = bookNameEntry.get()\n year = yearEntry.get()\n price = priceEntry.get()\n\n book = Book(publisher_id=publisher_id, book_name=book_name, year=year, price=price)\n database.add_book(book)\n commentLabel.config(text=\"Record successfully added!!\")\n \n\n \n def clear():\n publisherEntry.delete(0, END)\n bookNameEntry.delete(0, END)\n yearEntry.delete(0, END)\n priceEntry.delete(0, END)\n commentLabel.config(text=\" \")\n\n def exitWindow():\n main()\n root.destroy()\n \n\n publisherLabel = ttk.Label(frame, text=\"Publisher ID: \")\n bookNameLabel = ttk.Label(frame, text=\"Book Name: \")\n yearLabel = ttk.Label(frame, text=\"Year: \")\n priceLabel = ttk.Label(frame, text=\"Price: \")\n\n publisherLabel.grid(row=0, column=0, sticky=W, pady=2)\n bookNameLabel.grid(row=1, column=0, sticky=W, pady=2)\n yearLabel.grid(row=2, column=0, sticky=W, pady=2)\n priceLabel.grid(row=3, column=0, sticky=W, pady=2)\n\n publisherText = tk.StringVar()\n publisherEntry = ttk.Entry(frame, width=25, textvariable=publisherText)\n\n bookNameText = tk.StringVar()\n bookNameEntry = ttk.Entry(frame, width=25, textvariable=bookNameText)\n\n yearText = tk.StringVar()\n yearEntry = ttk.Entry(frame, width=25, textvariable=yearText)\n\n priceText = tk.StringVar()\n priceEntry = ttk.Entry(frame, width=25, textvariable=priceText)\n\n publisherEntry.grid(row=0, column=1, pady=2)\n bookNameEntry.grid(row=1, column=1, pady=2)\n yearEntry.grid(row=2, column=1, pady=2)\n priceEntry.grid(row=3, column=1, pady=2)\n\n addButton = ttk.Button(frame, text=\"ADD\", command=add_books)\n clearButton = ttk.Button(frame, text=\"CLEAR\", command = clear)\n exitButton = ttk.Button(frame, text=\"EXIT\", command=exitWindow)\n\n addButton.grid(row=4, column=0, sticky=W, pady=2)\n clearButton.grid(row=4, column=1, sticky=W, pady=2)\n exitButton.grid(row=8, column=4, sticky=W, pady=2)\n\n commentLabel=ttk.Label(frame, text=\"Record successfully added!!\")\n commentLabel.grid(row=6, column=0, sticky=W, pady=2)\n commentLabel.config(text=\" \")\n\n root.mainloop()\n\n\ndef update():\n root = tk.Tk()\n root.title(\"Update Price UI\")\n root.geometry(\"900x600\")\n\n frame = ttk.Frame(root, padding=\"10 10 10 10\")\n frame.pack(fill=tk.BOTH, expand=True)\n\n def updateprice():\n book_name = bookNameEntry.get()\n new_price = priceEntry.get()\n book = Book(book_name=book_name, price=new_price)\n database.update_price(book)\n commentLabel.config(text=\"Price successfully updated!\")\n\n\n def clear():\n bookNameEntry.delete(0, END)\n priceEntry.delete(0, END)\n commentLabel.config(text=\" \")\n\n def exitWindow():\n main()\n root.destroy()\n \n\n\n\n bookNameLabel = ttk.Label(frame, text=\"Book Name: \")\n priceLabel = ttk.Label(frame, text=\"Price: \")\n\n bookNameLabel.grid(row=0, column=0, sticky=W, pady=2)\n priceLabel.grid(row=1, column=0, sticky=W, pady=2)\n\n bookNameText = tk.StringVar()\n bookNameEntry = ttk.Entry(frame, width=25, textvariable=bookNameText)\n\n priceText = tk.StringVar()\n priceEntry = ttk.Entry(frame, width=25, textvariable=priceText)\n\n bookNameEntry.grid(row=0, column=1, pady=2)\n priceEntry.grid(row=1, column=1, pady=2)\n\n updateButton = ttk.Button(frame, text=\"UPDATE\", command=updateprice)\n clearButton = ttk.Button(frame, text=\"CLEAR\", command=clear)\n exitButton = ttk.Button(frame, 
text=\"EXIT\", command=exitWindow)\n\n updateButton.grid(row=2, column=0, sticky=W, pady=2)\n clearButton.grid(row=2, column=1, sticky=W, pady=2)\n exitButton.grid(row=8, column=4, sticky=W, pady=2)\n\n commentLabel=ttk.Label(frame, text=\"Record successfully added!!\")\n commentLabel.grid(row=6, column=0, sticky=W, pady=2)\n commentLabel.config(text=\" \")\n\n root.mainloop()\n\ndef delete():\n root = tk.Tk()\n root.title(\"Delete Book UI\")\n root.geometry(\"900x600\")\n\n frame = ttk.Frame(root, padding=\"10 10 10 10\")\n frame.pack(fill=tk.BOTH, expand=True)\n\n def deletebook():\n book_id = bookIDEntry.get()\n book = Book(id=book_id)\n database.delete_book(book)\n commentLabel.config(text=\"Book Successfully Deleted!\")\n\n \n\n def clear():\n bookIDEntry.delete(0, END)\n commentLabel.config(text=\" \")\n\n def exitWindow():\n main()\n root.destroy()\n \n\n \n bookIDLabel = ttk.Label(frame, text=\"Book ID: \")\n bookIDLabel.grid(row=0, column=0, sticky=W, pady=2)\n\n bookIDText = tk.StringVar()\n bookIDEntry = ttk.Entry(frame, width=25, textvariable=bookIDText)\n\n bookIDEntry.grid(row=0, column=1, pady=2)\n\n deleteButton = ttk.Button(frame, text=\"DELETE\", command=deletebook)\n clearButton = ttk.Button(frame, text=\"CLEAR\", command=clear)\n exitButton = ttk.Button(frame, text=\"EXIT\", command=exitWindow)\n\n deleteButton.grid(row=1, column=0, sticky=W, pady=2)\n clearButton.grid(row=1, column=1, sticky=W, pady=2)\n exitButton.grid(row=8, column=4, sticky=W, pady=2)\n\n commentLabel=ttk.Label(frame, text=\"Record successfully added!!\")\n commentLabel.grid(row=6, column=0, sticky=W, pady=2)\n commentLabel.config(text=\" \")\n\n root.mainloop()\n\n\n#add()\n#update()\n#delete()\n\ndef main():\n display_menu()\n while True: \n command = input(\"Command: \")\n if command == \"list\":\n list_books()\n elif command == \"add\":\n add()\n elif command == \"update\":\n update()\n elif command == \"del\":\n delete()\n elif command == \"end\":\n break\n else:\n print(\"Not a valid command. 
Please try again.\\n\")\n display_menu()\n print(\"Bye!\")\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mudzingwa/library","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"31219257283","text":"import lz4.frame\nimport json\nimport glob\nimport sys\nimport os\n\nif len(sys.argv) < 2:\n print(f\"Usage: {sys.argv[0]} <dataset_dir>\")\n sys.exit(0)\n\ndef load_compressed_json(file_path):\n \"\"\"\n Loads a compressed JSON file using the LZ4 compression algorithm.\n\n :param file_path: The path to the compressed JSON file.\n :return: The contents of the JSON file as a dictionary.\n \"\"\"\n try:\n\n # lz4.frame.open() already decompresses, so read and parse directly\n with lz4.frame.open(file_path, 'rb') as f:\n decompressed_data = f.read()\n data = json.loads(decompressed_data)\n return data\n\n except Exception:\n\n with open(file_path, 'rb') as f:\n compressed_data = f.read()\n\n decompressed_data = lz4.frame.decompress(compressed_data)\n data = json.loads(decompressed_data.decode('utf-8', errors='replace'))\n return data\n\n\nMemory = {\"movabs\", \"push\", \"mov\", \"movzx\", \"pop\", \"cmove\", \"movsd\", \"cmpxchg\", \"cmps\", \"lods\", \"stos\", \"scas\", \"xchg\", \"lodsd\", \"stosd\", \"scasd\", \"xlatb\", \"scasb\", \"movsb\", \"movdqa\", \"movups\", \"movaps\", \"cmovns\", \"cmovbe\", \"cmovle\", \"cmova\", \"cmovae\", \"cmovg\", \"cmovge\", \"cmovb\", \"cmovl\", \"cmovo\", \"rep stosd\", \"rep movsd\", \"stosb\", \"lodsb\", \"rep stosb\", \"rep movsb\", \"movdqu\", \"popal\", \"pushal\", \"cmovne\", \"cmovs\", \"bswap\", \"cmovae\", \"storw\", \"mvn\", \"movq\", \"movsw\", \"movd\", \"ldr\", \"str\", \"ldrb\", \"strb\", \"ldm\", \"stp\"}\nFloatingPoint = {\"fnstenv\", \"fcom\", \"fistp\", \"fisub\", \"fiadd\", \"fidiv\", \"ficom\", \"fimul\", \"fsubr\", \"f2xm1\", \"fidivr\", \"fdiv\", \"fild\", \"fnstcw\", \"fnstsw\", \"lahf\", \"fistp\", \"sets\", \"fstp\", \"fld\", \"fadd\", \"fcomp\", \"fdivr\", \"fmul\", \"fst\", \"fldcw\", \"fsub\", \"fxch\", \"fisttp\", \"cmpsd\"}\nConditionalJump = {\"jno\", \"js\", \"jae\", \"je\", \"jb\", \"jne\", \"jns\", \"ja\", \"jbe\", \"jle\", \"jg\", \"jge\", \"jl\", \"jle\", \"jo\", \"jp\", \"jpe\", \"jpo\", \"sete\", \"seta\", \"setne\", \"jnp\", \"bne\", \"beq\", \"loop\", \"loope\", \"loopne\", \"repne scasb\", \"jecxz\", \"setge\", \"daa\", \"aas\", \"ble\", \"bgt\", \"setl\", \"blo\", \"cmn\", \"bhs\", \"asrs\", \"asr\"}\nArithmetic = {\"sub\", \"add\", \"nop\", \"shl\", \"sar\", \"div\", \"imul\", \"dec\", \"mul\", \"inc\", \"neg\", \"not\", \"adc\", \"sbb\", \"rol\", \"ror\", \"shr\", \"sal\", \"idiv\", \"rcr\", \"rcl\", \"aaa\", \"das\", \"aad\", \"shrd\", \"shld\", \"adds\", \"subs\", \"rsb\"}\nSystem = {\"in\", \"out\", \"cli\", \"sti\", \"hlt\", \"nop\", \"wait\", \"cmc\", \"cld\", \"std\", \"sahf\", \"pushfd\", \"clc\", \"cpuid\", \"syscall\", \"stc\", \"salc\"}\nTypeConversion = {\"cdq\", \"cbw\", \"cwde\", \"cwtl\", \"cwd\", \"movsx\", \"movsxd\", \"movzx\", \"aam\", \"cdqe\"}\nAtomicOperations = {\"lock cmpxchg\", \"lock sbb\", \"lock xadd\", \"lock dec\", \"lock inc\", \"lock add\", \"lock or\"}\nUnconditionalJump = {\"jmp\", \"call\", \"ret\", \"iretd\", \"retf\", \"jrcxz\", \"xbegin\", \"b\", \"bl\", \"bsr\", \"ljmp\"}\nPrivileged = {\"int\", \"endbr64\", \"bnd jmp\", \"bnd ret\", \"int3\", \"endbr32\"}\nCrypto = {\"aesenc\", \"aesdec\", \"aesimc\", \"aeskeygenassist\"}\nLogic = 
{\"xor\", \"test\", \"and\", \"or\", \"bt\", \"bts\", \"xorps\"}\nAddressing = {\"lea\", \"leave\", \"enter\"}\nComparison = {\"cmp\", \"setg\", \"setb\"}\n\n\ndef group(inst_freq, num_inst) -> dict:\n groups = {\n \"Arithmetic\": 0,\n \"Memory\": 0,\n \"ConditionalJump\": 0,\n \"UnconditionalJump\": 0,\n \"Logic\": 0,\n \"System\": 0,\n \"Privileged\": 0,\n \"Crypto\": 0,\n \"Comparison\": 0,\n \"Addressing\": 0,\n \"TypeConversion\": 0,\n \"AtomicOperations\": 0,\n \"FloatingPoint\": 0,\n \"Undefined\": 0\n }\n\n for (inst, freq) in inst_freq.items():\n\n if inst in Arithmetic:\n groups[\"Arithmetic\"] += freq\n elif inst in Memory:\n groups[\"Memory\"] += freq\n elif inst in ConditionalJump:\n groups[\"ConditionalJump\"] += freq\n elif inst in UnconditionalJump:\n groups[\"UnconditionalJump\"] += freq\n elif inst in Logic:\n groups[\"Logic\"] += freq\n elif inst in System:\n groups[\"System\"] += freq\n elif inst in Privileged:\n groups[\"Privileged\"] += freq\n elif inst in Crypto:\n groups[\"Crypto\"] += freq\n elif inst in Comparison:\n groups[\"Comparison\"] += freq\n elif inst in Addressing:\n groups[\"Addressing\"] += freq\n elif inst in TypeConversion:\n groups[\"TypeConversion\"] += freq\n elif inst in AtomicOperations:\n groups[\"AtomicOperations\"] += freq\n elif inst in FloatingPoint:\n groups[\"FloatingPoint\"] += freq\n else:\n groups[\"Undefined\"] += freq\n\n return groups\n\n\ndataset = os.path.abspath(sys.argv[1])\n\nfor featurefile in glob.glob('**/features.json', recursive=True, root_dir=dataset):\n output_file = f\"{dataset}/{os.path.dirname(featurefile)}/groups.feature\"\n insts_number = 0\n groups = {}\n try:\n j = load_compressed_json(featurefile)\n insts_freq = j[\"CODE\"][\"INSTS_STATS\"][\"inst_type_freq\"]\n insts_number = j[\"CODE\"][\"INSTS_STATS\"][\"num_insts\"]\n groups = group(insts_freq, insts_number)\n\n except:\n pass\n\n with open(output_file, 'w') as outfile:\n json.dump(groups, outfile)\n print(output_file)\n","repo_name":"sg1o/devAV","sub_path":"research/feature_generation/features/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"3222105813","text":"#! /usr/bin/env python3\n\n# TODO: Refactor code into classes.... 
Ideas for classes: DNAseq, RNAseq, PROTseq?\n\n'''\nA collection of tools for processing bioinformatics data\n'''\n\nimport re\n\n# globals\npossible_nts = ['A', 'T', 'U', 'C', 'G']\ncomp_dna = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\ncomp_rna = {'A': 'U', 'U': 'A', 'C': 'G', 'G': 'C'}\ntransitions = {'A': 'G', 'G': 'A', 'C': 'T', 'T': 'C'}\ncodon_list = {'UUU': 'F', 'CUU': 'L', 'AUU': 'I', 'GUU': 'V',\n 'UUC': 'F', 'CUC': 'L', 'AUC': 'I', 'GUC': 'V',\n 'UUA': 'L', 'CUA': 'L', 'AUA': 'I', 'GUA': 'V',\n 'UUG': 'L', 'CUG': 'L', 'AUG': 'M', 'GUG': 'V',\n 'UCU': 'S', 'CCU': 'P', 'ACU': 'T', 'GCU': 'A',\n 'UCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',\n 'UCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',\n 'UCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',\n 'UAU': 'Y', 'CAU': 'H', 'AAU': 'N', 'GAU': 'D',\n 'UAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',\n 'UAA': '_', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',\n 'UAG': '_', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',\n 'UGU': 'C', 'CGU': 'R', 'AGU': 'S', 'GGU': 'G',\n 'UGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',\n 'UGA': '_', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',\n 'UGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'}\n\n\nclass NotInFastaError(Exception):\n pass\n\n\ndef validate_sequence(nt_sequence):\n temp, checkedUT = nt_sequence.upper(), False\n # 1. check if sequence contains U and T both (error)\n if not checkedUT:\n if 'U' in temp and 'T' in temp:\n exit(\"Sequence error: contains 'T' and 'U'.\")\n checkedUT = True\n # 2. parse nts and validate they are\n for nt in temp:\n if nt not in possible_nts:\n return False\n return temp\n\n\ndef hamming_distance(seq1, seq2):\n dist = 0\n for s, t in zip(seq1, seq2):\n if s != t:\n dist += 1\n return dist\n\n\ndef read_fasta(filepath):\n seqDict = None\n try:\n with open(filepath) as fasta:\n if not re.search('>', open(filepath).read()):\n raise NotInFastaError\n else:\n read = [line.strip() for line in fasta]\n seqID, seqDict = '', {}\n\n for line in read:\n if line.startswith('>'):\n seqID = line\n seqDict[seqID] = ''\n else:\n validated = validate_sequence(line)\n seqDict[seqID] += validated\n except NotInFastaError:\n with open(filepath) as fasta:\n seqDict = [line.strip() for line in fasta]\n finally:\n return seqDict\n\n\ndef transitions_transversions(seq1, seq2):\n ts, tv = 0.0, 0.0\n for s, t in zip(seq1, seq2):\n if s != t:\n if transitions[s] == t:\n ts += 1\n else:\n tv += 1\n return ts/tv\n\n\ndef translate_rna_protein(seq, initial_pos=0):\n # if not rna, transcribe it\n rna = validate_sequence(seq)\n prot = []\n if 'T' in rna:\n rna = transcribe_dna_rna(rna)\n for pos in range(initial_pos, len(rna)-2, 3):\n if codon_list[rna[pos:pos+3]] == '_':\n break\n prot += codon_list[rna[pos:pos+3]]\n # return [codon_list[rna[pos:pos+3]] for pos in range(initial_pos, len(rna)-2, 3)]\n return prot\n\n\ndef transcribe_dna_rna(dna):\n return dna.replace('T', 'U')\n\n\ndef reverse_complement(seq):\n # validate that sequence is not wrong\n if 'T' in seq and 'U' in seq:\n exit('Error: Sequence cannot contain both \\'T\\' and \\'U\\' nucleotides.')\n if 'U' in seq:\n return reverse(rna_complement(seq))\n if 'T' in seq:\n return reverse(dna_complement(seq))\n\n\ndef reverse(seq):\n return ''.join(seq[::-1])\n\n\ndef dna_complement(seq):\n return [comp_dna[base] for base in seq]\n\n\ndef rna_complement(seq):\n return [comp_rna[base] for base in seq]\n\n\ndef gc_content(seq):\n return (seq.count('G') + seq.count('C'))/len(seq)\n\n\ndef find_orfs(seq: str) -> list:\n orfs = []\n if not 'U' in seq:\n seq = 
transcribe_dna_rna(seq)\n for stop in ['UAA', 'UAG', 'UGA']:\n regex = f'AUG[AUCG]*{stop}'\n orfs += re.findall(r'(?=(%s))' % regex, seq)\n return orfs\n\n\ndef infer_rna(protein: str) -> int:\n \"\"\"infer_rna takes an input protein string and calculates the number of unique RNA strands it may come from.\n\n Args:\n protein (str): A protein sequence.\n\n Returns:\n int: The number of possible parent RNAs, modulo 1000000.\n \"\"\"\n\n infer = sum([value == '_' for value in codon_list.values()])\n\n for aa in protein:\n infer *= sum([value == aa for value in codon_list.values()])\n\n return infer % 1000000\n","repo_name":"ryancey1/Rosalind","sub_path":"Bioinformatics-Stronghold/bioinformatics_tools.py","file_name":"bioinformatics_tools.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"24478762717","text":"from collections import deque\nimport sys\nsys.stdin = open(\"W03-W04/Week03/3055/input.txt\",\"r\")\n\nR, C = map(int, sys.stdin.readline().split())\n\nforest = []; spring = set(); visited = set()\nS = ()\nfor i in range(R):\n line = sys.stdin.readline().strip()\n for j,a in enumerate(line):\n if a == 'S' : S = ('S', i,j); visited.add((i,j))\n if a == '*' : spring.add((i,j))\n forest.append(line)\n\n# Building a separate numeric grid to mirror forest blows past the memory limit.\n# encode = {'.':0, 'S':0 , '*':1 , 'X':1 , 'D':float('inf')}\n# for i in range(R):\n# l = []\n# for j,a in enumerate(sys.stdin.readline().strip()):\n# if a == 'S' : S = (i,j)\n# if a == '*' : spring.append((i,j))\n# l.append(encode[a])\n# forest.append(l)\n \ndef bound(nr,nc):\n if nr < 0 or nr >= R:\n return False\n if nc < 0 or nc >= C:\n return False\n if (nr,nc) in spring:\n return False\n return True\n\ndef flood(v, q, spring, forest):\n for d in (1,-1):\n for nr,nc in [(v[1]+d,v[2]), (v[1],v[2]+d)]:\n if bound(nr,nc) and forest[nr][nc] in '.S':\n spring.add((nr,nc))\n q.append(('*',nr,nc))\n \ndef flee(v, q, forest):\n for d in (1,-1):\n for nr,nc in [(v[1]+d,v[2]), (v[1],v[2]+d)]:\n if bound(nr,nc) and (nr,nc) not in visited:\n if forest[nr][nc] == '.':\n q.append(('S', nr,nc))\n visited.add((nr,nc))\n elif forest[nr][nc] == 'D':\n q.append(('D', nr,nc)) # I got stuck here for a while after using appendleft.\n # With appendleft the step ordering is not preserved.\n \nq = deque()\nfor s in spring:\n q.append(('*',s[0],s[1]))\nq.append(S)\n\n# Run the water and the hedgehog in a single BFS; it only matters that the water moves first.\ndef go(q):\n step = 0\n while q :\n adj_cnt = len(q)\n while adj_cnt > 0:\n v = q.popleft()\n if v[0] == 'D':\n print(step)\n return True\n elif v[0] == '*':\n flood(v, q, spring, forest)\n elif v[0] == 'S':\n flee(v, q, forest)\n adj_cnt -= 1\n step += 1\n\n return False\n\nif not go(q):\n print('KAKTUS')","repo_name":"jaehyeonkim2358/SW_Jungle_5B","sub_path":"Week03/3055/3055_rivolt0421.py","file_name":"3055_rivolt0421.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"}
{"seq_id":"73533555473","text":"def get_info_numeral_ending(ending):\n if ending in num_ending_ordinal:\n return \"ord\"\n elif ending in num_ending_fraction_and_top:\n return \"top\"\n elif ending in num_ending_collective:\n return \"coll\"\n elif ending in num_ending_not_sure:\n return \"chamalama\"\n else:\n return 'none'\n\ndef check_numerals(self, word, list):\n root = ''\n if word == 'үчүнчү':\n self.set_symbol('num_ord', 'үнчү')\n list.append('num_ord')\n root = 'үч'\n elif word == 'алтынчы':\n self.set_symbol('num_ord', 'нчы')\n list.append('num_ord')\n root = 'алты'\n elif word == 'кыркча':\n self.set_symbol('num_appr1', 'ча')\n list.append('num_appr1')\n root = 'кырк'\n elif word == 'кырктай':\n self.set_symbol('num_appr2', 'тай')\n list.append('num_appr2')\n root = 'кырк'\n elif word == 'кырктаган':\n self.set_symbol('num_appr3', 'таган')\n list.append('num_appr3')\n root = 'кырк'\n elif word == 'кырктан':\n self.set_symbol('num_top', 'тан')\n list.append('num_top')\n root = 'кырк'\n elif word == 'бирөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'бир'\n elif word == 'экөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'эки'\n elif word == 'үчөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'үч'\n elif word == 'төртөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'төрт'\n elif word == 'бешөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'беш'\n elif word == 'алтоо':\n self.set_symbol('num_coll', 'оо')\n list.append('num_coll')\n root = 'алты'\n elif word == 'жетөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'жети'\n elif word == 'сезизөө':\n self.set_symbol('num_coll', 'өө')\n list.append('num_coll')\n root = 'сегиз'\n return root, list\nnum_ending_ordinal = { # ordinal numerals\n 'ынчы', 'инчи', 'үнчү', 'унчу', 'нчы', 'нчи'\n}\nnum_ending_collective = { # collective numerals\n 'оо', 'өө'\n}\nnum_ending_not_sure = { # approximate numerals\n 'дай', 'дой', 'дей', 'дөй',\n 'тай', 'той', 'тей', 'төй',\n 'ча', 'чө', 'че', 'чо',\n 'догон', 'дөгөн', 'деген', 'даган',\n 'тогон', 'төгөн', 'теген', 'таган',\n 'дук', 'дап', 'дүк', 'дөп', 'деп', 'дик', 'доп',\n 'лөп', 'лап', 'леп', 'лоп',\n 'тап', 'теп', 'топ', 'төп'\n}\nnum_root_not_sure = { # approximate numerals\n 'чамалуу', 'чакты', 'чамалаган', 'ашык', 'артык', 'ашуун', 'аша', 'көп', 'аз', 'жакын'\n}\nnum_ending_fraction_and_top = { # fractional and grouping numerals\n 'дан', 'тан', 'ден', 'тен', 'дон', 'тон', 'дөн', 'төн'\n}\nnum_root_fraction = {\n 'жарым', 'жарты', 'бүтүн', 'чейрек',\n}\nnum_root = {\"нөл\", \"бир\", \"эки\", \"үч\", \"төрт\", \"беш\", \"алты\", \"жети\", \"сегиз\", \"тогуз\", \"он\",\n \"жыйырма\", \"отуз\", \"кырк\", \"элүү\", \"алтымыш\", \"жетимиш\", \"сексен\", \"токсон\", \"жүз\",\n \"миң\", \"миллион\", \"миллиард\"}\nnum_numeral = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", 
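# digit characters kept as strings; get_degree() tests membership here to\n # tell plain digits apart from magnitude words such as \"миң\"\n 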
\"7\", \"8\", \"9\"}\nnum_degree = {\"10\", \"100\", \"1000\", \"1000000\", \"1000000000\"}\n\n\ndef get_num(number):\n num = 0\n if number == \"бир\":\n num = 1\n elif number == \"нөл\":\n num = 0\n elif number == \"он\":\n num = 10\n elif number == \"жүз\":\n num = 100\n elif number == \"миң\":\n num = 1000\n elif number == \"эки\":\n num = 2\n elif number == \"үч\":\n num = 3\n elif number == \"төрт\":\n num = 4\n elif number == \"беш\":\n num = 5\n elif number == \"алты\":\n num = 6\n elif number == \"жети\":\n num = 7\n elif number == \"сегиз\":\n num = 8\n elif number == \"тогуз\":\n num = 9\n elif number == \"жыйырма\":\n num = 20\n elif number == \"отуз\":\n num = 30\n elif number == \"кырк\":\n num = 40\n elif number == \"элүү\":\n num = 50\n elif number == \"алтымыш\":\n num = 60\n elif number == \"жетимиш\":\n num = 70\n elif number == \"сексен\":\n num = 80\n elif number == \"токсон\":\n num = 90\n elif number == \"миллион\":\n num = 10 ** 6\n elif number == \"миллиард\":\n num = 10 ** 9\n return num\n\n\ndef get_degree(num):\n sum = 0\n num_degree_thousand = 0\n num_degree_million = 0\n num_degree_milliard = 0\n if True:\n for n in num:\n if str(n) in num_degree:\n if str(n) == \"1000\":\n num_degree_thousand = sum * n\n sum = 0\n elif str(n) == \"1000000000\":\n num_degree_milliard = sum * n\n sum = 0\n elif str(n) == \"1000000\":\n num_degree_million = sum * n\n sum = 0\n else:\n sum = sum * n\n elif str(n) in num_numeral:\n sum = sum + n\n else:\n sum = sum + n\n return sum + num_degree_thousand + num_degree_million + num_degree_milliard\n\n\ndef get_info_numeral(numbers):\n total_number = 0\n all_numbers = []\n for number in numbers:\n number = number.lower()\n i = get_num(number)\n all_numbers.append(i)\n total_number = get_degree(all_numbers)\n return total_number\n\n\ndef get_info_numeral_root(root): # word numeral to digit numeral\n # Ex:сексен алты миллион беш жүз кырк төрт миң бир жүз токсон беш = 86544195\n if len(root) > 2:\n rezult = get_info_numeral(root)\n return rezult\n else:\n return 'none'\n","repo_name":"Aynagul/kyrgyz_morph_analyzer","sub_path":"two_level_morphanalyzer/analyzer/backend/analyzer/endings/Numeral.py","file_name":"Numeral.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"22006695765","text":"opcao = \"\"\r\nwhile opcao != \"0\":\r\n print(\"(1) - Verificar valor na tabela de Fibonnaci\\n(0) - Encerrar programa\\n\")\r\n opcao = (input(\"Escolha uma opção: \"))\r\n\r\n if opcao == \"1\":\r\n num = int(input(\"\\nDigite um número inteiro: \"))\r\n i = \"\"\r\n n1 = 0\r\n n2 = 1\r\n cont = 0\r\n while cont <= num:\r\n n3 = n1 + n2\r\n print(n3 , end=\" \")\r\n n1 = n2\r\n n2 = n3\r\n cont = cont + 1\r\n if num == n3:\r\n i = \"sim\"\r\n break\r\n if i == \"sim\":\r\n print(\"\\nAção bem sucedida!\\n\")\r\n else:\r\n print(\"\\nA Ação falhou!\\n\")\r\n\r\n elif opcao > \"1\" or opcao < \"0\":\r\n print(\"\\n *** Opção inválida... Tente novamente! ***\\n\")\r\n\r\nprint(\"\\n ***** Programa encerrado! *****\")\r\n\r\n","repo_name":"Mir4nd4s/Lista01_cap3-FIAP-ADS-2021","sub_path":"EX03.py","file_name":"EX03.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4292957068","text":"import numpy as np\n\nfrom ... 
import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom ..array_utils import as_same_device, device\nfrom .core import TensorReduction, TensorReductionMixin, numel\n\n\nclass TensorMean(TensorReduction, TensorReductionMixin):\n _op_type_ = OperandDef.MEAN\n\n def __init__(self, axis=None, keepdims=None, combine_size=None, stage=None, **kw):\n stage = self._rewrite_stage(stage)\n super().__init__(\n _axis=axis,\n _keepdims=keepdims,\n _combine_size=combine_size,\n stage=stage,\n **kw\n )\n\n @classmethod\n def execute_agg(cls, ctx, op):\n axis = cls.get_axis(op.axis)\n\n a = ctx[op.inputs[0].key]\n if not isinstance(a, (list, tuple)):\n (inp,), device_id, xp = as_same_device(\n [a], device=op.device, ret_extra=True\n )\n\n with device(device_id):\n ctx[op.outputs[0].key] = xp.mean(\n inp, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)\n )\n else:\n (_data, _count), device_id, xp = as_same_device(\n a, device=op.device, ret_extra=True\n )\n\n with device(device_id):\n chunk_count = xp.sum(\n _count, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)\n )\n chunk_sum = xp.sum(\n _data, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)\n )\n ctx[op.outputs[0].key] = xp.true_divide(\n chunk_sum, chunk_count, dtype=op.dtype\n )\n\n @classmethod\n def execute_map(cls, ctx, op):\n (in_chunk,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True\n )\n\n axis = cls.get_axis(op.axis)\n\n with device(device_id):\n chunk_count = numel(\n in_chunk, axis=axis, dtype=np.int64, keepdims=bool(op.keepdims)\n )\n chunk_sum = xp.sum(\n in_chunk, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)\n )\n ctx[op.outputs[0].key] = (chunk_sum, chunk_count)\n\n @classmethod\n def execute_combine(cls, ctx, op):\n axis = cls.get_axis(op.axis)\n (_data, _count), device_id, xp = as_same_device(\n ctx[op.inputs[0].key], device=op.device, ret_extra=True\n )\n\n with device(device_id):\n chunk_count = xp.sum(\n _count, axis=axis, dtype=np.int64, keepdims=bool(op.keepdims)\n )\n chunk_sum = xp.sum(\n _data, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims)\n )\n ctx[op.outputs[0].key] = (chunk_sum, chunk_count)\n\n\ndef mean(a, axis=None, dtype=None, out=None, keepdims=None, combine_size=None):\n \"\"\"\n Compute the arithmetic mean along the specified axis.\n\n Returns the average of the array elements. The average is taken over\n the flattened tensor by default, otherwise over the specified axis.\n `float64` intermediate and return values are used for integer inputs.\n\n Parameters\n ----------\n a : array_like\n Tensor containing numbers whose mean is desired. If `a` is not an\n tensor, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to\n compute the mean of the flattened array.\n\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean. For integer inputs, the default\n is `float64`; for floating point inputs, it is the same as the\n input dtype.\n out : Tensor, optional\n Alternate output tensor in which to place the result. 
The default\n is ``None``; if provided, it must have the same shape as the\n expected output, but the type will be cast if necessary.\n See `doc.ufuncs` for details.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input tensor.\n\n If the default value is passed, then `keepdims` will not be\n passed through to the `mean` method of sub-classes of\n `Tensor`, however any non-default value will be. If the\n sub-classes `sum` method does not implement `keepdims` any\n exceptions will be raised.\n combine_size: int, optional\n The number of chunks to combine.\n\n Returns\n -------\n m : Tensor, see dtype parameter above\n If `out=None`, returns a new tensor containing the mean values,\n otherwise a reference to the output array is returned.\n\n See Also\n --------\n average : Weighted average\n std, var, nanmean, nanstd, nanvar\n\n Notes\n -----\n The arithmetic mean is the sum of the elements along the axis divided\n by the number of elements.\n\n Note that for floating-point input, the mean is computed using the\n same precision the input has. Depending on the input data, this can\n cause the results to be inaccurate, especially for `float32` (see\n example below). Specifying a higher-precision accumulator using the\n `dtype` keyword can alleviate this issue.\n\n By default, `float16` results are computed using `float32` intermediates\n for extra precision.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.array([[1, 2], [3, 4]])\n >>> mt.mean(a).execute()\n 2.5\n >>> mt.mean(a, axis=0).execute()\n array([ 2., 3.])\n >>> mt.mean(a, axis=1).execute()\n array([ 1.5, 3.5])\n\n In single precision, `mean` can be inaccurate:\n\n >>> a = mt.zeros((2, 512*512), dtype=mt.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> mt.mean(a).execute()\n 0.54999924\n\n Computing the mean in float64 is more accurate:\n\n >>> mt.mean(a, dtype=mt.float64).execute()\n 0.55000000074505806\n\n \"\"\"\n a = astensor(a)\n if dtype is None:\n dtype = np.mean(np.empty((1,), dtype=a.dtype)).dtype\n op = TensorMean(\n axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size\n )\n return op(a, out=out)\n","repo_name":"mars-project/mars","sub_path":"mars/tensor/reduction/mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":2654,"dataset":"github-code","pt":"84"} +{"seq_id":"7609326258","text":"import sys\nsys.path.append('/home/pi/.local/lib/python3.7/site-packages')\nimport socket\nimport pyttsx3\nimport time\n\nengine = pyttsx3.init() # object creation\n\n\"\"\" RATE\"\"\"\nrate = engine.getProperty('rate') # getting details of current speaking rate\nprint(rate) # printing current voice rate\nengine.setProperty('rate', 125) # setting up new voice rate\n\n\n\"\"\"VOLUME\"\"\"\nvolume = engine.getProperty(\n 'volume') # getting to know current volume level (min=0 and max=1)\nprint(volume) # printing current volume level\nengine.setProperty('volume', 1.0) # setting up volume level between 0 and 1\n\n\"\"\"VOICE\"\"\"\nvoices = engine.getProperty('voices') # getting details of current voice\n# engine.setProperty('voice', voices[0].id)\n# changing index, changes voices. o for male\n# changing index, changes voices. 
1 for female\nengine.setProperty('voice', voices[8].id)\n\n# for idx, voice in enumerate(voices):\n# print(idx, voice.id)\n\n\nengine.say('Guten Tag.')\n\ntime.sleep(30) # wait 30s for internet connection\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect((\"8.8.8.8\", 80))\nip = s.getsockname()[0]\ns.close()\n\nprint(ip)\nip = ip.replace('', ' ').replace('.', 'Punkt')\nengine.say('Meine IP Adresse ist ' + ip)\nengine.runAndWait()\nengine.stop()\n","repo_name":"oliverruoff/PiBot_v3","sub_path":"Say_Ip_Adress.py","file_name":"Say_Ip_Adress.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"33508796896","text":"class Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n\r\nclass Queue:\r\n def __init__(self, value):\r\n newNode = Node(value)\r\n self.first = newNode\r\n self.last = newNode\r\n self.length = 1\r\n\r\n def print_queue(self):\r\n temp = self.first\r\n while temp is not None:\r\n print(temp.value)\r\n temp = temp.next\r\n\r\n def enqueue(self, value):\r\n newNode = Node(value)\r\n if not self.last:\r\n self.first = newNode\r\n self.last = newNode\r\n else:\r\n self.last.next = newNode\r\n self.last = newNode\r\n\r\n self.length += 1\r\n return newNode\r\n\r\n def dequeue(self):\r\n if not self.first:\r\n return None\r\n\r\n temp = self.first\r\n if self.first is self.last:\r\n self.first = None\r\n self.last = None\r\n else:\r\n self.first = self.first.next\r\n temp.next = None\r\n self.length -= 1\r\n return temp\r\n\r\narr = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\r\ns = \"\".join(arr).replace(\"2\",\"4\")\r\n\r\nprint(s, set(s))","repo_name":"mahdirahman1/algo_and_ds","sub_path":"Stack & Queue/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22108340178","text":"def solution(clothes):\n ans={}\n \n for i in clothes:\n if i[1] in ans:\n ans[i[1]] += 1\n else:\n ans[i[1]] = 1\n cnt = 1\n for i in ans.values():\n cnt *= (i+1)\n return cnt -1 # 다 입지 않았을 때 수를 뺴줘야함\n\n\n\nclothes = [[\"yellow_hat\", \"headgear\"], [\"blue_sunglasses\", \"eyewear\"], [\"green_turban\", \"headgear\"]]\nsolution(clothes)","repo_name":"jae1jeong/Algorithm","sub_path":"Programmers/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"42056479026","text":"# -*- coding: utf-8 -*-\nimport re\nfrom copy import deepcopy\nimport scrapy\nimport json\nfrom ..settings import NEWS_ALL_PAGE, PLATES, COMMENT_ALL_PAGE\nfrom ..items import TencentnewsappItem\n\n\nclass NewsSpider(scrapy.Spider):\n name = 'news'\n allowed_domains = ['inews.qq.com']\n plates_code = dict(news_news_recommend='推荐', news_news_top='要闻', news_news_ent='娱乐', news_news_19='新时代',\n news_news_sports='体育', news_news_mil='军事', news_news_nba='NBA', news_news_game='游戏',\n news_news_world='国际', news_news_tech='科技', news_news_finance='财经', news_news_auto='汽车',\n news_news_movie='电影', news_news_zongyi='综艺', news_news_food='美食', news_news_orignal='眼界',\n news_news_istock='股票', news_news_kepu='科学', news_news_health='健康', news_news_5g='5G',\n news_news_university='追光少年', news_news_agri='三农', news_news_tencentgy='公益', news_news_acg='二次元',\n news_news_netcourt='政法网事', news_news_twentyf='必读', news_news_nflfootball='NFL',\n news_news_cba='CBA', 
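# keys are Tencent News channel ids and values their display names;\n # start_requests() inverts this mapping to turn the configured PLATES\n # names back into chlid codes for the request URL\n 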
news_news_icesnow='冰雪', news_news_football='足球', news_news_media='传媒',\n news_news_music='音乐', news_news_history='历史', news_news_pet='宠物', news_news_baby='育儿',\n news_news_visit='旅游', news_news_meirong='美容', news_news_edu='教育', news_news_astro='星座',\n news_news_digi='数码', news_news_esport='电竞', news_news_jiaju='家居', news_news_msh='政务',\n news_news_olympic='综合体育', news_news_pplvideo='人民视频', news_news_legal='法制',\n news_news_workplace='职场', news_news_emotion='情感', news_news_lic='理财', news_news_fx='新国风',\n news_news_house='房产', news_news_cul='文化', news_news_lad='时尚')\n plates = PLATES\n\n def start_requests(self): # generate the start URLs\n url_model = 'https://r.inews.qq.com/getQQNewsUnreadList?chlid={}&page=0&forward=0&devid=863064010529115&appver=22_android_5.8.21'\n for plate in self.plates:\n if plate in self.plates_code.values():\n code = list(self.plates_code.keys())[list(self.plates_code.values()).index(plate)]\n yield scrapy.Request(url=url_model.format(code), callback=self.parse)\n\n def parse(self, response): # parse a news list page\n item = TencentnewsappItem()\n html = json.loads(response.text)\n # extract the news URLs\n news_list = html['newslist']\n for news in news_list:\n item['_id'] = news['id']\n item['url'] = news['url']\n item['title'] = news['title']\n item['time'] = news['time']\n item['uinnick'] = news['uinnick']\n item['source'] = news['source']\n item['type'] = news['realChlName']\n detail_url = 'https://r.inews.qq.com/getSimpleNews?id={}'.format(item['_id'])\n yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': deepcopy(item)})\n # next page\n for page in range(NEWS_ALL_PAGE):\n next_url = re.sub(r'page=\\d+&', 'page={}&'.format(page), response.url)\n yield scrapy.Request(url=next_url, callback=self.parse)\n\n def parse_detail(self, response): # parse a news detail page\n item = response.meta.get('item')\n html = json.loads(response.text)\n item['comment_id'] = html['commentid']\n item['media_id'] = html['media_id']\n item['media_source'] = html['card']['chlname']\n if '' in html['content']['text']:\n item['is_video'] = 'yes'\n item['video_url'] = html['attribute']['VIDEO_0']['playurl']\n else:\n item['is_video'] = 'no'\n item['text'] = html['content']['text']\n if '' in html['content']['text']:\n imgs = []\n img_count = 0\n for img_info in html['attribute'].values():\n imgs.append(img_info['url'])\n img_count += 1\n item['imgs_url'] = imgs\n # hand off to the comment page (hot comments)\n comment_url = 'https://r.inews.qq.com/getQQNewsComment?showType=orig&comment_id={}'.format(item['comment_id'])\n yield scrapy.Request(url=comment_url, callback=self.parse_comment, meta={'item': item, 'commentid': item['comment_id']})\n\n def parse_comment(self, response): # parse a comment page\n item = response.meta.get('item')\n commentid = response.meta.get('commentid')\n html = json.loads(response.body)\n coral_score = ''\n comment_list = html['comments']['new']\n item['comment_list'] = []\n for comment in comment_list:\n if comment is comment_list[-1]:\n coral_score = comment[0]['coral_score']\n comment_item = {}\n comment = comment[0]\n comment_item['comment_uid'] = comment['coral_uid']\n comment_item['comment_nick'] = comment['nick']\n comment_item['comment_content'] = comment['reply_content']\n comment_item['comment_agree_count'] = comment.get('agree_count', '')\n comment_item['reply_num'] = comment.get('reply_num', '')\n comment_item['reply_list'] = []\n reply_list = comment['reply_list']\n for reply in reply_list:\n reply_item = {}\n reply = reply[0]\n reply_item['reply_uid'] = reply['coral_uid']\n reply_item['reply_nick'] = reply['nick']\n 
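# agree_count can be missing from a reply payload, hence the .get() fallback below\n 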
reply_item['reply_content'] = reply['reply_content']\n reply_item['reply_agree_count'] = reply.get('agree_count', '')\n comment_item['reply_list'].append(reply_item)\n item['comment_list'].append(comment_item)\n # comment pagination\n if coral_score:\n for i in range(COMMENT_ALL_PAGE):\n comment_url = 'https://r.inews.qq.com/getQQNewsComment?showType=orig&comment_id={}&coral_score={}'.format(commentid, coral_score)\n yield scrapy.Request(url=comment_url, callback=self.parse_comment, meta={'item': item})\n item['comment_count'] = html['comments']['count']\n yield item\n\n","repo_name":"OSinoooO/TencentNewsApp","sub_path":"TencentNewsApp/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"}
{"seq_id":"30728165412","text":"with open(\"data.txt\") as f:\n drawn = [int(x) for x in f.readline().strip('\\n').split(',')]\n cards = []\n while f.readline():\n card = []\n for i in range(5):\n card.extend([int(x) for x in f.readline().strip('\\n').split(' ') if x != ''])\n cards.append(card)\n \n\ndef isWinner(card):\n #horizontal\n start = 0\n for i in range(5):\n if card[start] + card[start+1] + card[start+2] + card[start+3] + card[start+4] == 500:\n return True\n start += 5\n #vertical\n start = 0\n for i in range(5):\n if card[start] + card[start+5] + card[start+10] + card[start+15] + card[start+20] == 500:\n return True\n start += 1\n return False\n\nfound = False\nwhile found == False:\n number = drawn[0]\n drawn = drawn[1:]\n for card in cards:\n for i in range(len(card)):\n if card[i] == number:\n card[i] = 100 # mark the drawn cell; '==' here would be a no-op comparison\n for card in cards:\n if isWinner(card):\n total = sum([x for x in card if x != 100])\n print('Part 1', total * number)\n found = True\n\n\n# found = False\n# while found == False:\n# number = drawn[0]\n# drawn = drawn[1:]\n# \n# for index in range(len(cards)):\n# for i in range(len(cards[index])):\n# if cards[index][i] == number:\n# cards[index][i] = 100\n\n# index = 0\n# while index < len(cards):\n# if isWinner(cards[index]):\n# if len(cards) > 1:\n# cards.pop(index)\n# else:\n# found = True\n# print(cards[index])\n# break\n# else:\n# index += 1\n\n#total = sum([x for x in cards[index] if x != 100])\n#print(\"Part 2 solution:\", total * number)\n","repo_name":"PiotrZak/AOC","sub_path":"2021/days4.py","file_name":"days4.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"19548708336","text":"class Alignment(object):\n \"\"\"\n Class including some interesting functions to create the\n output file mallet.\n \"\"\"\n\n def __init__(self, sequence, state_path, score = None):\n self.score = score\n self.sequence = sequence\n self.state_path = state_path\n\n #Other functions:\n\n def __len__(self):\n \"\"\"Return the length of a sequence/path\"\"\"\n return len(self.sequence)\n\n def __eq__(self, other_alignment):\n \"\"\"\n Compare the score, sequence and path of two alignments and\n return True only when all three match.\n \"\"\"\n x = self.score == other_alignment.score\n y = self.sequence == other_alignment.sequence\n z = self.state_path == other_alignment.state_path\n return x and y and z\n\n def __repr__(self):\n output = \"\"\n if self.score:\n output += \"[{:.4f}]\".format(self.score)\n else:\n output += \"[-]\"\n output += \"[{}|\".format(self.sequence.sequence)\n\n if self.state_path:\n output += \"{}]\".format(self.state_path.sequence)\n else:\n output += \"-]\"\n return 
output\n # return \"Score: {:.4f}\\nSequence:\\n{}\\nPath:\\n{}\\n\\n\".format(self.score, self.sequence, self.state_path)\n","repo_name":"undeadpixel/mallet","sub_path":"mallet/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"74507983315","text":"# JTMS-14\r\n# a6 p5.py\r\n# Abdullah Rafique\r\n# arafique@jacobs-university.de\r\ndef main():\r\n # input for list\r\n lst = []\r\n n = int(input(\"Enter an integer= \"))\r\n print(\"Enter {} float values:\".format(n))\r\n # using append to add number\r\n for i in range(n):\r\n a = float(input(\"Enter a floating point: \"))\r\n lst.append(a)\r\n\r\n lst2 = []\r\n for element in lst:\r\n lst2.append(element)\r\n print(\"original list:\")\r\n print_list(lst)\r\n# calling functions\r\n add(lst, 1.5)\r\n print(\"After addition\")\r\n print_list(lst)\r\n\r\n multiply(lst2, 5)\r\n print(\"After multiplying by 5\")\r\n\r\n print_list(lst2)\r\n\r\n# add function\r\n\r\n\r\ndef add(lst, val):\r\n count = 0\r\n for i in lst:\r\n lst[count] = lst[count]+val\r\n count += 1\r\n\r\n# function of multiply\r\n\r\n\r\ndef multiply(lst, val):\r\n count = 0\r\n for i in lst:\r\n lst[count] = lst[count]*val\r\n count += 1\r\n\r\n# printing list\r\n\r\n\r\ndef print_list(lst):\r\n for i in lst:\r\n f = float(i)\r\n print(f)\r\n\r\n\r\nmain()\r\n","repo_name":"abdullahr007/python","sub_path":"assignment 6/q5/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"24196597935","text":"import sys\nfrom os import path\nfrom typing import Any, List, cast\n\nfrom sqlalchemy.orm.session import Session\n\n# Path hack before we try to import bespoke\nsys.path.append(path.realpath(path.join(path.dirname(__file__), \"../../src\")))\n\nfrom bespoke import errors\nfrom bespoke.db import models\nfrom bespoke.excel import excel_reader\n\n\ndef _check_no_dangling_references(company_id: str, session: Session) -> None:\n\tpayment = cast(\n\t\tmodels.Payment,\n\t\tsession.query(models.Payment).filter(\n\t\t\tmodels.Payment.company_id == company_id\n\t\t).first())\n\n\tif payment:\n\t\traise errors.Error('Company {} has an existing payment, so we cannot delete this company'.format(company_id))\n\ndef _delete_company(company_id: str, session: Session) -> None:\n\n\tdef _delete_db_objects(db_classes: List[Any]):\n\t\tfor db_class in db_classes:\n\t\t\tdb_objects = session.query(db_class).filter(db_class.company_id == company_id).all()\n\t\t\tif not db_objects:\n\t\t\t\tcontinue\n\n\t\t\tfor db_object in db_objects:\n\t\t\t\tsession.delete(db_object)\n\n\tcompany = session.query(models.Company).filter(\n\t\tmodels.Company.id == company_id).first()\n\tcompany.contract_id = None\n\tcompany.company_settings_id = None\n\n\tcompany_settings_list = session.query(models.CompanySettings).filter(\n\t\tmodels.CompanySettings.company_id == company_id).all()\n\tfor company_settings in company_settings_list:\n\t\tcompany_settings.company_id = None\n\n\tsession.flush()\n\n\tpre_db_classes = [\n\t\t# 
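models.AuditEvent is left out of this list, presumably because deleting audit\n\t\t# events hits the same SQL timeout described in the IMPORTANT note below.\n\t\t# 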
models.AuditEvent,\n\t\tmodels.BankAccount,\n\t\tmodels.CompanyAgreement,\n\t\tmodels.CompanyLicense,\n\t\tmodels.EbbaApplication,\n\t\tmodels.File,\n\t\tmodels.FinancialSummary,\n\t\tmodels.Invoice,\n\t\tmodels.LineOfCredit,\n\t\tmodels.Loan,\n\t\tmodels.Payment,\n\t\tmodels.PurchaseOrder,\n\t\tmodels.User,\n\t\tmodels.Contract,\n\t\tmodels.CompanySettings\n\t]\n\n\t_delete_db_objects(pre_db_classes)\n\n\t# IMPORTANT\n\t# The final delete of the company row fails due to a SQL timeout when the audit_events\n\t# table is searched due to the audit_events.company_id foreign key constraint.\n\t# session.delete(company)\n\ndef dedupe(session: Session, path: str) -> None:\n\n\tworkbook, err = excel_reader.ExcelWorkbook.load_xlsx(path)\n\tif err:\n\t\traise Exception(err)\n\n\tsheet, err = workbook.get_sheet_by_index(0)\n\tif err:\n\t\traise Exception(err)\n\n\trow_tuples = sheet['rows']\n\tfiltered_row_tuples = list(filter(lambda tup: tup[0] != '', row_tuples[1:]))\n\tdedupe_tuples(session, filtered_row_tuples)\n\ndef dedupe_tuples(session: Session, row_tuples: List[List[str]]):\n\tprint('Beginning dedupe...')\n\trows_count = len(row_tuples)\n\t\n\tfor index, row in enumerate(row_tuples):\n\t\tprint(f'[{index + 1} of {rows_count}]')\n\n\t\t# (replace_company_id, to_delete_company_id, partnership_type)\n\t\treplacing_company_id = row[0]\n\t\tto_delete_company_id = row[1]\n\t\tpartnership_type = row[2]\n\n\t\tprint(f'[{index + 1} of {rows_count}] Deduping: merge company {to_delete_company_id} into company {replacing_company_id} for partnership type {partnership_type}')\n\n\t\texisting_replacing_company = cast(\n\t\t\tmodels.Company,\n\t\t\tsession.query(models.Company).filter(\n\t\t\t\tmodels.Company.id == replacing_company_id\n\t\t\t).first())\n\n\t\tif not existing_replacing_company:\n\t\t\traise errors.Error('No company found with ID {}'.format(replacing_company_id))\n\n\t\texisting_to_delete_company = cast(\n\t\t\tmodels.Company,\n\t\t\tsession.query(models.Company).filter(\n\t\t\t\tmodels.Company.id == to_delete_company_id\n\t\t\t).first())\n\n\t\tif not existing_to_delete_company:\n\t\t\traise errors.Error('No company found with ID {}'.format(to_delete_company_id))\n\n\t\t_check_no_dangling_references(to_delete_company_id, session)\n\n\t\tif partnership_type == 'vendor':\n\t\t\t# De-dupe operations for vendor case\n\t\t\t# (1) Change purchase_orders.vendor_id\n\t\t\t# (2) Change vendor agreements (and files)\n\t\t\t# (3) Change vendor partnership\n\t\t\t# (4) Change line_of_credits.recipient_vendor_id\n\t\t\t# (5) Transfer bank account of previous vendor\n\n\t\t\t# 1\n\t\t\tpurchase_orders = session.query(models.PurchaseOrder).filter(\n\t\t\t\tmodels.PurchaseOrder.vendor_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor purchase_order in purchase_orders:\n\t\t\t\tpurchase_order.vendor_id = replacing_company_id\n\n\t\t\t# 2\n\t\t\tcompany_agreements = session.query(models.CompanyAgreement).filter(\n\t\t\t\tmodels.CompanyAgreement.company_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor company_agreement in company_agreements:\n\t\t\t\t# Change files.company_id\n\t\t\t\tcompany_agreement_file = session.query(models.File).get(company_agreement.file_id)\n\t\t\t\tcompany_agreement_file.company_id = replacing_company_id\n\t\t\t\t# Change company_agreements.company_id\n\t\t\t\tcompany_agreement.company_id = replacing_company_id\n\n\t\t\t# 3\n\t\t\tcompany_vendor_partnerships = session.query(models.CompanyVendorPartnership).filter(\n\t\t\t\tmodels.CompanyVendorPartnership.vendor_id == 
to_delete_company_id\n\t\t\t).all()\n\t\t\tfor partnership in company_vendor_partnerships:\n\t\t\t\texisting_company_vendor_partnership = session.query(\n\t\t\t\t\tmodels.CompanyVendorPartnership\n\t\t\t\t).filter_by(\n\t\t\t\t\tcompany_id=partnership.company_id,\n\t\t\t\t\tvendor_id=replacing_company_id,\n\t\t\t\t).first()\n\n\t\t\t\tif existing_company_vendor_partnership:\n\t\t\t\t\tsession.delete(partnership)\n\t\t\t\telse:\n\t\t\t\t\tpartnership.vendor_id = replacing_company_id\n\n\t\t\t# 4\n\t\t\tline_of_credits = session.query(models.LineOfCredit).filter(\n\t\t\t\tmodels.LineOfCredit.recipient_vendor_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor line_of_credit in line_of_credits:\n\t\t\t\tline_of_credit.recipient_vendor_id = replacing_company_id\n\n\t\t\t# 5\n\t\t\tbank_accounts = session.query(models.BankAccount).filter(\n\t\t\t\tmodels.BankAccount.company_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor bank_account in bank_accounts:\n\t\t\t\tbank_account.company_id = replacing_company_id\n\n\t\t\tif existing_replacing_company.is_vendor != True:\n\t\t\t\texisting_replacing_company.is_vendor = True\n\n\t\telif partnership_type == 'payor':\n\t\t\t# De-dupe operations for payor case\n\t\t\t# (1) Change payor agreements (and files)\n\t\t\t# (2) Change payor partnership\n\t\t\t# (3) Change invoices.payor_id\n\n\t\t\t# 1\n\t\t\tcompany_agreements = session.query(models.CompanyAgreement).filter(\n\t\t\t\tmodels.CompanyAgreement.company_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor company_agreement in company_agreements:\n\t\t\t\t# Change files.company_id\n\t\t\t\tcompany_agreement_file = session.query(models.File).get(company_agreement.file_id)\n\t\t\t\tcompany_agreement_file.company_id = replacing_company_id\n\t\t\t\t# Change company_agreements.company_id\n\t\t\t\tcompany_agreement.company_id = replacing_company_id\n\n\t\t\t# 2\n\t\t\tcompany_payor_partnerships = session.query(models.CompanyPayorPartnership).filter(\n\t\t\t\tmodels.CompanyPayorPartnership.payor_id == to_delete_company_id\n\t\t\t).all()\n\n\t\t\tfor partnership in company_payor_partnerships:\n\t\t\t\texisting_company_payor_partnership = session.query(\n\t\t\t\t\tmodels.CompanyPayorPartnership\n\t\t\t\t).filter_by(\n\t\t\t\t\tcompany_id=partnership.company_id,\n\t\t\t\t\tpayor_id=replacing_company_id,\n\t\t\t\t).first()\n\n\t\t\t\tif existing_company_payor_partnership:\n\t\t\t\t\tsession.delete(partnership)\n\t\t\t\telse:\n\t\t\t\t\tpartnership.payor_id = replacing_company_id\n\n\t\t\t# 3\n\t\t\tinvoices = session.query(models.Invoice).filter(\n\t\t\t\tmodels.Invoice.payor_id == to_delete_company_id\n\t\t\t).all()\n\t\t\tfor invoice in invoices:\n\t\t\t\tinvoice.payor_id = replacing_company_id\n\n\t\t\tif existing_replacing_company.is_payor != True:\n\t\t\t\texisting_replacing_company.is_payor = True\n\n\t\telse:\n\t\t\traise errors.Error('Unexpected partnership_type {}'.format(partnership_type))\n\n\t\t# De-dupe operations for both payor / vendor cases\n\t\t# 1) Change company_licenses.company_id\n\t\t# 2) Change users.company_id\n\t\t# 3) Change metrc_api_keys.company_id\n\t\tcompany_licenses = session.query(models.CompanyLicense).filter(\n\t\t\tmodels.CompanyLicense.company_id == to_delete_company_id\n\t\t).all()\n\t\tfor company_license in company_licenses:\n\t\t\tcompany_license.company_id = replacing_company_id\n\n\t\tcompany_license_file_ids = [company_license.file_id for company_license in company_licenses]\n\t\tcompany_license_files = 
session.query(models.File).filter(\n\t\t\tmodels.File.id.in_(company_license_file_ids)\n\t\t).all()\n\t\tfor company_license_file in company_license_files:\n\t\t\tcompany_license_file.company_id = replacing_company_id\n\n\t\tusers = session.query(models.User).filter(\n\t\t\tmodels.User.company_id == to_delete_company_id\n\t\t).all()\n\t\tfor user in users:\n\t\t\tuser.company_id = replacing_company_id\n\n\t\tmetrc_api_keys = session.query(models.MetrcApiKey).filter(\n\t\t\tmodels.MetrcApiKey.company_id == to_delete_company_id\n\t\t).all()\n\t\tfor metrc_api_key in metrc_api_keys:\n\t\t\tmetrc_api_key.company_id = replacing_company_id\n\n\t\t# Delete everything about the company ID to delete\n\t\t_delete_company(to_delete_company_id, session)\n\n\t\tsession.flush()\n\n\t\tprint(f'[{index + 1} of {rows_count}] Successfully deduped partnership type {partnership_type}')\n\n# Extracts a new vendor company out of an existing company.\n# This is useful when you want to undo an erroneous merge.\ndef extract_vendor_from_company(\n\tsession: Session,\n\tcompany_info_tuple: List[List[str]],\n\tis_test_run: bool = True,\n) -> None:\n\tif is_test_run:\n\t\tprint('Running in DRY RUN MODE...')\n\n\t(\n\t\toriginal_company_identifier,\n\t\tnew_company_name,\n\t\tnew_company_dba_name,\n\t\tvendor_partner_name, # Name of vendor partner (customer) to be extracted\n\t\tuser_email, # Email of user to be extracted\n\t\tpurchase_order_numbers, # List of purchase numbers to move to new vendor\n\t) = company_info_tuple\n\n\toriginal_company = session.query(models.Company).filter(\n\t\tmodels.Company.identifier == original_company_identifier\n\t).first()\n\n\tif original_company:\n\t\tprint(f'Found vendor company {original_company.name}')\n\telse:\n\t\traise errors.Error(f'No company found with identifier {original_company_identifier}')\n\n\tif not is_test_run:\n\t\tcompany_settings = models.CompanySettings()\n\t\tsession.add(company_settings)\n\t\tsession.flush()\n\t\tcompany_settings_id = str(company_settings.id)\n\n\t\tnew_company = models.Company(\n\t\t\tcompany_settings_id=company_settings_id,\n\t\t\tis_customer=False,\n\t\t\tis_payor=False,\n\t\t\tis_vendor=True,\n\t\t\tname=new_company_name,\n\t\t\tdba_name=new_company_dba_name,\n\t\t)\n\t\tsession.add(new_company)\n\t\tsession.flush()\n\n\tprint(f'Created new vendor company {new_company_name}')\n\n\t# Customer company who is partnered with new vendor.\n\tvendor_partner = session.query(models.Company).filter(\n\t\tmodels.Company.name == vendor_partner_name\n\t).filter(\n\t\tmodels.Company.is_customer.is_(True)\n\t).first()\n\n\tif vendor_partner:\n\t\tprint(f'Found vendor partner (customer) {vendor_partner.name}')\n\telse:\n\t\traise errors.Error(f'No company found with name {vendor_partner_name}')\n\n\t# 1. Transfer vendor_partnerships\n\tcompany_vendor_partnership = session.query(models.CompanyVendorPartnership).filter(\n\t\tmodels.CompanyVendorPartnership.vendor_id == original_company.id\n\t).filter(\n\t\tmodels.CompanyVendorPartnership.company_id == vendor_partner.id\n\t).first()\n\n\tif company_vendor_partnership:\n\t\tprint(f'Found company vendor partnership between {original_company.name} (vendor) and {vendor_partner.name} (customer), changing vendor of partnership to {new_company_name}...')\n\telse:\n\t\traise errors.Error(f'No company vendor partnership found between {original_company.name} and {vendor_partner.name}')\n\n\tif not is_test_run:\n\t\tcompany_vendor_partnership.vendor_id = new_company.id\n\n\t# 2. 
Transfer users\n\tuser = session.query(models.User).filter(\n\t\tmodels.User.email == user_email.lower()\n\t).first()\n\n\tif user:\n\t\tprint(f'Found user with email {user.email}, transferring to {new_company_name}')\n\telse:\n\t\traise errors.Error(f'No user found with email {user_email}')\n\n\tif not is_test_run:\n\t\tuser.company_id = new_company.id\n\n\t# 3. Transfer purchase orders\n\tfor purchase_order_number in purchase_order_numbers:\n\t\tpurchase_order = session.query(models.PurchaseOrder).filter(\n\t\t\tmodels.PurchaseOrder.company_id == vendor_partner.id\n\t\t).filter(\n\t\t\tmodels.PurchaseOrder.order_number == purchase_order_number\n\t\t).first()\n\n\t\tif purchase_order:\n\t\t\tprint(f'Found purchase order for {vendor_partner.name} (with vendor {original_company.name}) with {purchase_order.order_number}, changing vendor to {new_company_name}')\n\t\telse:\n\t\t\traise errors.Error(f'No purchase order found for {vendor_partner.name} (with vendor {original_company.name}) with {purchase_order_number}')\n\n\t\tif not is_test_run:\n\t\t\tpurchase_order.vendor_id = new_company.id\n\n\tprint('DONE!')\n","repo_name":"warrenshen/platform","sub_path":"services/api-server/scripts/lib/dedupe_companies.py","file_name":"dedupe_companies.py","file_ext":"py","file_size_in_byte":12137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"3623400015","text":"size=int(input(\"Enter the size of list:\"))\nlst=[]\nfor i in range(size):\n    lst.append(int(input(\"Enter list element:\")))\n# lst.sort()\nprint(f\"\\nYour list is {lst}\\n\")\n\nreverse1=lst[:]\nreverse1.reverse()\nprint(f\"First reverse:{reverse1}\")\n\nreverse2=lst[::-1]\nprint(f\"Second reverse:{reverse2}\")\n\nreverse3=lst[:]\nfor i in range(len(reverse3)//2):\n    reverse3[i],reverse3[len(reverse3)-i-1]=reverse3[len(reverse3)-i-1],reverse3[i]\nprint(f\"Third reverse:{reverse3}\")\n\nif reverse1==reverse2==reverse3:\n    print(\"\\nAll three lists are equally reversed...\")","repo_name":"pulkitsujaan/CodeDocs","sub_path":"Python/practiceproblem3(2).py","file_name":"practiceproblem3(2).py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"28531592045","text":"import subprocess\nfrom moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip\nfrom moviepy.editor import VideoFileClip, concatenate_videoclips\nfrom os import system\nimport os\nranges=[]\nwith open('v2_time_ranges.txt','r') as f:\n    for line in f:\n        words = line.split()\n        x,y=words\n        xx=int(x)\n        yy=int(y)\n        ranges.append((xx,yy))\n#SLICING VIDEO IN TIME RANGES AND THEN FINALLY MERGING TO GET FINAL VIDEO OUTPUT\nmax_idx=len(ranges)\nfor i in range (0,max_idx):\n    x,y=ranges[i]\n    z=y-x\n    name=\"D:/BEPROJECT/tempp/highlights\"+str(i)+\".mp4\"\n    command=\"ffmpeg -ss \"+str(x)+\".0 -i match.mp4 -t \"+str(z)+\" \"+name\n    subprocess.call(command, shell=True)\nclips=[]\nfor i in range (0,max_idx):\n    name=\"D:/BEPROJECT/tempp/highlights\"+str(i)+\".mp4\"\n    clips.append(VideoFileClip(name))\nfinal_clip = concatenate_videoclips(clips)\nfinal_clip.write_videofile(\"v2_highlights.mp4\")\n\n","repo_name":"varadkulkarni11/AUTOMATIC-SPORTS-HIGHLIGHTS-GENERATOR","sub_path":"Merge - slice module/merge_slice.py","file_name":"merge_slice.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"72521885396","text":"import nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport 
numpy\nimport tflearn\nimport tensorflow\nimport random\nimport json\nimport pickle\n\nwith open(\"intents.json\") as file:\n    info = json.load(file)\n\ntry:\n    with open(\"info.pickle\", \"rb\") as f:\n        words, labels, x_train, y_train = pickle.load(f)\nexcept:\n    #variable lists\n\n    words = []\n    labels = []\n    x_docs = []\n    docs_y = []\n    # symbols_and_num = ['{', '}', '(', ')','[',']', '.', ',', ':', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=','~', '$', '1', '2', '3','4', '5', '6', '7', '8', '9', '0']\n\n    #tokenization\n    for intent in info[\"intents\"]:\n        for pattern in intent[\"patterns\"]:\n            wrds = nltk.word_tokenize(pattern)\n\n            words.extend(wrds)\n\n            x_docs.append(wrds)\n            \n            docs_y.append(intent[\"tag\"])\n\n            if intent[\"tag\"] not in labels:\n                labels.append(intent[\"tag\"])\n\n    words = [stemmer.stem(w.lower()) for w in words if w != '?']\n\n    print(words)\n    words = sorted(list(set(words)))\n\n    labels = sorted(labels)\n\n    x_train = []\n    y_train = []\n\n    empty = [0 for _ in range(len(labels))]\n\n    for x, doc in enumerate(x_docs):\n        bag = []\n\n        wrds = [stemmer.stem(w.lower()) for w in doc]\n\n        for w in words:\n            if w in wrds:\n                bag.append(1)\n            else:\n                bag.append(0)\n\n        output = empty[:]\n        output[labels.index(docs_y[x])] = 1\n\n        x_train.append(bag)\n        y_train.append(output)\n\n\n    x_train = numpy.array(x_train)\n    y_train = numpy.array(y_train)\n\n    with open(\"info.pickle\", \"wb\") as f:\n        pickle.dump((words, labels, x_train, y_train), f)\n\ntensorflow.reset_default_graph()\n\nnet = tflearn.input_data(shape=[None, len(x_train[0])])\nnet = tflearn.fully_connected(net, 16, activation = \"relu\")\nnet = tflearn.fully_connected(net, 16, activation = \"relu\")\nnet = tflearn.fully_connected(net, len(y_train[0]), activation=\"softmax\")\nnet = tflearn.regression(net)\n\nmodel = tflearn.DNN(net)\n\ntry:\n    model.load(\"model.tflearn\")\nexcept:\n    model.fit(x_train, y_train, n_epoch=1000, batch_size=8, show_metric=True)\n    model.save(\"model.tflearn\")\n\n\n\ndef bag_of_words(s, words):\n    bag = [0 for _ in range(len(words))]\n\n    s_words = nltk.word_tokenize(s)\n    s_words = [stemmer.stem(word.lower()) for word in s_words]\n\n    for se in s_words:\n        for i, w in enumerate(words):\n            if w == se:\n                bag[i] = 1\n            \n    return numpy.array(bag)\n\n\ndef chat():\n    print(\"Conversation\")\n    print(\"type 'quit' to end the session\")\n    while True:\n        inp = input(\"You: \")\n        if inp.lower() == \"quit\":\n            print(\"Thank you... 
I wish we will catch up later\");\n break\n\n results = model.predict([bag_of_words(inp, words)])\n results_index = numpy.argmax(results)\n tag = labels[results_index]\n\n for tg in info[\"intents\"]:\n if tg['tag'] == tag:\n responses = tg['responses']\n\n print(random.choice(responses))\n\nchat()","repo_name":"rExQone/chatbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"42946855578","text":"import websocket\nimport json\nimport ctypes\nimport threading\nimport time\n\nclass Request:\n def __init__(self):\n self.user = \"\"\n self.handler = \"\"\n self.payload = \"\"\n\ndef on_message(ws, message):\n print(\"Received message:\", message)\n\ndef on_error(ws, error):\n print(\"Error:\", error)\n\ndef on_close(ws):\n print(\"Connection closed\")\n\ndef on_open(ws):\n request = Request()\n request.user = \"12D3KooWGQ4ncdUVMSaVrWrCU1fyM8ZdcVvuWa7MdwqkUu4SSDo4\"\n request.handler = \"MyHandler\"\n request.payload = \"Hello, world!\"\n\n # Convert the Request object to JSON\n request_json = json.dumps(request.__dict__)\n # Send a test message\n\n while True:\n time.sleep(0.1)\n ws.send(request_json)\n\n\ndef app():\n library = ctypes.cdll.LoadLibrary('./library.so')\n hello_world = library.start\n hello_world()\n\nif __name__ == \"__main__\":\n\n x = threading.Thread(target=app, args=())\n x.start()\n\n time.sleep(3)\n\n websocket.enableTrace(True)\n\n # WebSocket server URL\n ws_url = \"ws://localhost:8080/ws\"\n\n # Create WebSocket connection\n ws = websocket.WebSocketApp(ws_url,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)\n ws.on_open = on_open\n\n # Start WebSocket connection\n ws.run_forever()\n","repo_name":"Mihalic2040/Hub","sub_path":"examples/ws_server/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19873280971","text":"import sys\n\nimport numpy as np\nimport flowpy\n\n\nclass UserContext:\n coef = 0\n \n def __init__(self, c):\n self.coef = c\n\n def mult(self, val):\n return self.coef * val\n\n\ndef fn_init(obj: flowpy.PythonFieldBase) -> UserContext:\n user_context = UserContext(obj.t)\n return user_context\n\n\ndef fn_reinit(obj: flowpy.PythonFieldBase, user_context: UserContext) -> flowpy.PythonFieldBase:\n obj.set_result(\"result\", 1, 3, 16)\n obj.add_to_dict(\"csection\", 1, 1, 16)\n obj.add_to_dict(\"velocity\", 1, 3, 16)\n obj.print_fields()\n return obj\n\n\ndef fn_eval(obj: flowpy.PythonFieldBase, user_context: UserContext):\n multi = user_context.mult(5)\n velocity_field = obj.f_dict[\"velocity\"]\n obj.result = velocity_field * 2\n\n\ndef test():\n print(\"Calling func example:\")\n field = flowpy.PythonFieldBase()\n field.t = 0.1\n uc = fn_init(field)\n multi = uc.mult(2)\n print(multi)\n fn_reinit(field, uc)\n field.print_fields()\n fn_eval(field, uc)\n field.print_result()\n \ntest()","repo_name":"dflanderka/python_test","sub_path":"test/example_func.py","file_name":"example_func.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"26865142417","text":"from vpython import *\nfrom matplotlib import pyplot as plt\nfrom math import sin, cos\nimport argparse\nimport math\n\n\ndef set_scene(data):\n \"\"\"\n Set Vpython Scene\n \"\"\"\n scene.title = \"Assignment 5: 
Projectile motion\"\n    scene.width = 800\n    scene.height = 600\n    scene.caption = \"\"\"Right button drag or Ctrl-drag to rotate \"camera\" to view scene.\n    To zoom, drag with middle button or Alt/Option depressed, or use scroll wheel.\n    On a two-button mouse, middle is left + right.\n    Touch screen: pinch/extend to zoom, swipe or two-finger rotate.\"\"\"\n    scene.forward = vector(0, -.3, -1)\n    scene.x = -1\n    # Set background: floor, table, etc\n    floor = box(pos = vector(0,0,0), length = 3000, width = 20, height = .1, color = color.white)\n\ndef motion_no_drag(data):\n    \"\"\"\n    Create animation for projectile motion with no drag force\n    \"\"\"\n    ball_nd = sphere(pos=vector(-25, data['init_height'], 0),\n            radius=1, color=color.cyan, make_trail=True)\n    # Follow the movement of the ball\n    scene.camera.follow(ball_nd)\n    # Set initial velocity & position\n    ball_nd.theta = math.radians(data['theta'])\n    ball_nd.mag = data['init_velocity']\n    ball_nd.vy = ball_nd.mag * math.sin(ball_nd.theta)\n    ball_nd.vx = ball_nd.mag * math.cos(ball_nd.theta)\n    ball_nd.v = vector(ball_nd.vx,ball_nd.vy,0)\n    # Animate\n    while ball_nd.pos.y > 0:\n        rate(1/data['deltat'])\n        g = vector(0,data['gravity']*data['deltat'],0)\n        data['nd_pos_x'].append(ball_nd.pos.x)\n        data['nd_pos_y'].append(ball_nd.pos.y)\n        ball_nd.v += g\n        ball_nd.pos += ball_nd.v\n\n\n\n\ndef motion_drag(data):\n    ball_d = sphere(pos=vector(-25, data['init_height'], 0),\n            radius=1, color=color.red, make_trail=True)\n\n    ball_d.theta = math.radians(data['theta'])\n    ball_d.mag = data['init_velocity']\n    ball_d.vy = ball_d.mag * math.sin(ball_d.theta)\n    ball_d.vx = ball_d.mag * math.cos(ball_d.theta)\n    ball_d.v = vector(ball_d.vx, ball_d.vy, 0)\n\n    while ball_d.pos.y > 0:\n        rate(1/data['deltat'])\n        g = vector(0,data['gravity']*data['deltat'],0)\n\n        d_mag = data['beta']\n\n        data['d_pos_x'].append(ball_d.pos.x)\n        data['d_pos_y'].append(ball_d.pos.y)\n        # Adding pi to the angle (in radians) reverses its direction, i.e. rotates it by 180 degrees.\n        d_y = d_mag * math.sin(ball_d.theta + math.pi)\n        d_x = d_mag * math.cos(ball_d.theta + math.pi)\n        d = vector(d_x, d_y, 0)\n\n        ball_d.v += g\n        ball_d.v += d\n\n        ball_d.pos += ball_d.v\n\ndef plot_data(data):\n\n    plt.figure()\n    plt.plot(data['d_pos_x'],data['d_pos_y'])\n    plt.plot(data['nd_pos_x'], data['nd_pos_y'])\n    plt.show()\n\n\n\ndef main():\n    \"\"\"\n    \"\"\"\n    # 1) Parse the arguments\n    parser = argparse.ArgumentParser(description=\"get the starting conditions\")\n    parser.add_argument('velocity', type=float)\n    parser.add_argument('angle', type=float)\n    parser.add_argument(\"--height\", type=float, default=1.2)\n    args = parser.parse_args()\n    # Set Variables\n    data = {} # empty dictionary for all data and variables\n\n    data['nd_pos_x'] = []\n    data['d_pos_x'] = []\n    data['nd_pos_y'] = []\n    data['d_pos_y'] = []\n    data['init_velocity'] = args.velocity\n    data['theta'] = args.angle\n    data['init_height'] = args.height\n\n    print(data['init_velocity'])\n    print(data['theta'])\n    print(data['init_height'])\n    # Constants\n    data['rho'] = 1.225 # kg/m^3\n    data['Cd'] = 0.5 # drag coefficient\n    data['deltat'] = 0.005\n    data['gravity'] = -9.8 # m/s^2\n\n    data['ball_mass'] = 0.145 # kg\n    data['ball_radius'] = 0.075 # meters\n    data['ball_area'] = pi * data['ball_radius']**2\n    data['alpha'] = data['rho'] * data['Cd'] * data['ball_area'] / 2.0\n    data['beta'] = data['alpha'] / data['ball_mass']\n    # Set Scene\n    set_scene(data)\n    # 2) No Drag Animation\n    motion_no_drag(data)\n    # 3) Drag Animation\n    motion_drag(data)\n    # 4) Plot Information: extra credit\n    
plot_data(data)\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n","repo_name":"yjotyrrm/phys2300_labs","sub_path":"lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"21237869709","text":"import sys, argparse\nimport ipaddress\n\ndef ipCheck(ips, ip_type):\n if ip_type == 'private':\n for i in ips:\n if ipaddress.ip_address(i.rstrip()).is_private:\n print(i.rstrip())\n elif ip_type == 'public':\n for i in ips:\n if ipaddress.ip_address(i.rstrip()).is_private == False:\n print(i.rstrip())\n\ndef main():\n parser = argparse.ArgumentParser(description='Print non CF IPs')\n parser.add_argument('-i','--inputfile', type=argparse.FileType('r', encoding='UTF-8'), required=False, help='Input file')\n parser.add_argument('-pr','--privateIp', action='store_true', required=False, help='Input file')\n parser.add_argument('-pub','--publicIp', action='store_true', required=False, help='Input file')\n args = parser.parse_args()\n f = open(args.inputfile.name, \"r\")\n if(args.privateIp == True and args.publicIp == False):\n ipCheck(f, 'private')\n else:\n ipCheck(f, 'public')\n\nif (__name__ == \"__main__\"):\n main()\n","repo_name":"pradeepch99/tools","sub_path":"ipcheck.py","file_name":"ipcheck.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"10353944104","text":"from functools import partial\nfrom typing import *\n\nfrom dataclasses_json import DataClassJsonMixin\nfrom functional import Some\n\nfrom openapi_parser.model import ModelClass, ModelSchema, HavingPath, HavingExtendedDescription, ModelEnumData\nfrom openapi_parser.util.utils import StrIO\nfrom .abstract_writer import Writer, yielder, writer\nfrom .attribute_writer import AttributeWriter\nfrom .description_writer import DescriptionWriter\nfrom .footer_writer import Exporting\n\nclass ClassWriter(Exporting, AttributeWriter, DescriptionWriter, Writer):\n def dump_class_description(self, cls: HavingExtendedDescription, *, path_cls: Optional[HavingPath] = None, cls_all_required_properties: List[str] = None, compact: bool = False) -> Iterator[str]:\n if (cls_all_required_properties is None):\n if (isinstance(cls, ModelClass)):\n cls_all_required_properties = cls.all_required_properties\n else:\n cls_all_required_properties = list()\n \n gen = partial(self.generate_class_description, cls, path_cls=path_cls, cls_all_required_properties=cls_all_required_properties)\n yield from self.smart_description(gen, compact=compact)\n \n def generate_class_description(self, cls: HavingExtendedDescription, *, path_cls: Optional[HavingPath] = None, cls_all_required_properties: Optional[List[str]], compact: bool):\n def extra_gen():\n if (cls_all_required_properties):\n yield \"Required Properties:\"\n yield from (f\" - {self.object_valid_name_filter(self.field_name_pretty(f_name))}\" for f_name in cls_all_required_properties)\n if (not compact):\n yield\n \n yield from self.generate_item_description(item=cls, path_item=path_cls, item_type='class', compact=compact, extra_gen=extra_gen())\n \n def dump_enum(self, schema: ModelSchema, enum_data: ModelEnumData) -> Iterator[str]:\n cls_name = self.object_valid_name_filter(self.class_name_pretty(enum_data))\n \n self.export(cls_name)\n yield f'class {cls_name}(Enum):'\n with self.indent():\n yield from self.dump_class_description(schema, path_cls=enum_data)\n yield from map(lambda v: 
f'{self.object_valid_name_filter(self.enum_entry_name_pretty(v))} = {v!r}', enum_data.possible_values)\n yield\n \n def dump_property(self, name: str, prop: ModelSchema, *, only_if_has_default: Optional[bool] = None, is_required: bool = True) -> Iterator[str]:\n actual_name, f_type, f_value, f_default = self.parse_attribute(name, prop, is_required=is_required)\n \n if (only_if_has_default is not None and f_default.is_empty == only_if_has_default):\n return\n \n f_constructor = dict()\n f_constructor_meta = dict()\n encoder = self.extract_coder(prop, 'encoder')\n if (encoder is not None):\n f_constructor_meta['encoder'] = encoder\n decoder = self.extract_coder(prop, 'decoder')\n if (decoder is not None):\n f_constructor_meta['decoder'] = decoder\n if (actual_name != self.field_name_pretty(name)):\n f_constructor_meta['field_name'] = '{!r}'.format(name)\n \n if (f_constructor_meta):\n f_constructor['metadata'] = self.constructor('config', **f_constructor_meta)\n if (prop.default.non_empty and isinstance(prop.default.get, (list, dict, set, tuple))):\n if (f_default.get):\n f_constructor['default_factory'] = f'lambda: {prop.default.get!r}'\n else:\n f_constructor['default_factory'] = type(prop.default.get).__name__\n if (f_constructor):\n if (f_default.non_empty):\n if ('default_factory' not in f_constructor):\n f_constructor['default'] = '{!r}'.format(f_default.get)\n f_value = Some(self.constructor('field', **f_constructor))\n else:\n f_value = f_default.map('{!r}'.format)\n \n yield self.join_attribute(actual_name, f_type, f_value)\n yield from self.dump_class_description(prop, compact=True)\n \n def dump_class(self, cls: ModelClass) -> Tuple[List[str], Iterator[str]]:\n cls_name = self.object_valid_name_filter(self.class_name_pretty(cls))\n cls_all_req_properties = cls.all_required_properties\n \n dataclass_json_config = dict()\n dataclass_json_config['letter_case'] = 'LetterCase.CAMEL'\n self.export(cls_name)\n yield self.constructor('@dataclass_json', **dataclass_json_config)\n yield '@dataclass'\n yield f\"class {cls_name}({', '.join(self.object_valid_name_filter(self.class_name_pretty(p)) for p in (cls.parents + [ DataClassJsonMixin ]))}):\"\n \n with self.indent():\n yield from self.dump_class_description(cls, cls_all_required_properties=cls_all_req_properties)\n for f_name, f_data in cls.all_properties_iter:\n yield from self.dump_property(f_name, f_data, only_if_has_default=False, is_required=f_name in cls_all_req_properties)\n for f_name, f_data in cls.all_properties_iter:\n yield from self.dump_property(f_name, f_data, only_if_has_default=True, is_required=f_name in cls_all_req_properties)\n \n yield\n \n # region Writers\n @yielder\n def yield_class(self, cls: ModelClass) -> Iterator[Tuple[int, str]]:\n # noinspection PyTypeChecker\n return self.dump_class(cls)\n \n @overload\n def write_class(self, cls: ModelClass) -> Iterator[str]:\n pass\n # noinspection PyOverloads\n @overload\n def write_class(self, cls: ModelClass, *, file: StrIO) -> None:\n pass\n @writer\n def write_class(self, cls: ModelClass) -> Optional[Iterator[str]]:\n # noinspection PyTypeChecker\n return self.yield_class(cls)\n # endregion\n\n\n__all__ = \\\n[\n 'ClassWriter',\n]\n","repo_name":"sgeiser/urlscraper","sub_path":"venv/Lib/site-packages/openapi_parser/exporter/class_writer.py","file_name":"class_writer.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74159861355","text":"import math\nimport 
pygame\nfrom Button import Button\n\n\ndef obliczOdleglosc(A,B):\n    return math.sqrt((B[0]-A[0])**2 + (B[1]-A[1])**2)\n\ndef poleTrojkata(A,B,C):\n    return 0.5*math.fabs((B[0]-A[0])*(C[1]-A[1])-(B[1]-A[1])*(C[0]-A[0]))\n\ndef punktTrojkat(A,B,C,P):\n    poleABC = poleTrojkata(A,B,C)\n    poleAPB = poleTrojkata(A,P,B)\n    poleAPC = poleTrojkata(A,P,C)\n    poleBPC = poleTrojkata(B,P,C)\n\n    if math.fabs(poleABC - (poleBPC + poleAPC + poleAPB)) < 0.00001:\n        return True\n    else:\n        return False\n\n\ndef punktOdcinek(A, B, P):\n    ab = obliczOdleglosc(A, B)\n    ap = obliczOdleglosc(A, P)\n    bp = obliczOdleglosc(B, P)\n\n    roznica = ab - (ap + bp)\n    if math.fabs(roznica) < 0.001:\n        return True\n    else:\n        return False\n\ndef punktProsta(A,B,P):\n    wyznacznik = B[0]*P[1] + A[0]*B[1] + A[1]*P[0] - (A[1]*B[0] + A[0]*P[1] + B[1]*P[0])\n\n    return wyznacznik\n\ndef dwaOdcinki(A,B,C,D):\n    w1 = punktProsta(A,B,C)\n    w2 = punktProsta(A,B,D)\n    w3 = punktProsta(C,D,A)\n    w4 = punktProsta(C,D,B)\n\n    if (w1*w2 < 0 and w3*w4 < 0) or punktOdcinek(A,B,C) or punktOdcinek(A,B,D) or punktOdcinek(C,D,A) or punktOdcinek(C,D,B):\n        return True\n    else:\n        return False\n\n\n\n\npygame.init()\nokno = pygame.display.set_mode([800, 600])\npygame.display.set_caption(\"Algorytmy geometryczne\")\ntimer = pygame.time.Clock()\nFPS = 60\nfontDUZA = pygame.font.SysFont('Comic Sans MS', 34)\nfontDUZA.set_underline(True)\nfontMALA = pygame.font.SysFont('Comic Sans MS', 22)\nfontMALUTKA = pygame.font.SysFont('Comic Sans MS', 14)\n\npunktOdcinekB = Button('aquamarine3','aquamarine','black',\"PUNKT - ODCINEK\",45)\npunktProstaB = Button('aquamarine3','aquamarine','black',\"PUNKT - PROSTA\",45)\npunktTrojkatB = Button('aquamarine3','aquamarine','black',\"PUNKT - TRÓJKĄT\",45)\ndwaOdcinkiB = Button('aquamarine3','aquamarine','black',\"DWA ODCINKI\",55)\npunktFiguraB = Button('aquamarine3','aquamarine','black',\"PUNKT FIGURA\",55)\nkoniecB = Button('aquamarine3','aquamarine','black',\"ZAMKNIJ\",85)\nmenuB = Button('aquamarine3','aquamarine','black',\"POWRÓT\",45)\nTRYB = 0\nrun = True\nWIERZCHOŁKI = []\nP = []\nK = []\npunktyWspolne = 0\nWIERZCHOŁKI.append([300,300])\nrysowanie = 1\nwhile run:\n    timer.tick(FPS)\n    okno.fill('azure3')\n    klawisze = pygame.key.get_pressed()\n    myszPozycja = pygame.mouse.get_pos()\n    myszKlik = pygame.mouse.get_pressed()\n\n\n    for zdarzenie in pygame.event.get():\n        if zdarzenie.type == pygame.QUIT:\n            run = False\n\n    if klawisze[pygame.K_ESCAPE]: run = False\n    if TRYB == 0:\n        okno.blit(fontDUZA.render(\"Wizualizacja algorytmów geometrycznych\", True, 'black'),(60,20))\n        okno.blit(fontMALUTKA.render(\"Informatyka rozszerzona Zespół Szkół Energetycznych w Rzeszowie Paweł Łapiński\", True, 'black'),(10,580))\n\n        punktOdcinekB.render(okno,250,100,300,50)\n        punktProstaB.render(okno,250,170,300,50)\n        punktTrojkatB.render(okno,250,240,300,50)\n        dwaOdcinkiB.render(okno,250,310,300,50)\n        punktFiguraB.render(okno,250,380,300,50)\n        koniecB.render(okno,250,510,300,50)\n\n        if punktOdcinekB.clik():\n            TRYB = 1\n        if punktProstaB.clik():\n            TRYB = 2\n        if punktTrojkatB.clik():\n            TRYB = 3\n        if dwaOdcinkiB.clik():\n            TRYB = 4\n        if punktFiguraB.clik():\n            czasRysowania = pygame.time.get_ticks()\n            WIERZCHOŁKI = []\n            P = []\n            K = []\n            punktyWspolne = 0\n            
WIERZCHOŁKI.append([300, 300])\n rysowanie = 1\n TRYB = 5\n if koniecB.clik():\n run = False\n\n if TRYB in (1,2,3,4,5):\n okno.fill('white')\n menuB.render(okno,585,540,200,50)\n if menuB.clik():\n TRYB = 0\n\n if TRYB == 1:\n A = [300,500]\n B = [600,250]\n P = [myszPozycja[0],myszPozycja[1]]\n okno.blit(fontDUZA.render(\"Algorytm punkt - odcinek\", True, 'black'),(10,10))\n pygame.draw.line(okno,'black',A,B,3)\n pygame.draw.line(okno,'red',(myszPozycja),B,1)\n pygame.draw.line(okno,'red',A,(myszPozycja),1)\n pygame.draw.circle(okno,'black',A,3)\n pygame.draw.circle(okno,'black',B,3)\n pygame.draw.circle(okno,'red',(myszPozycja),3)\n okno.blit(fontMALA.render(\"A\", True, 'black'),A)\n okno.blit(fontMALA.render(\"B\", True, 'black'),B)\n okno.blit(fontMALA.render(\"P\", True, 'red'),(myszPozycja[0]-20,myszPozycja[1]-20))\n okno.blit(fontMALA.render(\"Długość AB: \" + str(round(obliczOdleglosc(A,B),1)), True, 'black'), (20, 70))\n if punktOdcinek(A,B,P):\n okno.blit(fontMALA.render(\"Punkt P znajduje się NA odcinku AB\", True, 'blue'), (350, 70))\n else:\n okno.blit(fontMALA.render(\"Punkt P znajduje się POZA odcinkiem AB\", True, 'blue'), (350, 70))\n okno.blit(fontMALA.render(\"Długość AP: \" + str(round(obliczOdleglosc(A,P),1)), True, 'black'), (20, 100))\n okno.blit(fontMALA.render(\"Długość BP: \" + str(round(obliczOdleglosc(B,P),1)), True, 'black'), (20, 130))\n okno.blit(fontMALA.render(\"Suma AP + BP: \" + str(round(obliczOdleglosc(B,P)+ obliczOdleglosc(A,P),1)), True, 'red'), (20, 160))\n okno.blit(fontMALA.render(\"Różnica AB - (AP + BP): \" + str(math.fabs(round(obliczOdleglosc(A,B) - (obliczOdleglosc(B,P)+ obliczOdleglosc(A,P)),4))), True, 'black'), (20, 190))\n\n\n if TRYB == 2:\n okno.blit(fontDUZA.render(\"Algorytm punkt - prosta\", True, 'black'),(10,10))\n A = [200, 500]\n B = [600, 100]\n P = [myszPozycja[0], myszPozycja[1]]\n pygame.draw.line(okno, 'black', (100,600), (700,0), 3)\n pygame.draw.circle(okno, 'black', A, 3)\n pygame.draw.circle(okno, 'black', B, 3)\n pygame.draw.circle(okno, 'red', (myszPozycja), 3)\n okno.blit(fontMALA.render(\"A\", True, 'black'), A)\n okno.blit(fontMALA.render(\"B\", True, 'black'), B)\n okno.blit(fontMALA.render(\"P\", True, 'red'), (myszPozycja[0] - 20, myszPozycja[1] - 20))\n okno.blit(fontMALA.render(\"Punkt P (\" + str(P[0])+\",\"+ str(P[1]) + \")\", True, 'red'), (20, 70))\n okno.blit(fontMALA.render(\"Wyznacznik: \" + str(punktProsta(A,B,P)), True, 'black'), (20, 100))\n okno.blit(fontMALA.render(\"Punkt P leży:\", True, 'black'), (20, 130))\n if punktProsta(A,B,P) < 0:\n okno.blit(fontMALA.render(\"Po LEWEJ stronia prostej AB\", True, 'blue'), (30, 160))\n elif punktProsta(A, B, P) > 0:\n okno.blit(fontMALA.render(\"Po PRAWEJ stronia prostej AB\", True, 'blue'), (30, 160))\n if punktProsta(A, B, P) == 0:\n okno.blit(fontMALA.render(\"IDEALNIE na prostej AB\", True, 'blue'), (30, 160))\n if TRYB == 3:\n okno.blit(fontDUZA.render(\"Algorytm punkt - trójkąt\", True, 'black'),(10,10))\n A = [200, 500]\n B = [600, 500]\n C = [400, 250]\n P = [myszPozycja[0], myszPozycja[1]]\n pygame.draw.line(okno, 'black', A, B, 3)\n pygame.draw.line(okno, 'black', A, C, 3)\n pygame.draw.line(okno, 'black', C, B, 3)\n pygame.draw.circle(okno, 'black', A, 3)\n pygame.draw.circle(okno, 'black', B, 3)\n pygame.draw.circle(okno, 'red', (myszPozycja), 3)\n okno.blit(fontMALA.render(\"A\", True, 'black'), (180,500))\n okno.blit(fontMALA.render(\"C\", True, 'black'), (390,220))\n okno.blit(fontMALA.render(\"B\", True, 'black'), B)\n 
okno.blit(fontMALA.render(\"P\", True, 'red'), (myszPozycja[0] - 20, myszPozycja[1] - 20))\n pygame.draw.line(okno, 'red', A, P, 1)\n pygame.draw.line(okno, 'red', B, P, 1)\n pygame.draw.line(okno, 'red', C, P, 1)\n\n okno.blit(fontMALA.render(\"Pole Δ ABC = \" + str(round(poleTrojkata(A,B,C),0)), True, 'black'), (20, 70))\n okno.blit(fontMALA.render(\"Suma ΔABC+ΔABP+ΔBPC = \" + str(round(poleTrojkata(A,B,P)+poleTrojkata(A,P,C)+poleTrojkata(B,P,C),0)), True, 'black'), (300, 70))\n okno.blit(fontMALA.render(\"Pole Δ ABP = \" + str(round(poleTrojkata(A,B,P),0)), True, 'red'), (20, 100))\n okno.blit(fontMALA.render(\"Pole Δ APC = \" + str(round(poleTrojkata(A,P,C),0)), True, 'red'), (20, 130))\n okno.blit(fontMALA.render(\"Pole Δ BPC = \" + str(round(poleTrojkata(B,P,C),0)), True, 'red'), (20, 160))\n\n if punktTrojkat(A,B,C,P):\n okno.blit(fontMALA.render(\"Punkt P leży WEWNĄTRZ Δ ABC \", True, 'blue'), (20, 190))\n else:\n okno.blit(fontMALA.render(\"Punkt P leży POZA Δ ABC \", True, 'blue'), (20, 190))\n\n\n if TRYB == 4:\n A = [300, 500]\n B = [500, 300]\n C = [300, 300]\n D = [myszPozycja[0], myszPozycja[1]]\n pygame.draw.line(okno, 'black', A, B, 3)\n pygame.draw.line(okno, 'black', C, D, 3)\n pygame.draw.circle(okno, 'black', A, 3)\n pygame.draw.circle(okno, 'black', B, 3)\n pygame.draw.circle(okno, 'black', C, 3)\n pygame.draw.circle(okno, 'black', (myszPozycja), 3)\n\n okno.blit(fontMALA.render(\"A\", True, 'black'), A)\n okno.blit(fontMALA.render(\"C\", True, 'black'), (C[0]-20,C[1]))\n okno.blit(fontMALA.render(\"B\", True, 'black'), B)\n okno.blit(fontMALA.render(\"D\", True, 'red'), (myszPozycja[0] - 20, myszPozycja[1]))\n okno.blit(fontMALA.render(\"Punkt A (\" + str(A[0]) + \",\" + str(A[1]) + \")\", True, 'black'), (20, 70))\n okno.blit(fontMALA.render(\"WYZNACZNIK AB + C = \" + str(punktProsta(A,B,C)) , True, 'cyan4'), (400, 70))\n okno.blit(fontMALA.render(\"WYZNACZNIK AB + D = \" + str(punktProsta(A,B,D)) , True, 'cyan4'), (400, 100))\n okno.blit(fontMALA.render(\"WYZNACZNIK CD + A = \" + str(punktProsta(C,D,A)) , True, 'chartreuse4'), (400, 130))\n okno.blit(fontMALA.render(\"WYZNACZNIK CD + B = \" + str(punktProsta(C,D,B)) , True, 'chartreuse4'), (400, 160))\n okno.blit(fontMALA.render(\"Punkt B (\" + str(B[0]) + \",\" + str(B[1]) + \")\", True, 'black'), (20, 100))\n okno.blit(fontMALA.render(\"Punkt C (\" + str(C[0]) + \",\" + str(C[1]) + \")\", True, 'black'), (20, 130))\n okno.blit(fontMALA.render(\"Punkt D (\" + str(D[0]) + \",\" + str(D[1]) + \")\", True, 'red'), (20, 160))\n if dwaOdcinki(A,B,C,D):\n okno.blit(fontMALA.render(\"Odcinki MAJĄ punkt wspólny\", True, 'blue'), (20, 190))\n else:\n okno.blit(fontMALA.render(\"Odcinki NIE MAJĄ punktu wspólnego \", True, 'blue'), (20, 190))\n\n okno.blit(fontDUZA.render(\"Algorytm dwa odcinki\", True, 'black'),(10,10))\n\n if TRYB == 5:\n okno.blit(fontDUZA.render(\"Algorytm punkt - figura\", True, 'black'), (10, 10))\n if rysowanie < 3:\n okno.blit(fontMALA.render(\"Punkt W1:(300,300)\", True, 'black'), (20, 70))\n okno.blit(fontMALA.render(\"MYSZ:(\"+str(myszPozycja[0])+\",\"+str(myszPozycja[1])+\")\", True, 'black'), (20, 100))\n for i in range(len(WIERZCHOŁKI)-1):\n pygame.draw.line(okno, 'black', WIERZCHOŁKI[i], WIERZCHOŁKI[i+1], 3)\n okno.blit(fontMALA.render(\"W\"+str(i+1), True, 'black'), WIERZCHOŁKI[i])\n pygame.draw.circle(okno, 'black', (WIERZCHOŁKI[i][0], WIERZCHOŁKI[i][1]), 4)\n if rysowanie == 1:\n okno.blit(fontMALA.render(\"Narysuj figurę klikając LPM lub PPM(koniec)\", True, 'black'), (300, 70))\n\n 
pygame.draw.line(okno,'black',WIERZCHOŁKI[-1],myszPozycja,3)\n if myszKlik[0] and pygame.time.get_ticks() - czasRysowania > 300:\n czasRysowania = pygame.time.get_ticks()\n WIERZCHOŁKI.append(myszPozycja)\n if myszKlik[2] and pygame.time.get_ticks() - czasRysowania > 300:\n WIERZCHOŁKI.append(WIERZCHOŁKI[0])\n if len(WIERZCHOŁKI) > 1 and WIERZCHOŁKI[0][0] == WIERZCHOŁKI[-1][0] and WIERZCHOŁKI[0][1] == WIERZCHOŁKI[-1][1]:\n rysowanie = 2\n if rysowanie == 2:\n okno.blit(fontMALA.render(\"Umieść punkt P klikając LPM\", True, 'black'), (300, 70))\n if myszKlik[0] and pygame.time.get_ticks() - czasRysowania > 300:\n czasRysowania = pygame.time.get_ticks()\n P.append(myszPozycja[0])\n P.append(myszPozycja[1])\n rysowanie = 3\n if rysowanie == 3:\n maxX = WIERZCHOŁKI[0][0]\n for punkt in WIERZCHOŁKI:\n if punkt[0] > maxX:\n maxX = punkt[0]\n\n K.append(maxX + 50)\n K.append(P[1])\n\n for i in range(len(WIERZCHOŁKI) - 1):\n if dwaOdcinki(P, K, WIERZCHOŁKI[i], WIERZCHOŁKI[i + 1]):\n punktyWspolne += 1\n\n if dwaOdcinki(P, K, WIERZCHOŁKI[0], WIERZCHOŁKI[-1]):\n punktyWspolne += 1\n\n rysowanie = 4\n if rysowanie == 4:\n pygame.draw.circle(okno, 'red', (P[0], P[1]), 4)\n okno.blit(fontMALA.render(\"P\", True, 'red'), (P[0], P[1]))\n pygame.draw.circle(okno, 'red', (K[0], K[1]), 4)\n okno.blit(fontMALA.render(\"K\", True, 'red'), (K[0], K[1]))\n okno.blit(fontMALA.render(\"Punkt P:(\" + str(P[0]) + \",\" + str(P[1]) + \")\", True, 'red'),(20, 70))\n okno.blit(fontMALA.render(\"Punkt K:(\" + str(K[0]) + \",\" + str(K[1]) + \")\", True, 'red'),(20, 100))\n okno.blit(fontMALA.render(\"Ilość punktów przecięcia: \" + str(punktyWspolne) , True, 'black'),(20, 130))\n pygame.draw.line(okno, 'red', P, K, 3)\n\n if punktyWspolne % 2 == 1:\n okno.blit(fontMALA.render(\"Punkt P leży WEWNĄTRZ figury\", True, 'blue'), (20, 160))\n else:\n okno.blit(fontMALA.render(\"Punkt P leży POZA figurą \", True, 'blue'), (20, 160))\n\n pygame.display.update()\n\npygame.quit()","repo_name":"xidroo/Geometryczne","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14051,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4071069444","text":"import os\nimport shutil\n\ndef copy_files(src, dest):\n try:\n if not os.path.exists(dest):\n os.makedirs(dest)\n for f in os.listdir(src):\n if os.path.isfile(os.path.join(src, f)):\n shutil.copyfile(os.path.join(src, f), os.path.join(dest,f))\n except shutil.Error as e:\n print('Directory not copied. Error: %s' % e)\n except OSError as e:\n print('Directory not copied. 
Error: %s' % e)\n\nshutil.rmtree('temp_submission', ignore_errors=True)\nos.mkdir('temp_submission')\nfor dir_name in ['code']:\n copy_files(dir_name, '/'.join(['temp_submission', dir_name]))\nshutil.make_archive('submission', 'zip', 'temp_submission')\nshutil.rmtree('temp_submission', ignore_errors=True)","repo_name":"silvery107/gatech-computer-vision","sub_path":"Project-5-Face_Detection_with_Sliding_Window(HOG)/zip_submission.py","file_name":"zip_submission.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"3852921287","text":"import sys\n\nsys.path.append(\"../\")\n\nimport json\nimport logging\nimport os\nfrom typing import Callable, Union\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport requests\nfrom icon_zoom.actions.create_user.schema import Input\nfrom icon_zoom.connection.connection import Connection\nfrom insightconnect_plugin_runtime.action import Action\nfrom insightconnect_plugin_runtime.task import Task\n\nSTUB_CONNECTION = {\n \"client_id\": \"asdf\",\n \"client_secret\": {\"secretKey\": \"asdf\"},\n \"account_id\": \"asdf\",\n}\nSTUB_BASE_URL = \"https://api.zoom.us/v2\"\nSTUB_OAUTH_URL = \"https://zoom.us/oauth/token\"\nSTUB_USER_ID = \"user@example.com\"\nSTUB_OAUTH_TOKEN = \"MTQ0NjJkZmQ5OTM2NDE1ZTZjNGZmZjI3\"\nSTUB_CREATE_USER = {\n Input.ACTION: \"create\",\n Input.TYPE: \"Basic\",\n Input.EMAIL: \"user@example.com\",\n Input.FIRST_NAME: \"FirstName\",\n Input.LAST_NAME: \"LastName\",\n}\n\nREFRESH_OAUTH_TOKEN_PATH = \"icon_zoom.util.api.ZoomAPI._refresh_oauth_token\"\n\n\nclass Util:\n @staticmethod\n @mock.patch(REFRESH_OAUTH_TOKEN_PATH, return_value=None)\n def default_connector(action: Action, mock_refresh_call: MagicMock) -> Union[Action, Task]:\n default_connection = Connection()\n default_connection.logger = logging.getLogger(\"connection logger\")\n default_connection.connect(STUB_CONNECTION)\n default_connection.zoom_api.oauth_token = STUB_OAUTH_TOKEN\n action.connection = default_connection\n action.logger = logging.getLogger(\"action logger\")\n return action\n\n\nclass MockResponse:\n def __init__(self, filename: str, status_code: int, text: str = \"\", headers: dict = {}) -> None:\n self.filename = filename\n self.status_code = status_code\n self.text = text\n self.headers = headers\n\n def json(self):\n with open(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), f\"responses/{self.filename}.json.resp\")\n ) as file_:\n return json.load(file_)\n\n\ndef mocked_request(side_effect: Callable) -> None:\n mock_function = requests\n mock_function.request = mock.Mock(side_effect=side_effect)\n\n\ndef mock_conditions(method: str, url: str, status_code: int) -> MockResponse:\n if url == STUB_OAUTH_URL:\n return MockResponse(\"oauth2_token\", status_code)\n if method == \"GET\":\n return MockResponse(\"get_user\", status_code)\n if method == \"POST\":\n return MockResponse(\"create_user\", status_code)\n if method == \"DELETE\":\n return MockResponse(\"delete_user\", status_code)\n raise Exception(\"Unrecognized endpoint\")\n\n\ndef mock_request_201(*args, **kwargs) -> MockResponse:\n method = kwargs.get(\"method\") if not args else args[0]\n url = kwargs.get(\"url\") if not args else args[1]\n return mock_conditions(method, url, 201)\n\n\ndef mock_request_204(*args, **kwargs) -> MockResponse:\n method = kwargs.get(\"method\") if not args else args[0]\n url = kwargs.get(\"url\") if not args else args[1]\n return 
mock_conditions(method, url, 204)\n\n\ndef mock_request_400(*args, **kwargs) -> MockResponse:\n    return mock_conditions(args[0], args[1], 400)\n\n\ndef mock_request_404(*args, **kwargs) -> MockResponse:\n    return mock_conditions(args[0], args[1], 404)\n\n\ndef mock_request_409(*args, **kwargs) -> MockResponse:\n    return mock_conditions(args[0], args[1], 409)\n\n\ndef mock_request_429(*args, **kwargs) -> MockResponse:\n    return mock_conditions(args[0], args[1], 429)\n","repo_name":"rapid7/insightconnect-plugins","sub_path":"plugins/zoom/unit_test/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"}
{"seq_id":"19873280971","text":"import random\nkroky = int(input(\"Kroky:\"))\nvzdalenost = int(input(\"Vzdálenost:\"))\npozice = int(vzdalenost/2)\ndum = 0\ntrasa = []\n\nfor z in range(vzdalenost):\n    trasa.append(\".\")\ntrasa[pozice] = \"•\"\nprint(\"dum\",*trasa,\"hospoda\")\n\nfor i in range(kroky):\n    trasa = []\n    x = random.choice([0 , 1])\n    for y in range(vzdalenost):\n        trasa.append(\".\")\n    if x == 1:\n        pozice = pozice + 1\n    else:\n        pozice = pozice - 1\n    if pozice == vzdalenost:\n        print(\"dum\",*trasa,\"•hospoda\")\n        print(\"Matyáš je opět v hospodě\")\n        break\n    elif pozice == dum:\n        print(\"dum•\",*trasa,\"hospoda\")\n        print(\"Matyáš je doma\")\n        break\n    else:\n        trasa[pozice] = \"•\"\n        print(\"dum\",*trasa,\"hospoda\")\n\nif i == (kroky - 1):\n    print(\"Matyáš umřel po cestě\")\n    ","repo_name":"kohuto/gpisnicka","sub_path":"programovani_algoritmizace/opilec/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"33299840185","text":"import unittest\n\nimport copy\nimport os\nimport re\nimport types\nimport urllib\nimport zipfile\n\n__all__ = ['subConverters']\n\nfrom music21.converter import subConverters\n\nfrom music21 import exceptions21\nfrom music21 import common\nfrom music21 import stream\nfrom music21.ext import six\nfrom music21 import musedata as musedataModule\n\nfrom music21 import _version\nfrom music21 import environment\n_MOD = 'converter/__init__.py'\nenvironLocal = environment.Environment(_MOD)\n\n\n#-------------------------------------------------------------------------------\nclass ArchiveManagerException(exceptions21.Music21Exception):\n    pass\n\nclass PickleFilterException(exceptions21.Music21Exception):\n    pass\n\nclass ConverterException(exceptions21.Music21Exception):\n    pass\n\nclass ConverterFileException(exceptions21.Music21Exception):\n    pass\n\n\n#-------------------------------------------------------------------------------\nclass ArchiveManager(object):\n    r'''Before opening a file path, this class can check if this is an \n    archived file collection, such as a .zip or .mxl file. 
This will return the \n data from the archive.\n \n >>> fnCorpus = corpus.getWork('bwv66.6', fileExtensions=('.xml',))\n \n This is likely a unicode string\n \n >>> #_DOCS_SHOW fnCorpus\n >>> '/Users/cuthbert/git/music21base/music21/corpus/bach/bwv66.6.mxl' #_DOCS_HIDE\n '/Users/cuthbert/git/music21base/music21/corpus/bach/bwv66.6.mxl'\n >>> am = converter.ArchiveManager(fnCorpus)\n >>> am.isArchive()\n True\n >>> am.getNames()\n ['bwv66.6.xml', 'META-INF/container.xml']\n >>> data = am.getData()\n >>> data[0:70]\n '\\r>> fp = '/Users/Cuthbert/Desktop/musicFile.mxl'\n >>> pickfilt = converter.PickleFilter(fp)\n >>> #_DOCS_SHOW pickfilt.status()\n ('/Users/Cuthbert/Desktop/musicFile.mxl', True, \n '/var/folders/music21/m21-18b8c5a5f07826bd67ea0f20462f0b8d.pgz')\n\n '''\n fpScratch = environLocal.getRootTempDir()\n m21Format = common.findFormatFile(self.fp)\n\n if m21Format == 'pickle': # do not pickle a pickle\n if self.forceSource:\n raise PickleFilterException(\n 'cannot access source file when only given a file path to a pickled file.')\n writePickle = False # cannot write pickle if no scratch dir\n fpLoad = self.fp\n fpPickle = None\n elif fpScratch is None or self.forceSource:\n writePickle = False # cannot write pickle if no scratch dir\n fpLoad = self.fp\n fpPickle = None\n else: # see which is more up to date\n fpPickle = self._getPickleFp(fpScratch, zipType='gz')\n if not os.path.exists(fpPickle):\n writePickle = True # if pickled file does not exist\n fpLoad = self.fp\n else:\n post = common.sortFilesRecent([self.fp, fpPickle])\n if post[0] == fpPickle: # pickle is most recent\n writePickle = False\n fpLoad = fpPickle\n elif post[0] == self.fp: # file is most recent\n writePickle = True\n fpLoad = self.fp\n return fpLoad, writePickle, fpPickle\n\n\n#-------------------------------------------------------------------------------\n_registeredSubconverters = []\n_deregisteredSubconverters = [] # default subconverters to skip\n\ndef resetSubconverters():\n '''\n Reset state to default (removing all registered and deregistered subconverters.\n '''\n global _registeredSubconverters # pylint: disable=global-statement\n global _deregisteredSubconverters # pylint: disable=global-statement\n _registeredSubconverters = []\n _deregisteredSubconverters = []\n\ndef registerSubconverter(newSubConverter):\n '''\n Add a Subconverter to the list of registered subconverters.\n \n Example, register a converter for the obsolete Amiga composition software Sonix (so fun...)\n \n >>> class ConverterSonix(converter.subConverters.SubConverter):\n ... registerFormats = ('sonix',)\n ... registerInputExtensions = ('mus',)\n >>> converter.registerSubconverter(ConverterSonix)\n >>> scf = converter.Converter().getSubConverterFormats()\n >>> for x in sorted(scf):\n ... 
x, scf[x] \n ('abc', )\n ...\n ('sonix', ) \n ...\n\n See `converter.qmConverter` for an example of an extended subconverter.\n\n >>> converter.resetSubconverters() #_DOCS_HIDE\n\n '''\n _registeredSubconverters.append(newSubConverter)\n\ndef unregisterSubconverter(removeSubconverter):\n '''\n Remove a Subconverter from the list of registered subconverters.\n \n >>> converter.resetSubconverters() #_DOCS_HIDE \n >>> mxlConverter = converter.subConverters.ConverterMusicXML\n\n >>> c = converter.Converter()\n >>> mxlConverter in c.subconvertersList()\n True\n >>> converter.unregisterSubconverter(mxlConverter)\n >>> mxlConverter in c.subconvertersList()\n False\n \n if there is no such subConverter registered and it is not a default subconverter, \n then a converter.ConverterException is raised:\n \n >>> class ConverterSonix(converter.subConverters.SubConverter):\n ... registerFormats = ('sonix',)\n ... registerInputExtensions = ('mus',)\n >>> converter.unregisterSubconverter(ConverterSonix)\n Traceback (most recent call last):\n music21.converter.ConverterException: Could not remove from \n registered subconverters\n \n The special command \"all\" removes everything including the default converters:\n\n >>> converter.unregisterSubconverter('all')\n >>> c.subconvertersList()\n []\n\n >>> converter.resetSubconverters() #_DOCS_HIDE\n\n '''\n global _registeredSubconverters # pylint: disable=global-statement\n global _deregisteredSubconverters # pylint: disable=global-statement\n if removeSubconverter == 'all':\n _registeredSubconverters = []\n _deregisteredSubconverters = ['all']\n return\n \n try:\n _registeredSubconverters.remove(removeSubconverter)\n except ValueError:\n c = Converter()\n dsc = c.defaultSubconverters()\n if removeSubconverter in dsc:\n _deregisteredSubconverters.append(removeSubconverter)\n else: \n raise ConverterException(\n \"Could not remove %r from registered subconverters\" % removeSubconverter)\n\n\n\n\n#-------------------------------------------------------------------------------\n\n\nclass Converter(object):\n '''\n A class used for converting all supported data formats into music21 objects.\n\n Not a subclass, but a wrapper for different converter objects based on format.\n '''\n _DOC_ATTR = {'subConverter': 'a ConverterXXX object that will do the actual converting.',}\n \n def __init__(self):\n self.subConverter = None\n self._thawedStream = None # a stream object unthawed\n\n\n def _getDownloadFp(self, directory, ext, url):\n if directory is None:\n raise ValueError\n return os.path.join(directory, 'm21-' + _version.__version__ + '-' + \n common.getMd5(url) + ext)\n\n # pylint: disable=redefined-builtin\n def parseFileNoPickle(self, fp, number=None, \n format=None, forceSource=False, **keywords): # @ReservedAssignment\n '''\n Given a file path, parse and store a music21 Stream.\n\n If format is None then look up the format from the file\n extension using `common.findFormatFile`.\n \n Does not use or store pickles in any circumstance.\n '''\n fp = common.cleanpath(fp)\n #environLocal.printDebug(['attempting to parseFile', fp])\n if not os.path.exists(fp):\n raise ConverterFileException('no such file exists: %s' % fp)\n useFormat = format\n\n if useFormat is None:\n useFormat = self.getFormatFromFileExtension(fp)\n\n self.setSubconverterFromFormat(useFormat)\n self.subConverter.keywords = keywords\n self.subConverter.parseFile(fp, number=number, **keywords)\n self.stream.filePath = fp\n self.stream.fileNumber = number\n self.stream.fileFormat = useFormat\n \n def 
getFormatFromFileExtension(self, fp):\n '''\n gets the format from a file extension.\n \n >>> import os\n >>> fp = os.path.join(common.getSourceFilePath(), 'musedata', 'testZip.zip')\n >>> c = converter.Converter()\n >>> c.getFormatFromFileExtension(fp)\n 'musedata'\n '''\n fp = common.cleanpath(fp)\n # if the file path is to a directory, assume it is a collection of\n # musedata parts\n useFormat = None\n if os.path.isdir(fp):\n useFormat = 'musedata'\n else:\n useFormat = common.findFormatFile(fp)\n if useFormat is None:\n raise ConverterFileException('cannot find a format extensions for: %s' % fp)\n return useFormat\n \n def parseFile(self, fp, number=None, \n format=None, forceSource=False, storePickle=True, **keywords): # @ReservedAssignment\n '''\n Given a file path, parse and store a music21 Stream.\n\n If format is None then look up the format from the file\n extension using `common.findFormatFile`.\n \n Will load from a pickle unless forceSource is True\n Will store as a pickle unless storePickle is False\n '''\n from music21 import freezeThaw\n fp = common.cleanpath(fp)\n\n if not os.path.exists(fp):\n raise ConverterFileException('no such file exists: %s' % fp)\n useFormat = format\n\n if useFormat is None:\n useFormat = self.getFormatFromFileExtension(fp)\n pfObj = PickleFilter(fp, forceSource, number)\n unused_fpDst, writePickle, fpPickle = pfObj.status()\n if writePickle is False and fpPickle is not None and forceSource is False:\n environLocal.printDebug(\"Loading Pickled version\")\n try:\n self._thawedStream = thaw(fpPickle, zipType='zlib')\n except freezeThaw.FreezeThawException:\n environLocal.warn(\"Could not parse pickle, %s ...rewriting\" % fpPickle)\n os.remove(fpPickle)\n self.parseFileNoPickle(fp, number, format, forceSource, **keywords)\n\n self.stream.filePath = fp\n self.stream.fileNumber = number\n self.stream.fileFormat = useFormat\n else:\n environLocal.printDebug(\"Loading original version\")\n self.parseFileNoPickle(fp, number, format, forceSource, **keywords)\n if writePickle is True and fpPickle is not None and storePickle is True:\n # save the stream to disk...\n environLocal.printDebug(\"Freezing Pickle\")\n s = self.stream\n sf = freezeThaw.StreamFreezer(s, fastButUnsafe=True)\n sf.write(fp=fpPickle, zipType='zlib')\n \n environLocal.printDebug(\"Replacing self.stream\")\n # get a new stream\n self._thawedStream = thaw(fpPickle, zipType='zlib')\n self.stream.filePath = fp\n self.stream.fileNumber = number\n self.stream.fileFormat = useFormat\n\n \n\n def parseData(self, dataStr, number=None, \n format=None, forceSource=False, **keywords): # @ReservedAssignment\n '''\n Given raw data, determine format and parse into a music21 Stream.\n '''\n useFormat = format\n # get from data in string if not specified\n if useFormat is None: # its a string\n dataStr = dataStr.lstrip()\n useFormat, dataStr = self.formatFromHeader(dataStr)\n\n if six.PY3 and isinstance(dataStr, bytes):\n dataStrMakeStr = dataStr.decode('utf-8', 'ignore')\n else:\n dataStrMakeStr = dataStr\n\n if useFormat is not None:\n pass\n elif dataStrMakeStr.startswith('>> jeanieLightBrownURL = ('https://github.com/cuthbertLab/music21/raw/master' + \n ... 
'/music21/corpus/leadSheet/fosterBrownHair.mxl')\n >>> c = converter.Converter()\n >>> #_DOCS_SHOW c.parseURL(jeanieLightBrownURL)\n >>> #_DOCS_SHOW jeanieStream = c.stream\n '''\n autoDownload = environLocal['autoDownload']\n if autoDownload in ('deny', 'ask'):\n message = 'Automatic downloading of URLs is presently set to {!r};'\n message += ' configure your Environment \"autoDownload\" setting to '\n message += '\"allow\" to permit automatic downloading: '\n message += \"environment.set('autoDownload', 'allow')\"\n message = message.format(autoDownload)\n raise ConverterException(message)\n\n # this format check is here first to see if we can find the format\n # in the url; if forcing a format we do not need this\n # we do need the file extension to construct file path below\n if format is None:\n formatFromURL, ext = common.findFormatExtURL(url)\n if formatFromURL is None: # cannot figure out what it is\n raise ConverterException('cannot determine file format of url: %s' % url)\n else:\n unused_formatType, ext = common.findFormat(format)\n if ext is None:\n ext = '.txt'\n\n directory = environLocal.getRootTempDir()\n dst = self._getDownloadFp(directory, ext, url)\n if (hasattr(urllib, 'urlretrieve')): \n # python 2\n urlretrieve = urllib.urlretrieve\n else: #python3\n urlretrieve = urllib.request.urlretrieve # @UndefinedVariable\n \n if not os.path.exists(dst):\n try:\n environLocal.printDebug(['downloading to:', dst])\n fp, unused_headers = urlretrieve(url, filename=dst)\n except IOError:\n raise ConverterException('cannot access file: %s' % url)\n else:\n environLocal.printDebug(['using already downloaded file:', dst])\n fp = dst\n\n # update format based on downloaded fp\n if format is None: # if not provided as an argument\n useFormat = common.findFormatFile(fp)\n else:\n useFormat = format\n self.setSubconverterFromFormat(useFormat)\n self.subConverter.keywords = keywords\n self.subConverter.parseFile(fp, number=number)\n self.stream.filePath = fp\n self.stream.fileNumber = number\n self.stream.fileFormat = useFormat\n\n #------------------------------------------------------------------------#\n # Subconverters\n def subconvertersList(self, converterType='any'):\n '''\n Gives a list of all the subconverters that are registered.\n \n If converterType is 'any' (true), then input or output\n subconverters are listed.\n \n Otherwise, 'input', or 'output' can be used to filter.\n \n >>> converter.resetSubconverters() #_DOCS_HIDE\n >>> c = converter.Converter()\n >>> scl = c.subconvertersList()\n >>> defaultScl = c.defaultSubconverters()\n >>> tuple(scl) == tuple(defaultScl)\n True\n \n >>> sclInput = c.subconvertersList('input')\n >>> sclInput\n [, \n , \n , \n , \n , \n , \n , \n , \n , \n , \n , \n , \n ]\n\n >>> sclOutput = c.subconvertersList('output')\n >>> sclOutput\n [, \n , \n , \n , \n , \n , \n , \n ]\n\n \n \n >>> class ConverterSonix(converter.subConverters.SubConverter):\n ... registerFormats = ('sonix',)\n ... 
registerInputExtensions = ('mus',)\n >>> converter.registerSubconverter(ConverterSonix)\n >>> ConverterSonix in c.subconvertersList()\n True\n\n >>> converter.resetSubconverters() #_DOCS_HIDE\n '''\n subConverterList = []\n for reg in _registeredSubconverters:\n #print reg\n subConverterList.append(reg)\n\n if _deregisteredSubconverters and _deregisteredSubconverters[0] == 'all':\n pass\n else:\n subConverterList.extend(self.defaultSubconverters())\n for unreg in _deregisteredSubconverters:\n try:\n subConverterList.remove(unreg)\n except ValueError:\n pass\n \n if converterType == 'any':\n return subConverterList\n \n filteredSubConvertersList = []\n for sc in subConverterList:\n if converterType == 'input' and len(sc.registerInputExtensions) == 0:\n continue\n if converterType == 'output' and len(sc.registerOutputExtensions) == 0:\n continue\n filteredSubConvertersList.append(sc)\n \n return filteredSubConvertersList\n\n def defaultSubconverters(self):\n '''\n return an alphabetical list of the default subconverters: those in converter.subConverters\n with the class Subconverter.\n \n Do not use generally. use c.subConvertersList()\n \n >>> c = converter.Converter()\n >>> for sc in c.defaultSubconverters():\n ... print(sc)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n '''\n defaultSubconverters = []\n for i in sorted(subConverters.__dict__):\n name = getattr(subConverters, i)\n if (callable(name)\n and not isinstance(name, types.FunctionType)\n and subConverters.SubConverter in name.__mro__):\n defaultSubconverters.append(name)\n return defaultSubconverters\n\n def getSubConverterFormats(self):\n '''\n Get a dictionary of subConverters for various formats.\n \n >>> scf = converter.Converter().getSubConverterFormats()\n >>> scf['abc']\n \n >>> for x in sorted(scf):\n ... 
x, scf[x]\n ('abc', )\n ('braille', )\n ('capella', )\n ('cttxt', )\n ('har', )\n ('humdrum', )\n ('ipython', )\n ('lily', )\n ('lilypond', )\n ('mei', )\n ('midi', )\n ('musedata', )\n ('musicxml', )\n ('noteworthy', )\n ('noteworthytext', )\n ('rntext', )\n ('romantext', )\n ('scala', )\n ('t', )\n ('text', )\n ('textline', )\n ('tinynotation', )\n ('txt', )\n ('vexflow', )\n ('xml', ) \n '''\n converterFormats = {}\n for name in self.subconvertersList():\n if hasattr(name, 'registerFormats'):\n formatsTuple = name.registerFormats\n for f in formatsTuple:\n converterFormats[f.lower()] = name\n return converterFormats\n\n def setSubconverterFromFormat(self, converterFormat): \n '''\n sets the .subConverter according to the format of `converterFormat`:\n \n >>> convObj = converter.Converter() \n >>> convObj.setSubconverterFromFormat('humdrum')\n >>> convObj.subConverter\n \n '''\n if converterFormat is None:\n raise ConverterException('Did not find a format from the source file')\n converterFormat = converterFormat.lower()\n scf = self.getSubConverterFormats()\n if converterFormat not in scf: \n raise ConverterException('no converter available for format: %s' % converterFormat)\n subConverterClass = scf[converterFormat]\n self.subConverter = subConverterClass()\n\n\n def formatFromHeader(self, dataStr):\n '''\n if dataStr begins with a text header such as \"tinyNotation:\" then\n return that format plus the dataStr with the head removed.\n\n Else, return (None, dataStr) where dataStr is the original untouched.\n\n Not case sensitive.\n\n >>> c = converter.Converter()\n >>> c.formatFromHeader('tinynotation: C4 E2')\n ('tinynotation', 'C4 E2')\n\n >>> c.formatFromHeader('C4 E2')\n (None, 'C4 E2')\n\n >>> c.formatFromHeader('romanText: m1: a: I b2 V')\n ('romantext', 'm1: a: I b2 V')\n\n New formats can register new headers:\n\n >>> class ConverterSonix(converter.subConverters.SubConverter):\n ... registerFormats = ('sonix',)\n ... registerInputExtensions = ('mus',)\n >>> converter.registerSubconverter(ConverterSonix)\n >>> c.formatFromHeader('sonix: AIFF data')\n ('sonix', 'AIFF data')\n >>> converter.resetSubconverters() #_DOCS_HIDE \n '''\n dataStrStartLower = dataStr[:20].lower()\n if six.PY3 and isinstance(dataStrStartLower, bytes):\n dataStrStartLower = dataStrStartLower.decode('utf-8', 'ignore')\n\n foundFormat = None\n sclist = self.subconvertersList()\n for sc in sclist:\n for possibleFormat in sc.registerFormats:\n if dataStrStartLower.startswith(possibleFormat.lower() + ':'):\n foundFormat = possibleFormat\n dataStr = dataStr[len(foundFormat) + 1:]\n dataStr = dataStr.lstrip()\n break\n return (foundFormat, dataStr)\n\n def regularizeFormat(self, fmt):\n '''\n Take in a string representing a format, a file extension (w/ or without leading dot)\n etc. 
and find the format string that best represents the format that should be used.\n        \n        Searches SubConverter.registerFormats first, then SubConverter.registerInputExtensions,\n        then SubConverter.registerOutputExtensions\n        \n        Returns None if no format applies:\n        \n        >>> c = converter.Converter()\n        >>> c.regularizeFormat('mxl')\n        'musicxml'\n        >>> c.regularizeFormat('t')\n        'text'\n        >>> c.regularizeFormat('abc')\n        'abc'\n        >>> c.regularizeFormat('lily.png')\n        'lilypond'\n        >>> c.regularizeFormat('blah') is None\n        True\n        \n        '''\n        # make lower case, as some lilypond processing used upper case\n        fmt = fmt.lower().strip()\n        if fmt.startswith('.'):\n            fmt = fmt[1:] # strip .\n        foundSc = None\n        \n        formatList = fmt.split('.')\n        fmt = formatList[0]\n        if len(formatList) > 1:\n            unused_subformats = formatList[1:]\n        else:\n            unused_subformats = []\n        scl = self.subconvertersList()\n        \n        for sc in scl:\n            formats = sc.registerFormats\n            for scFormat in formats:\n                if fmt == scFormat:\n                    foundSc = sc\n                    break\n            if foundSc is not None:\n                break\n\n        if foundSc is None:\n            for sc in scl:\n                extensions = sc.registerInputExtensions\n                for ext in extensions:\n                    if fmt == ext:\n                        foundSc = sc\n                        break\n                if foundSc is not None:\n                    break\n        if foundSc is None:\n            for sc in scl:\n                extensions = sc.registerOutputExtensions\n                for ext in extensions:\n                    if fmt == ext:\n                        foundSc = sc\n                        break\n                if foundSc is not None:\n                    break\n\n        if foundSc is not None and foundSc.registerFormats:\n            return foundSc.registerFormats[0]\n        else:\n            return None\n\n\n    #---------------------------------------------------------------------------\n    # properties\n    @property\n    def stream(self):\n        '''\n        Returns the .subConverter.stream object.\n        '''\n        if self._thawedStream is not None:\n            return self._thawedStream\n        elif self.subConverter is not None:\n            return self.subConverter.stream\n        else:\n            return None\n        # not _stream: please don't look in other objects' private variables;\n        # humdrum worked differently.\n\n\n\n\n#-------------------------------------------------------------------------------\n# module level convenience methods\n\n# pylint: disable=redefined-builtin\ndef parseFile(fp, number=None, format=None, forceSource=False, **keywords): #@ReservedAssignment\n    '''\n    Given a file path, attempt to parse the file into a Stream.\n    '''\n    v = Converter()\n    fp = common.cleanpath(fp)\n    v.parseFile(fp, number=number, format=format, forceSource=forceSource, **keywords)\n    return v.stream\n\n# pylint: disable=redefined-builtin\ndef parseData(dataStr, number=None, format=None, **keywords): # @ReservedAssignment\n    '''\n    Given musical data represented within a Python string, attempt to parse the\n    data into a Stream.\n    '''\n    v = Converter()\n    v.parseData(dataStr, number=number, format=format, **keywords)\n    return v.stream\n\n# pylint: disable=redefined-builtin\ndef parseURL(url, number=None, format=None, forceSource=False, **keywords): # @ReservedAssignment\n    '''\n    Given a URL, attempt to download and parse the file into a Stream. Note:\n    URL downloading will not happen automatically unless the user has set their\n    Environment \"autoDownload\" preference to \"allow\".\n    '''\n    v = Converter()\n    # pass number through so that multi-piece files also work from URLs;\n    # forceSource is accepted for API symmetry, but a previously downloaded copy is reused.\n    v.parseURL(url, number=number, format=format, **keywords)\n    return v.stream\n\ndef parse(value, *args, **keywords):\n    r'''\n    Given a file path, encoded data in a Python string, or a URL, attempt to\n    parse the item into a Stream. 
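Raw bytes are accepted as well; for instance, data beginning with 'MThd' is\n    recognized as MIDI and parsed as data. 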
Note: URL downloading will not happen\n automatically unless the user has set their Environment \"autoDownload\"\n preference to \"allow\".\n\n Keywords can include `number` which specifies a piece number in a file of\n multipiece file.\n\n `format` specifies the format to parse the line of text or the file as.\n\n A string of text is first checked to see if it is a filename that exists on\n disk. If not it is searched to see if it looks like a URL. If not it is\n processed as data.\n\n PC File:\n \n >>> #_DOCS_SHOW s = converter.parse(r'c:\\users\\myke\\desktop\\myfile.xml') \n \n Mac File:\n \n >>> #_DOCS_SHOW s = converter.parse('/Users/cuthbert/Desktop/myfile.xml') \n\n URL:\n \n >>> #_DOCS_SHOW s = converter.parse('http://midirepository.org/file220/file.mid') \n\n\n Data is preceded by an identifier such as \"tinynotation:\"\n\n >>> s = converter.parse(\"tinyNotation: 3/4 E4 r f# g=lastG trip{b-8 a g} c\", makeNotation=False)\n >>> s.getElementsByClass(meter.TimeSignature)[0]\n \n\n or the format can be passed directly:\n\n >>> s = converter.parse(\"2/16 E4 r f# g=lastG trip{b-8 a g} c\", format='tinyNotation').flat\n >>> s.getElementsByClass(meter.TimeSignature)[0]\n \n '''\n\n #environLocal.printDebug(['attempting to parse()', value])\n if 'forceSource' in keywords:\n forceSource = keywords['forceSource']\n del(keywords['forceSource'])\n else:\n forceSource = False\n\n # see if a work number is defined; for multi-work collections\n if 'number' in keywords:\n number = keywords['number']\n del(keywords['number'])\n else:\n number = None\n\n if 'format' in keywords:\n m21Format = keywords['format']\n del(keywords['format'])\n else:\n m21Format = None\n\n if six.PY3 and isinstance(value, bytes):\n valueStr = value.decode('utf-8', 'ignore')\n else:\n valueStr = value\n\n if (common.isListLike(value) \n and len(value) == 2 \n and value[1] is None \n and os.path.exists(value[0])):\n # comes from corpus.search\n return parseFile(value[0], format=m21Format, **keywords)\n elif (common.isListLike(value) \n and len(value) == 2 \n and isinstance(value[1], int) \n and os.path.exists(value[0])):\n # corpus or other file with movement number\n return parseFile(value[0], format=m21Format, **keywords).getScoreByNumber(value[1])\n elif common.isListLike(value) or args: # tiny notation list # TODO: Remove.\n if args: # add additional args to a list\n value = [value] + list(args)\n return parseData(value, number=number, **keywords)\n # a midi string, must come before os.path.exists test\n elif valueStr.startswith('MThd'):\n return parseData(value, number=number, format=m21Format, **keywords)\n elif os.path.exists(value):\n return parseFile(value, number=number, format=m21Format, \n forceSource=forceSource, **keywords)\n elif os.path.exists(common.cleanpath(value)):\n return parseFile(common.cleanpath(value), number=number, format=m21Format, \n forceSource=forceSource, **keywords)\n \n elif (valueStr.startswith('http://') or valueStr.startswith('https://')):\n # its a url; may need to broaden these criteria\n return parseURL(value, number=number, format=m21Format, \n forceSource=forceSource, **keywords)\n else:\n return parseData(value, number=number, format=m21Format, **keywords)\n\n\n\ndef freeze(streamObj, fmt=None, fp=None, fastButUnsafe=False, zipType='zlib'):\n '''Given a StreamObject and a file path, serialize and store the Stream to a file.\n\n This function is based on the :class:`~music21.converter.StreamFreezer` object.\n\n The serialization format is defined by the `fmt` argument; 'pickle' 
(the default) is only one\n presently supported. 'json' or 'jsonnative' will be used once jsonpickle is good enough.\n\n If no file path is given, a temporary file is used.\n\n The file path is returned.\n\n\n >>> c = converter.parse('tinynotation: 4/4 c4 d e f')\n >>> c.show('text')\n {0.0} \n {0.0} \n {0.0} \n {0.0} \n {1.0} \n {2.0} \n {3.0} \n {4.0} \n >>> fp = converter.freeze(c, fmt='pickle')\n >>> #_DOCS_SHOW fp\n '/tmp/music21/sjiwoe.pgz'\n\n The file can then be \"thawed\" back into a Stream using the \n :func:`~music21.converter.thaw` method.\n\n >>> d = converter.thaw(fp)\n >>> d.show('text')\n {0.0} \n {0.0} \n {0.0} \n {0.0} \n {1.0} \n {2.0} \n {3.0} \n {4.0} \n '''\n from music21 import freezeThaw\n v = freezeThaw.StreamFreezer(streamObj, fastButUnsafe=fastButUnsafe)\n return v.write(fmt=fmt, fp=fp, zipType=zipType) # returns fp\n\n\ndef thaw(fp, zipType='zlib'):\n '''Given a file path of a serialized Stream, defrost the file into a Stream.\n\n This function is based on the :class:`~music21.converter.StreamFreezer` object.\n\n See the documentation for :meth:`~music21.converter.freeze` for demos.\n '''\n from music21 import freezeThaw\n v = freezeThaw.StreamThawer()\n v.open(fp, zipType=zipType)\n return v.stream\n\n\ndef freezeStr(streamObj, fmt=None):\n '''\n Given a StreamObject\n serialize and return a serialization string.\n\n This function is based on the\n :class:`~music21.converter.StreamFreezer` object.\n\n The serialization format is defined by\n the `fmt` argument; 'pickle' (the default),\n is the only one presently supported.\n\n\n >>> c = converter.parse('tinyNotation: 4/4 c4 d e f', makeNotation=False)\n >>> c.show('text')\n {0.0} \n {0.0} \n {1.0} \n {2.0} \n {3.0} \n >>> data = converter.freezeStr(c, fmt='pickle')\n >>> len(data) > 20 # pickle implementation dependent\n True\n >>> d = converter.thawStr(data)\n >>> d.show('text')\n {0.0} \n {0.0} \n {1.0} \n {2.0} \n {3.0} \n\n '''\n from music21 import freezeThaw\n v = freezeThaw.StreamFreezer(streamObj)\n return v.writeStr(fmt=fmt) # returns a string\n\ndef thawStr(strData):\n '''\n Given a serialization string, defrost into a Stream.\n\n This function is based on the :class:`~music21.converter.StreamFreezer` object.\n '''\n from music21 import freezeThaw\n v = freezeThaw.StreamThawer()\n v.openStr(strData)\n return v.stream\n\n\n\n\n#-------------------------------------------------------------------------------\nclass TestExternal(unittest.TestCase):\n # interpreter loading\n\n def runTest(self):\n pass\n\n def testMusicXMLConversion(self):\n from music21.musicxml import testFiles\n for mxString in testFiles.ALL: # @UndefinedVariable\n a = subConverters.ConverterMusicXML()\n a.parseData(mxString)\n\n def testMusicXMLTabConversion(self):\n from music21.musicxml import testFiles\n \n mxString = testFiles.ALL[5] # @UndefinedVariable\n a = subConverters.ConverterMusicXML()\n a.parseData(mxString)\n\n b = parseData(mxString)\n b.show('text')\n\n #{0.0} \n #{0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {0.0} \n # {2.0} \n \n b.show()\n pass \n\n\n def testConversionMusicXml(self):\n c = stream.Score()\n\n from music21.musicxml import testPrimitive\n mxString = testPrimitive.chordsThreeNotesDuration21c\n a = parseData(mxString)\n\n mxString = testPrimitive.beams01\n b = parseData(mxString)\n #b.show()\n\n c.append(a[0])\n c.append(b[0])\n c.show()\n # TODO: this is only showing the minimum number of measures\n\n\n def testParseURL(self):\n urlBase = 
'http://kern.ccarh.org/cgi-bin/ksdata?l=users/craig/classical/'\n urlB = urlBase + 'schubert/piano/d0576&file=d0576-06.krn&f=kern'\n urlC = urlBase + 'bach/cello&file=bwv1007-01.krn&f=xml'\n for url in [urlB, urlC]:\n try:\n unused_post = parseURL(url)\n except:\n print(url)\n raise\n\n def testFreezer(self):\n from music21 import corpus\n s = corpus.parse('bach/bwv66.6.xml')\n fp = freeze(s)\n s2 = thaw(fp)\n s2.show()\n\n\nclass Test(unittest.TestCase):\n\n def runTest(self):\n pass\n\n def testCopyAndDeepcopy(self):\n '''Test copying all objects defined in this module\n '''\n import sys\n for part in sys.modules[self.__module__].__dict__:\n match = False\n for skip in ['_', '__', 'Test', 'Exception']:\n if part.startswith(skip) or part.endswith(skip):\n match = True\n if match:\n continue\n obj = getattr(sys.modules[self.__module__], part)\n if callable(obj) and not isinstance(obj, types.FunctionType):\n i = copy.copy(obj)\n j = copy.deepcopy(obj)\n\n\n def testConversionMX(self):\n from music21.musicxml import testPrimitive\n from music21 import dynamics\n from music21 import note\n\n\n mxString = testPrimitive.pitches01a\n a = parse(mxString)\n a = a.flat\n b = a.getElementsByClass(note.Note)\n # there should be 102 notes\n self.assertEqual(len(b), 102)\n\n\n # test directions, dynamics, wedges\n mxString = testPrimitive.directions31a\n a = parse(mxString)\n a = a.flat\n b = a.getElementsByClass(dynamics.Dynamic)\n # there should be 27 dynamics found in this file\n self.assertEqual(len(b), 27)\n c = a.getElementsByClass(note.Note)\n self.assertEqual(len(c), 53)\n\n # two starts and two stops == 2!\n d = a.getElementsByClass(dynamics.DynamicWedge)\n self.assertEqual(len(d), 2)\n\n\n # test lyrics\n mxString = testPrimitive.lyricsMelisma61d\n a = parse(mxString)\n a = a.flat\n b = a.getElementsByClass(note.Note)\n found = []\n for noteObj in b:\n for obj in noteObj.lyrics:\n found.append(obj)\n self.assertEqual(len(found), 3)\n\n\n # test we are getting rests\n mxString = testPrimitive.restsDurations02a\n a = parse(mxString)\n a = a.flat\n b = a.getElementsByClass(note.Rest)\n self.assertEqual(len(b), 19)\n\n\n # test if we can get trills\n mxString = testPrimitive.notations32a\n a = parse(mxString)\n a = a.flat\n b = a.getElementsByClass(note.Note)\n\n\n\n mxString = testPrimitive.rhythmDurations03a\n a = parse(mxString)\n #a.show('t')\n self.assertEqual(len(a), 2) # one part, plus metadata\n for part in a.getElementsByClass(stream.Part):\n self.assertEqual(len(part), 7) # seven measures\n measures = part.getElementsByClass(stream.Measure)\n self.assertEqual(int(measures[0].number), 1)\n self.assertEqual(int(measures[-1].number), 7)\n\n # print a.recurseRepr()\n\n\n\n # print a.recurseRepr()\n\n # get the third movement\n# mxFile = corpus.getWork('opus18no1')[2]\n# a = parse(mxFile)\n# a = a.flat\n# b = a.getElementsByClass(dynamics.Dynamic)\n# # 110 dynamics\n# self.assertEqual(len(b), 110)\n#\n# c = a.getElementsByClass(note.Note)\n# # over 1000 notes\n# self.assertEqual(len(c), 1289)\n\n\n\n def testConversionMXChords(self):\n from music21 import chord\n from music21.musicxml import testPrimitive\n\n mxString = testPrimitive.chordsThreeNotesDuration21c\n a = parse(mxString)\n for part in a.getElementsByClass(stream.Part):\n chords = part.flat.getElementsByClass(chord.Chord)\n self.assertEqual(len(chords), 7)\n knownSize = [3, 2, 3, 3, 3, 3, 3]\n for i in range(len(knownSize)):\n #print chords[i].pitches, len(chords[i].pitches)\n self.assertEqual(knownSize[i], 
len(chords[i].pitches))\n\n\n def testConversionMXBeams(self):\n\n from music21.musicxml import testPrimitive\n\n mxString = testPrimitive.beams01\n a = parse(mxString)\n part = a.parts[0]\n notes = part.flat.notesAndRests\n beams = []\n for n in notes:\n if \"Note\" in n.classes:\n beams += n.beams.beamsList\n self.assertEqual(len(beams), 152)\n\n\n def testConversionMXTime(self):\n\n from music21.musicxml import testPrimitive\n\n mxString = testPrimitive.timeSignatures11c\n a = parse(mxString)\n unused_part = a.parts[0]\n\n\n mxString = testPrimitive.timeSignatures11d\n a = parse(mxString)\n part = a.parts[0]\n\n notes = part.flat.notesAndRests\n self.assertEqual(len(notes), 11)\n\n\n def testConversionMXClefPrimitive(self):\n from music21 import clef\n from music21.musicxml import testPrimitive\n mxString = testPrimitive.clefs12a\n a = parse(mxString)\n part = a.parts[0]\n\n clefs = part.flat.getElementsByClass(clef.Clef)\n self.assertEqual(len(clefs), 18)\n\n\n def testConversionMXClefTimeCorpus(self):\n\n from music21 import corpus, clef, meter\n a = corpus.parse('luca')\n\n # there should be only one clef in each part\n clefs = a.parts[0].flat.getElementsByClass(clef.Clef)\n self.assertEqual(len(clefs), 1)\n self.assertEqual(clefs[0].sign, 'G')\n\n # second part\n clefs = a.parts[1].flat.getElementsByClass(clef.Clef)\n self.assertEqual(len(clefs), 1)\n self.assertEqual(clefs[0].octaveChange, -1)\n self.assertEqual(type(clefs[0]).__name__, 'Treble8vbClef')\n\n # third part\n clefs = a.parts[2].flat.getElementsByClass(clef.Clef)\n self.assertEqual(len(clefs), 1)\n\n # check time signature count\n ts = a.parts[1].flat.getElementsByClass(meter.TimeSignature)\n self.assertEqual(len(ts), 4)\n\n\n def testConversionMXArticulations(self):\n from music21 import note\n from music21.musicxml import testPrimitive\n\n mxString = testPrimitive.articulations01\n a = parse(mxString)\n part = a.parts[0]\n\n notes = part.flat.getElementsByClass(note.Note)\n self.assertEqual(len(notes), 4)\n post = []\n match = [\"\",\n \"\",\n \"\",\n \"\"]\n for i in range(len(notes)):\n post.append(str(notes[i].articulations[0].__class__))\n self.assertEqual(post, match)\n #a.show()\n\n def testConversionMXKey(self):\n from music21 import key\n from music21.musicxml import testPrimitive\n mxString = testPrimitive.keySignatures13a\n a = parse(mxString)\n part = a.parts[0]\n\n keyList = part.flat.getElementsByClass(key.KeySignature)\n self.assertEqual(len(keyList), 46)\n\n\n def testConversionMXMetadata(self):\n from music21.musicxml import testFiles\n\n a = parse(testFiles.mozartTrioK581Excerpt) # @UndefinedVariable\n self.assertEqual(a.metadata.composer, 'Wolfgang Amadeus Mozart')\n self.assertEqual(a.metadata.title, 'Quintet for Clarinet and Strings')\n self.assertEqual(a.metadata.movementName, 'Menuetto (Excerpt from Second Trio)')\n\n a = parse(testFiles.binchoisMagnificat) # @UndefinedVariable\n self.assertEqual(a.metadata.composer, 'Gilles Binchois')\n # this gets the best title available, even though this is movement title\n self.assertEqual(a.metadata.title, 'Excerpt from Magnificat secundi toni')\n\n\n def testConversionMXBarlines(self):\n from music21 import bar\n from music21.musicxml import testPrimitive\n a = parse(testPrimitive.barlines46a)\n part = a.parts[0]\n barlineList = part.flat.getElementsByClass(bar.Barline)\n self.assertEqual(len(barlineList), 11)\n\n def testConversionXMLayout(self):\n\n from music21.musicxml import testPrimitive\n from music21 import layout\n\n a = 
parse(testPrimitive.systemLayoutTwoPart)\n #a.show()\n\n part = a.getElementsByClass(stream.Part)[0]\n systemLayoutList = part.flat.getElementsByClass(layout.SystemLayout)\n measuresWithSL = []\n for e in systemLayoutList:\n measuresWithSL.append(e.measureNumber)\n self.assertEqual(measuresWithSL, [1, 3, 4, 5, 7, 8])\n self.assertEqual(len(systemLayoutList), 6)\n\n\n def testConversionMXTies(self):\n\n from music21.musicxml import testPrimitive\n from music21 import clef\n\n a = parse(testPrimitive.multiMeasureTies)\n #a.show()\n\n countTies = 0\n countStartTies = 0\n for p in a.parts:\n post = p.recurse().notes[0].getContextByClass('Clef')\n self.assertEqual(isinstance(post, clef.TenorClef), True)\n for n in p.flat.notes:\n if n.tie != None:\n countTies += 1\n if n.tie.type == 'start' or n.tie.type =='continue':\n countStartTies += 1\n\n self.assertEqual(countTies, 57)\n self.assertEqual(countStartTies, 40)\n\n\n def testConversionMXInstrument(self):\n from music21 import corpus\n s = corpus.parse('schumann_clara/opus17', 3)\n #s.show()\n is1 = s.parts[0].flat.getElementsByClass('Instrument')\n self.assertEqual(len(is1), 1)\n #self.assertIn('Violin', is1[0].classes)\n is2 = s.parts[1].flat.getElementsByClass('Instrument')\n self.assertEqual(len(is2), 1)\n #self.assertIn('Violoncello', is1[0].classes)\n is3 = s.parts[2].flat.getElementsByClass('Instrument')\n self.assertEqual(len(is3), 1)\n #self.assertIn('Piano', is1[0].classes)\n\n\n def testConversionMidiBasic(self):\n directory = common.getPackageDir(relative=False, remapSep=os.sep)\n fp = None\n for fp in directory:\n if fp.endswith('midi'):\n break\n else:\n raise ConverterException('Could not find a directory with MIDI')\n if fp is None:\n raise ConverterException('Could not find a directory with MIDI')\n \n dirLib = os.path.join(fp, 'testPrimitive')\n # a simple file created in athenacl\n fp = os.path.join(dirLib, 'test01.mid')\n\n unused_s = parseFile(fp)\n unused_s = parse(fp)\n\n c = subConverters.ConverterMidi()\n c.parseFile(fp)\n\n # try low level string data passing\n f = open(fp, 'rb')\n data = f.read()\n f.close()\n\n c.parseData(data)\n\n # try module-leve; function\n parseData(data)\n parse(data)\n\n\n def testConversionMidiNotes(self):\n from music21 import meter, key, chord, note\n\n fp = os.path.join(common.getSourceFilePath(), 'midi', 'testPrimitive', 'test01.mid')\n # a simple file created in athenacl\n #for fn in ['test01.mid', 'test02.mid', 'test03.mid', 'test04.mid']:\n s = parseFile(fp)\n #s.show()\n self.assertEqual(len(s.flat.getElementsByClass(note.Note)), 18)\n\n\n # has chords and notes\n fp = os.path.join(common.getSourceFilePath(), 'midi', 'testPrimitive', 'test05.mid')\n s = parseFile(fp)\n #s.show()\n #environLocal.printDebug(['\\nopening fp', fp])\n\n self.assertEqual(len(s.flat.getElementsByClass(note.Note)), 2)\n self.assertEqual(len(s.flat.getElementsByClass(chord.Chord)), 4)\n\n self.assertEqual(len(s.flat.getElementsByClass(meter.TimeSignature)), 0)\n self.assertEqual(len(s.flat.getElementsByClass(key.KeySignature)), 0)\n\n\n # this sample has eight note triplets\n fp = os.path.join(common.getSourceFilePath(), 'midi', 'testPrimitive', 'test06.mid')\n s = parseFile(fp)\n #s.show()\n\n #environLocal.printDebug(['\\nopening fp', fp])\n\n #s.show()\n from fractions import Fraction as F\n dList = [n.quarterLength for n in s.flat.notesAndRests[:30]]\n match = [0.5, 0.5, 1.0, 0.5, 0.5, 0.5, 0.5, 1.0, 0.5, 0.5, \n 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, \n 0.5, 0.5, 0.5, 0.5, F(1,3), F(1,3), 
F(1,3), 0.5, 0.5, 1.0]\n self.assertEqual(dList, match)\n\n\n self.assertEqual(len(s.flat.getElementsByClass('TimeSignature')), 1)\n self.assertEqual(len(s.flat.getElementsByClass('KeySignature')), 1)\n\n\n # this sample has sixteenth note triplets\n # TODO much work is still needed on getting timing right\n # this produces numerous errors in makeMeasure partitioning\n fp = os.path.join(common.getSourceFilePath(), 'midi', 'testPrimitive', 'test07.mid')\n #environLocal.printDebug(['\\nopening fp', fp])\n s = parseFile(fp)\n #s.show('t')\n self.assertEqual(len(s.flat.getElementsByClass('TimeSignature')), 1)\n self.assertEqual(len(s.flat.getElementsByClass('KeySignature')), 1)\n\n\n\n\n # this sample has dynamic changes in key signature\n fp = os.path.join(common.getSourceFilePath(), 'midi', 'testPrimitive', 'test08.mid')\n #environLocal.printDebug(['\\nopening fp', fp])\n s = parseFile(fp)\n #s.show('t')\n self.assertEqual(len(s.flat.getElementsByClass('TimeSignature')), 1)\n found = s.flat.getElementsByClass('KeySignature')\n self.assertEqual(len(found), 3)\n # test the right keys\n self.assertEqual(found[0].sharps, -3)\n self.assertEqual(found[1].sharps, 3)\n self.assertEqual(found[2].sharps, -1)\n\n\n def testConversionMXRepeats(self):\n from music21 import bar\n from music21.musicxml import testPrimitive\n\n mxString = testPrimitive.simpleRepeat45a\n s = parse(mxString)\n\n part = s.parts[0]\n measures = part.getElementsByClass('Measure')\n self.assertEqual(measures[0].leftBarline, None)\n self.assertEqual(measures[0].rightBarline.style, 'final')\n\n self.assertEqual(measures[1].leftBarline, None)\n self.assertEqual(measures[1].rightBarline.style, 'final')\n\n mxString = testPrimitive.repeatMultipleTimes45c\n s = parse(mxString)\n\n self.assertEqual(len(s.flat.getElementsByClass(bar.Barline)), 4)\n part = s.parts[0]\n measures = part.getElementsByClass('Measure')\n\n #s.show()\n\n\n\n def testConversionABCOpus(self):\n\n from music21.abcFormat import testFiles\n from music21 import corpus\n\n s = parse(testFiles.theAleWifesDaughter)\n # get a Stream object, not an opus\n self.assertEqual(isinstance(s, stream.Score), True)\n self.assertEqual(isinstance(s, stream.Opus), False)\n self.assertEqual(len(s.flat.notesAndRests), 66)\n\n # a small essen collection\n op = corpus.parse('essenFolksong/teste')\n # get a Stream object, not an opus\n #self.assertEqual(isinstance(op, stream.Score), True)\n self.assertEqual(isinstance(op, stream.Opus), True)\n self.assertEqual([len(s.flat.notesAndRests) for s in op], \n [33, 51, 59, 33, 29, 174, 67, 88])\n #op.show()\n\n # get one work from the opus\n s = corpus.parse('essenFolksong/teste', number=6)\n self.assertEqual(isinstance(s, stream.Score), True)\n self.assertEqual(isinstance(s, stream.Opus), False)\n self.assertEqual(s.metadata.title, 'Moli hua')\n\n #s.show()\n\n\n def testConversionABCWorkFromOpus(self):\n # test giving a work number at loading\n from music21 import corpus\n s = corpus.parse('essenFolksong/han1', number=6)\n self.assertEqual(isinstance(s, stream.Score), True)\n self.assertEqual(s.metadata.title, 'Yi gan hongqi kongzhong piao')\n # make sure that beams are being made\n self.assertEqual(str(s.parts[0].flat.notesAndRests[4].beams), \n '/>')\n #s.show()\n\n\n\n def testConversionMusedata(self):\n fp = os.path.join(common.getSourceFilePath(), 'musedata', 'testPrimitive', 'test01')\n s = parse(fp)\n self.assertEqual(len(s.parts), 5)\n #s.show()\n\n\n\n def testMixedArchiveHandling(self):\n '''Test getting data out of musedata or 
musicxml zip files.\n        '''\n        fp = os.path.join(common.getSourceFilePath(), 'musicxml', 'testMxl.mxl')\n        af = ArchiveManager(fp)\n        # for now, only support zip\n        self.assertEqual(af.archiveType, 'zip')\n        self.assertEqual(af.isArchive(), True)\n        # if this is a musicxml file, there will only be a single file; we\n        # can call getData() to get it\n        post = af.getData()\n        self.assertEqual(post[:38], '<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n        self.assertEqual(af.getNames(), ['musicXML.xml', 'META-INF/', 'META-INF/container.xml'])\n\n#         # test from a file that ends in zip\n#         # note: this is a stage1 file!\n#         fp = os.path.join(common.getSourceFilePath(), 'musedata', 'testZip.zip')\n#         af = ArchiveManager(fp)\n#         # for now, only support zip\n#         self.assertEqual(af.archiveType, 'zip')\n#         self.assertEqual(af.isArchive(), True)\n#         self.assertEqual(af.getNames(), ['01/', '01/04', '01/02', '01/03', '01/01'] )\n# \n#         # returns a list of strings\n#         self.assertEqual(af.getData(dataFormat='musedata')[0][:30],\n#             '378\\n1080 1\\nBach Gesells\\nchaft')\n\n\n        #mdw = musedataModule.MuseDataWork()\n        # can add a list of strings from getData\n        #mdw.addString(af.getData(dataFormat='musedata'))\n        #self.assertEqual(len(mdw.files), 4)\n#\n#         mdpList = mdw.getParts()\n#         self.assertEqual(len(mdpList), 4)\n\n        # try to load and parse the zip file\n        #s = parse(fp)\n\n        # test loading a directory\n        fp = os.path.join(common.getSourceFilePath(), 'musedata',\n                'testPrimitive', 'test01')\n        cmd = subConverters.ConverterMuseData()\n        cmd.parseFile(fp)\n\n    def testMEIvsMX(self):\n        '''\n        Ensure Converter.parseData() distinguishes between a string with MEI data and a string with\n        MusicXML data. The \"subConverter\" module is mocked out because we don't actually need to\n        test the conversion process in this unit test.\n        '''\n        # These strings aren't valid documents, but they are enough to pass the detection we're\n        # testing in parseData(). But it does mean we'll be testing in a strange way.\n        # (minimal reconstructed literal: an early <mei> tag is what routes a string to the\n        # MEI subconverter)\n        meiString = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><mei><music></music></mei>'\n        #mxlString = ('<?xml version=\"1.0\" encoding=\"UTF-8\"?>' +\n        #             '<score-partwise></score-partwise>')\n\n        # The \"mei\" module raises an MeiElementError with \"meiString,\" so as long as that's raised,\n        # we know that parseData() chose correctly.\n        from music21.mei.base import MeiElementError\n        testConv = Converter()\n        self.assertRaises(MeiElementError, testConv.parseData, meiString)\n\n        # TODO: another test -- score-partwise is good enough for new converter.\n        ## The ConverterMusicXML raises a SubConverterException with \"mxlString,\" so as long as\n        ## that's raised, we know that parseData()... well at least that it didn't choose MEI.\n        #from music21.converter.subConverters import SubConverterException\n        #testConv = Converter()\n        #self.assertRaises(SubConverterException, testConv.parseData, mxlString)\n        \n    def testParseMidiQuantize(self):\n        '''\n        Checks quantization when parsing a stream. 
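Quantization is requested via the\n        quarterLengthDivisors keyword; with quarterLengthDivisors=[2], every parsed duration\n        snaps to the nearest multiple of half a quarter note. 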
Here everything snaps to the 8th note.\n '''\n from music21 import omr\n from music21.common import numberTools\n midifp = omr.correctors.pathName + os.sep + 'k525short.mid'\n midistream = parse(midifp, forceSource=True, storePickle=False, quarterLengthDivisors=[2])\n #midistream.show()\n for n in midistream.recurse(classFilter='Note'):\n self.assertTrue(numberTools.almostEquals(n.quarterLength % .5, 0.0))\n \n \n\n#-------------------------------------------------------------------------------\n# define presented order in documentation\n_DOC_ORDER = [parse, parseFile, parseData, parseURL, freeze, thaw, freezeStr, thawStr, \n Converter, registerSubconverter, unregisterSubconverter]\n\n\nif __name__ == \"__main__\":\n # sys.arg test options will be used in mainTest()\n import music21\n music21.mainTest(Test)\n\n\n\n#------------------------------------------------------------------------------\n# eof\n\n","repo_name":"springml/case_routing","sub_path":"env/lib/python2.7/site-packages/music21/converter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":68830,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"73"} +{"seq_id":"38065384848","text":"import torch\nfrom torchvision.transforms import ToTensor\n\nimport numpy as np\nfrom PIL import Image\nfrom os import remove\n\nclass ResNet10(torch.nn.Module):\n def __init__(self):\n super().__init__()\n \n self.res15t75 = torch.nn.Conv2d(15,75,1,2,0)\n self.res75t150 = torch.nn.Conv2d(75,150,1,2,0)\n self.res150t300 = torch.nn.Conv2d(150,300,1,2,0)\n \n self.conv15 = torch.nn.Conv2d(3,15,3,1,1)\n \n self.conv75a = torch.nn.Conv2d(15,75,3,2,1)\n self.conv75b = torch.nn.Conv2d(75,75,3,1,1)\n self.conv75c = torch.nn.Conv2d(75,75,3,1,1)\n \n self.conv150a = torch.nn.Conv2d(75,150,3,2,1)\n self.conv150b = torch.nn.Conv2d(150,150,3,1,1)\n self.conv150c = torch.nn.Conv2d(150,150,3,1,1)\n \n self.conv300a = torch.nn.Conv2d(150,300,3,2,1)\n self.conv300b = torch.nn.Conv2d(300,300,3,1,1)\n self.conv300c = torch.nn.Conv2d(300,300,3,1,1)\n \n self.aapool = torch.nn.AvgPool2d(2,2)\n \n self.flat = torch.nn.Flatten()\n \n self.linear1 = torch.nn.Linear(1800,300)\n self.linear2 = torch.nn.Linear(300,2)\n \n def forward(self,data):\n \n out = torch.relu(self.conv15(data))\n \n x = self.res15t75(out)\n out = torch.relu(self.conv75a(out))\n out = torch.relu(self.conv75b(out) + x)\n out = torch.relu(self.conv75c(out))\n \n x = self.res75t150(out)\n out = torch.relu(self.conv150a(out))\n out = torch.relu(self.conv150b(out) + x)\n out = torch.relu(self.conv150c(out))\n \n x = self.res150t300(out)\n out = torch.relu(self.conv300a(out))\n out = torch.relu(self.conv300b(out) + x)\n out = torch.relu(self.conv300c(out)) \n \n out = self.aapool(out)\n out = self.aapool(out)\n \n out = self.flat(out)\n \n out = self.linear1(out)\n out = torch.relu(out)\n out = self.linear2(out)\n\n out = torch.softmax(out, -1)\n \n return out \n\n\ndef predict(path):\n img = Image.open(path)\n \n if img.size != (64,96):\n img = img.resize((64,96))\n\n pred_model = ResNet10()\n pred_model.load_state_dict(torch.load(\"./models/Gender_RN10_acc97.pth\", map_location=torch.device(\"cpu\")))\n \n img_cls = ['Female', 'Male']\n\n transform = ToTensor()\n img_tensor = transform(img)\n img_tensor = img_tensor[:3]\n \n img_tensor = torch.reshape(img_tensor, (1,3,96,64))\n \n pred = pred_model(img_tensor).detach()\n pred = np.array(pred[0])\n for i in range(len(img_cls)):\n pred[i] = round(pred[i]*100, 2)\n\n result = 
list(zip(img_cls, pred))\n result.sort(key=lambda x:x[1], reverse=True)\n\n remove(path)\n \n return result\n\n","repo_name":"Anky209e/ModeLsmith","sub_path":"cnn/classes/gender.py","file_name":"gender.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"73786666157","text":"import inspect\nimport numpy\nimport pandas\nimport torch\nimport torch.nn as nn\nfrom ..util.items import Items\nfrom ..dataset.dateset import Tsr\nfrom ..loss import loss_quantile, loss_mse\n\n# Tsr = torch.DoubleTensor\n# Tsr = torch.Tensor\n\n\nclass ModelBase(nn.Module):\n quantiles = [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]\n name = \"base\"\n\n def __init__(\n self,\n dim_ins: tuple, # dims of ti, tc, kn\n dim_out: int, # just reserved\n ws: int, # windows size of time series/sequence\n dim_emb: int, # dim for embedding\n n_heads: int, # the number of attention heads in transformer\n n_layers: int, # layers of multi-heads\n k: int, # the numbers of curves\n ):\n super().__init__()\n self.set_params()\n\n # override args from child/sub class\n dim_ins = self.args.dim_ins\n dim_emb = self.args.dim_emb\n ws = self.args.ws\n n_heads = self.args.n_heads\n n_layers = self.args.n_layers\n\n self.n_quantiles = len(self.quantiles) # the number of quantiles\n\n # embedder\n n_dim = sum(dim_ins[0:])\n self.emb_encode = nn.Linear(n_dim, dim_emb) # .double()\n self.emb_decode = nn.Linear(n_dim, dim_emb) # .double()\n max_len = max(16, ws)\n self.pos = PositionalEncoding(d_model=dim_emb, max_len=max_len)\n\n # Transformer\n prm = dict(\n d_model=dim_emb,\n nhead=n_heads,\n num_encoder_layers=n_layers,\n num_decoder_layers=n_layers,\n )\n self.tr = nn.Transformer(**prm) # .double()\n\n # linears\n self.dc = nn.Linear(ws * dim_emb, ws * n_dim * self.n_quantiles) # .double()\n\n # constraint\n self.loss_constraint_pretrain = 0\n\n # to double\n self.emb_encode = self.emb_encode.double()\n self.emb_decode = self.emb_decode.double()\n self.tr = self.tr.double()\n self.dc = self.dc.double()\n\n # initialize weights/biases\n weight_interval = 0.01\n nn.init.uniform_(self.emb_encode.weight, -weight_interval, weight_interval)\n nn.init.uniform_(self.emb_decode.weight, -weight_interval, weight_interval)\n nn.init.xavier_normal_(self.dc.weight)\n for fc in [self.emb_encode, self.emb_decode, self.dc]:\n nn.init.zeros_(fc.bias)\n\n def set_params(self):\n _frame = inspect.currentframe()\n _locals = _frame.f_back.f_back.f_locals\n assert _frame.f_back.f_back.f_locals[\"__class__\"] == type(self)\n args = {k: v for k, v in _locals.items() if k not in [\"self\"] and k[:1] != \"_\"}\n self.args = Items().setup(args)\n\n def make_x(self, ti: Tsr, tc: Tsr, kn: Tsr):\n ti_base = pandas.date_range(\"2010-1-1\", \"2010-1-2\")[0]\n ti_base = ti_base.to_numpy().astype(numpy.int64)\n n_digits = numpy.int64(numpy.floor(numpy.log10(ti_base)))\n interval = (\n numpy.sqrt(2) * (10 ** n_digits) / 10\n ) # make interval to be irrational number\n _ti = ti / interval # _ti scales to (0, 1) by 2200/1/1\n x = torch.cat([_ti, tc, kn], dim=-1) # as input\n return x\n\n def _pretrain(self, ti: Tsr, tc: Tsr, kn: Tsr) -> Tsr:\n # setup\n mask_rate = 0.15\n\n # concat input\n x = self.make_x(ti, tc, kn) # as input\n y = x.clone() # as target\n\n # make mask\n B, W, D = x.shape\n zr = torch.zeros((1, W, D))\n mask_vector = -1 * torch.ones_like(zr) # (1, W, D)\n probs = mask_rate * torch.ones(1, W, 1) # (1, W, 1)\n msk = torch.bernoulli(probs).repeat(1, 1, D) # (1, W, 
D)\n mask_vector *= msk # (1, W, D)\n mask_vector = mask_vector.repeat(B, 1, 1) # this vector means `[MASK]`\n msk_flipped = 1 - msk.type(torch.bool).type(torch.long) # (1, W, D)\n assert (msk[0, :, 0] + msk_flipped[0, :, 0] == torch.ones(W)).all().item()\n\n # masking\n x *= msk_flipped.repeat(B, 1, 1).to(x.device)\n x += mask_vector.to(x.device)\n\n # embedding\n emb_encode = self.emb_encode(x) * numpy.sqrt(x.shape[-1])\n emb_encode = emb_encode.transpose(1, 0) # (B, W, Demb) -> (W, B, Demb)\n emb_encode = self.pos(emb_encode)\n\n # - for y\n emb_decode = self.emb_decode(y) * numpy.sqrt(y.shape[-1])\n # emb_decode = self.emb_encode(y) * numpy.sqrt(y.shape[-1])\n emb_decode = emb_decode.transpose(1, 0) # (B, W, Demb) -> (W, B, Demb)\n emb_decode = self.pos(emb_decode)\n\n # transform\n def reshape(tsr: Tsr) -> Tsr:\n _tsr = tsr.transpose(1, 0) # (W, B, Demb) -> (B, W, Demb)\n return _tsr.reshape(-1, self.args.ws * self.args.dim_emb)\n\n encoded = reshape(self.tr(emb_encode, emb_decode))\n p = self.dc(encoded)\n p = p.reshape(*x.shape, self.n_quantiles)\n p = torch.sigmoid(p)\n\n return p\n\n def pretrain(self, ti: Tsr, tc: Tsr, kn: Tsr):\n raise NotImplementedError(type(self))\n\n def forward(self, ti: Tsr, tc: Tsr, kn: Tsr):\n raise NotImplementedError(type(self))\n\n def loss_pretrain(self, ti: Tsr, tc: Tsr, kn: Tsr, **params) -> Tsr:\n x = self.make_x(ti, tc, kn)\n y = x.unsqueeze(-1)\n p = self.pretrain(ti, tc, kn) # (B, W, D, k)\n loss = self.calc_loss_pretrain(p, y, **params)\n return loss\n\n def calc_loss_pretrain(self, pred_y: Tsr, tg: Tsr, **params) -> Tsr:\n loss = loss_quantile(pred_y, tg)\n loss += self.calc_loss_pretrain_constraint(**params)\n return loss\n\n def calc_loss_pretrain_constraint(self, **params) -> Tsr:\n return 0.0\n\n def loss_train(self, ti: Tsr, tc: Tsr, kn: Tsr, tg: Tsr, **params) -> Tsr:\n p = self(ti, tc, kn) # (B, W, D, k)\n loss = self.calc_loss(p, tg, **params)\n return loss\n\n def calc_loss(self, pred_y: Tsr, tg: Tsr, **params) -> Tsr:\n raise NotImplementedError(type(self))\n\n\nclass ModelTimesries(ModelBase):\n r\"\"\"\n TODO: to implement\n \"\"\"\n\n def __init__(self, dim_ins=16, n_heads=4, ws=8, n_quantiles=7):\n super().__init__() # after this call, to be enabled to access `self.args`\n self.cyclic = Cyclic(\n dim_ins=dim_ins, n_heads=4, n_quantiles=self.args.n_quantiles\n )\n self.trend = None\n self.recent = None\n self.ws = ws # window size\n\n def forward(self, ti: Tsr, tc: Tsr, kn: Tsr, un: Tsr, tg: Tsr):\n r\"\"\"\n Args:\n ti: time index\n tc: time constant\n kn: known time variant / exog\n un: unknown time variant / multi variable\n Shapes:\n ti: (B, W, D1)\n tc: (B, W, D2)\n kn: (B, W, D3)\n un: (B, W, D4)\n B: batch size\n W: window size\n D?: dims\n \"\"\"\n cyclic = self.cyclic(ti, tc, kn, tg)\n trend = self.trend(ti, tc, kn, tg)\n ws = self.ws\n recent = self.recent(ti, kn, tc[:, -ws:, :], un[:, -ws:, :], tg)\n y = cyclic + trend + recent\n return y\n\n\nclass PositionalEncoding(nn.Module):\n \"\"\"\n c.f. 
https://pytorch.org/tutorials/beginner/transformer_tutorial.html\n \"\"\"\n\n def __init__(self, d_model, dropout=0.0, max_len=512):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, d_model, 2).float() * (-numpy.log(10000.0) / d_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1) # (W, D) -> (1, W, D) -> (W, 1, D)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x: Tsr) -> Tsr:\n \"\"\"\n Shapes:\n x: (W, B, D)\n B: batch size\n W: window size\n D: dim\n \"\"\"\n x = x + self.pe[: x.size(0), :]\n return self.dropout(x) # (W, B, D)\n\n\nclass Cyclic(ModelBase):\n name = \"cyclic\"\n\n def __init__(\n self,\n dim_ins: tuple,\n dim_out: int,\n ws: int,\n dim_emb=4 * 2, # dim for embedding\n n_heads=4,\n n_layers=1, # layers of multi-heads\n k=5, # the numbers of sin/cos curves\n ):\n super().__init__(\n dim_ins,\n dim_out,\n ws,\n dim_emb,\n n_heads,\n n_layers,\n k,\n ) # after this call, to be enabled to access `self.args`\n\n # linears\n self.ak = nn.Linear(ws * dim_emb, k * self.n_quantiles).double()\n self.bk = nn.Linear(ws * dim_emb, k * self.n_quantiles).double()\n self.wk = nn.Linear(ws * dim_emb, k).double()\n self.ok = nn.Linear(ws * dim_emb, k).double()\n\n # constraint\n self.loss_constraint = 0 # just reserved\n\n # initialize weights\n nn.init.kaiming_normal_(self.ak.weight)\n nn.init.kaiming_normal_(self.bk.weight)\n nn.init.xavier_normal_(self.wk.weight)\n nn.init.xavier_normal_(self.ok.weight)\n\n for fc in [self.ak, self.bk, self.wk, self.ok]:\n nn.init.zeros_(fc.bias)\n\n def make_x(self, ti: Tsr, tc: Tsr, kn: Tsr):\n pi = numpy.pi\n _ti = ti % (2 * pi) / (2 * pi)\n x = torch.cat([_ti, tc, kn], dim=-1) # as input\n return x\n\n def pretrain(self, ti: Tsr, tc: Tsr, kn: Tsr, **params) -> Tsr:\n return self._pretrain(ti, tc, kn)\n\n def _forward(self, ti: Tsr, tc: Tsr, kn: Tsr):\n # embedding\n # - for X\n x = self.make_x(ti, tc, kn) # as input\n emb_encode = self.emb_encode(x) * numpy.sqrt(x.shape[-1])\n emb_encode = emb_encode.transpose(1, 0) # (B, W, Demb) -> (W, B, Demb)\n emb_encode = self.pos(emb_encode)\n\n # transform\n def reshape(tsr: Tsr) -> Tsr:\n _tsr = tsr.transpose(1, 0) # (W, B, Demb) -> (B, W, Demb)\n return _tsr.reshape(-1, self.args.ws * self.args.dim_emb)\n\n z = reshape(self.tr.encoder(emb_encode))\n\n a = self.ak(z)\n b = self.bk(z)\n\n w = self.wk(z)\n o = self.ok(z)\n\n pi = numpy.pi\n o = 2 * pi * torch.sigmoid(o)\n\n return x, a, b, w, o\n\n def forward(self, ti: Tsr, tc: Tsr, kn: Tsr):\n x, a, b, w, o = self._forward(ti, tc, kn)\n\n # adjusting the shape\n k, q = self.args.k, self.n_quantiles\n kq = k * q\n dim_ti = self.args.dim_ins[0]\n _ti = x[:, :, :dim_ti]\n t = _ti[:, -1, :].repeat(1, kq).view(-1, k, q)\n w = w.view(-1, k, 1).repeat(1, 1, q).view(-1, k, q)\n o = o.view(-1, k, 1).repeat(1, 1, q).view(-1, k, q)\n a = a.view(-1, k, q)\n b = b.view(-1, k, q)\n\n # calculate theta (rad)\n th = w * t + o\n y = a * torch.cos(th) + b * torch.sin(th)\n y = y.sum(dim=1) # sum_k\n\n return y\n\n def calc_loss(self, pred_y: Tsr, tg: Tsr, **params) -> Tsr:\n y = tg[:, -1, :]\n loss = loss_quantile(pred_y, y)\n loss += self.calc_loss_train_constraint(**params)\n return loss\n\n def calc_loss_train_constraint(self, **params) -> Tsr:\n if \"batch\" not in params: # possibly in 
prediction context\n            return 0.0\n\n        bti_org, __bti, btc, bkn, __btg = params[\"batch\"]\n        __x, a, b, w, o = self._forward(bti_org, btc, bkn)\n        # l2 = nn.MSELoss()\n        l1 = nn.SmoothL1Loss()\n        loss_constraint = (\n            l1(a[:-1], a[1:])\n            + l1(b[:-1], b[1:])\n            + l1(w[:-1], w[1:])\n            + l1(o[:-1], o[1:])\n        )\n        return loss_constraint\n\n\nclass Trend(ModelBase):\n    name = \"trend\"\n\n    def __init__(\n        self,\n        dim_ins: tuple,\n        dim_out: int,\n        ws: int,\n        dim_emb=3,\n        n_heads=3,\n        n_layers=1,\n        k=5,  # the number of ReLU curves\n    ):\n        super().__init__(\n            dim_ins,\n            dim_out,\n            ws,\n            dim_emb,\n            n_heads,\n            n_layers,\n            k,\n        )  # after this call, to be enabled to access `self.args`\n\n        # linears\n        self.ak = nn.Linear(ws * dim_emb, k).double()\n        self.bk = nn.Linear(ws * dim_emb, self.n_quantiles).double()\n        self.ok = nn.Linear(ws * dim_emb, k).double()\n\n        # activation\n        self.relu = nn.ReLU()\n\n        # constraint\n        self.loss_constraint = 0\n\n        # initialize weights\n        nn.init.xavier_normal_(self.ak.weight)\n        nn.init.kaiming_normal_(self.bk.weight)\n        nn.init.xavier_normal_(self.ok.weight)\n        for fc in [self.ak, self.bk, self.ok]:\n            nn.init.zeros_(fc.bias)\n\n    def pretrain(self, ti: Tsr, tc: Tsr, kn: Tsr) -> Tsr:\n        return self._pretrain(ti, tc, kn)\n\n    def _forward(self, ti: Tsr, tc: Tsr, kn: Tsr):\n        # embedding\n        # - for X\n        x = self.make_x(ti, tc, kn)  # as input\n        emb_encode = self.emb_encode(x) * numpy.sqrt(x.shape[-1])\n        emb_encode = emb_encode.transpose(1, 0)  # (B, W, Demb) -> (W, B, Demb)\n        emb_encode = self.pos(emb_encode)\n\n        # transform\n        def reshape(tsr: Tsr):\n            _tsr = tsr.transpose(1, 0)  # (W, B, Demb) -> (B, W, Demb)\n            return _tsr.reshape(-1, self.args.dim_emb * self.args.ws)\n\n        h = reshape(self.tr.encoder(emb_encode))\n        a = self.ak(h)\n        b = self.bk(h)\n        o = torch.sigmoid(self.ok(h))  # offsets in (0, 1), the same normalized scale as ti (dates up to ~2200-1-1)\n\n        return x, a, b, o\n\n    def forward(self, ti: Tsr, tc: Tsr, kn: Tsr):\n        x, a, b, o = self._forward(ti, tc, kn)\n\n        # adjusting the shape\n        k, q = self.args.k, self.n_quantiles\n        kq = k * q\n        dim_ti = self.args.dim_ins[0]\n        _ti = x[:, :, :dim_ti]\n        t = _ti[:, -1, :].repeat(1, kq).view(-1, k, q)  # shape = (B, k, q)\n        a = a.view(-1, k, 1).repeat(1, 1, q)  # (B, k, q)\n        b = b.view(-1, 1, q).repeat(1, k, 1)  # (B, k, q)\n        o = o.view(-1, k, 1).repeat(1, 1, q)  # (B, k, q)\n\n        # calculate trend line with t\n        y = a * self.relu(t - o) + b\n        y = y.sum(dim=1)  # \\sum_k y_{b, k, q}\n        return y\n\n    def calc_loss(self, pred_y: Tsr, tg: Tsr, **params) -> Tsr:\n        idx = pred_y.shape[-1] // 2\n        p = pred_y[..., idx].unsqueeze(-1)\n        y = tg[:, -1, :]\n        assert p.shape == y.shape\n        loss = loss_quantile(pred_y, y) + loss_mse(p, y)\n        loss += self.calc_loss_train_constraint(**params)\n        return loss\n\n    def calc_loss_train_constraint(self, **params) -> Tsr:\n        if \"batch\" not in params:  # possibly in prediction context\n            return 0.0\n\n        bti_org, __bti, btc, bkn, __btg = params[\"batch\"]\n        __x, a, b, o = self._forward(bti_org, btc, bkn)\n        # l2 = nn.MSELoss()\n        # l1 = nn.L1Loss()\n        l1 = nn.SmoothL1Loss()\n        n_smooth_steps = 3\n        loss_constraint = 0.0\n        for idx in range(1, n_smooth_steps + 1):\n            loss_constraint += (\n                l1(a[:-idx], a[idx:]) + l1(b[:-1], b[1:]) + l1(o[:-1], o[1:])\n            )\n        return loss_constraint\n","repo_name":"tkosht/forecaster","sub_path":"src/modules/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"11382169334","text":"from __future__ import print_function\nimport numpy as 
np\n\nfrom openmdao.api import ExplicitComponent\n\nclass VortexMesh(ExplicitComponent):\n \"\"\"\n Compute the vortex mesh based on the deformed aerodynamic mesh.\n\n Parameters\n ----------\n def_mesh[nx, ny, 3] : numpy array\n We have a mesh for each lifting surface in the problem.\n That is, if we have both a wing and a tail surface, we will have both\n `wing_def_mesh` and `tail_def_mesh` as inputs.\n\n Returns\n -------\n vortex_mesh[nx, ny, 3] : numpy array\n The actual aerodynamic mesh used in VLM calculations, where we look\n at the rings of the panels instead of the panels themselves. That is,\n this mesh coincides with the quarter-chord panel line, except for the\n final row, where it lines up with the trailing edge.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surfaces', types=list)\n\n def setup(self):\n surfaces = self.options['surfaces']\n\n # Because the vortex_mesh always comes from the deformed mesh in the\n # same way, the Jacobian is fully linear and can be set here instead\n # of doing compute_partials.\n # We do have to account for symmetry here to create a ghost mesh\n # by mirroring the symmetric mesh.\n for surface in surfaces:\n mesh=surface['mesh']\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n name = surface['name']\n\n mesh_name = '{}_def_mesh'.format(name)\n vortex_mesh_name = '{}_vortex_mesh'.format(name)\n\n self.add_input(mesh_name, shape=(nx, ny, 3), units='m')\n\n if surface['symmetry']:\n self.add_output(vortex_mesh_name, shape=(nx, ny*2-1, 3), units='m')\n\n mesh_indices = np.arange(nx * ny * 3).reshape((nx, ny, 3))\n vor_indices = np.arange(nx * (2*ny-1) * 3).reshape((nx, (2*ny-1), 3))\n\n rows = np.tile(vor_indices[:(nx-1), :ny, :].flatten(), 2)\n rows = np.hstack((rows, vor_indices[-1 , :ny, :].flatten()))\n\n rows = np.hstack((rows, np.tile(vor_indices[:(nx-1), ny:, [0, 2]][:, ::-1, :].flatten(), 2)))\n rows = np.hstack((rows, vor_indices[-1, ny:, [0, 2]].flatten()[::-1]))\n\n rows = np.hstack((rows, np.tile(vor_indices[:(nx-1), ny:, 1][:, ::-1].flatten(), 2)))\n rows = np.hstack((rows, vor_indices[-1, ny:, 1].flatten()))\n\n cols = np.concatenate([\n mesh_indices[:-1, :, :].flatten(),\n mesh_indices[1: , :, :].flatten(),\n mesh_indices[-1 , :, :].flatten(),\n\n mesh_indices[:-1, :-1, [0, 2]].flatten(),\n mesh_indices[1: , :-1, [0, 2]].flatten(),\n mesh_indices[-1 , :-1, [0, 2]][::-1, :].flatten(),\n\n mesh_indices[:-1, :-1, 1].flatten(),\n mesh_indices[1: , :-1, 1].flatten(),\n mesh_indices[-1 , :-1, 1][::-1].flatten(),\n ])\n\n data = np.concatenate([\n 0.75 * np.ones((nx-1) * ny * 3),\n 0.25 * np.ones((nx-1) * ny * 3),\n np.ones(ny * 3), # back row\n\n 0.75 * np.ones((nx-1) * (ny-1) * 2),\n 0.25 * np.ones((nx-1) * (ny-1) * 2),\n np.ones((ny-1) * 2), # back row\n\n -0.75 * np.ones((nx-1) * (ny-1)),\n -.25 * np.ones((nx-1) * (ny-1)),\n -np.ones((ny-1)), # back row\n ])\n\n self.declare_partials(vortex_mesh_name, mesh_name, val=data, rows=rows, cols=cols)\n\n else:\n self.add_output(vortex_mesh_name, shape=(nx, ny, 3), units='m')\n\n mesh_indices = np.arange(nx * ny * 3).reshape(\n (nx, ny, 3))\n\n rows = np.tile(mesh_indices[:(nx-1), :, :].flatten(), 2)\n rows = np.hstack((rows, mesh_indices[-1 , :, :].flatten()))\n cols = np.concatenate([\n mesh_indices[:-1, :, :].flatten(),\n mesh_indices[1: , :, :].flatten(),\n mesh_indices[-1 , :, :].flatten(),\n ])\n\n data = np.concatenate([\n 0.75 * np.ones((nx-1) * ny * 3),\n 0.25 * np.ones((nx-1) * ny * 3),\n np.ones(ny * 3), # back row\n ])\n\n self.declare_partials(vortex_mesh_name, mesh_name, 
val=data, rows=rows, cols=cols)\n\n    def compute(self, inputs, outputs):\n        surfaces = self.options['surfaces']\n\n        for surface in surfaces:\n            nx = surface['mesh'].shape[0]\n            ny = surface['mesh'].shape[1]\n            name = surface['name']\n\n            mesh_name = '{}_def_mesh'.format(name)\n            vortex_mesh_name = '{}_vortex_mesh'.format(name)\n\n            if surface['symmetry']:\n                mesh = np.zeros((nx, ny*2-1, 3), dtype=type(inputs[mesh_name][0, 0, 0]))\n                mesh[:, :ny, :] = inputs[mesh_name]\n                mesh[:, ny:, :] = inputs[mesh_name][:, :-1, :][:, ::-1, :]\n                mesh[:, ny:, 1] *= -1.\n            else:\n                mesh = inputs[mesh_name]\n\n            outputs[vortex_mesh_name][:-1, :, :] = 0.75 * mesh[:-1, :, :] + 0.25 * mesh[1:, :, :]\n            outputs[vortex_mesh_name][-1, :, :] = mesh[-1, :, :]\n","repo_name":"mid2SUPAERO/ecoHALE","sub_path":"openaerostruct/aerodynamics/vortex_mesh.py","file_name":"vortex_mesh.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"}
{"seq_id":"19996356259","text":"def checkWincon(gameState):\n    # the eight winning lines: three rows, three columns, two diagonals\n    lines = [row for row in gameState]\n    lines += [[gameState[r][c] for r in range(3)] for c in range(3)]\n    lines.append([gameState[i][i] for i in range(3)])\n    lines.append([gameState[i][2-i] for i in range(3)])\n    for line in lines:\n        # a line wins only when all three cells hold the same non-empty mark\n        if line[0] != 0 and line[0] == line[1] == line[2]:\n            return True\n    return False\n\nwincon = False\ngameBoard = [[0,0,0],[0,0,0],[0,0,0]]\nplayer = True\nwhile not wincon:\n    print(' 1 2 3 ')\n    for count, row in enumerate(gameBoard): #enumerate returns (int, value)\n        print(count+1,row)\n\n    print(f\"Player {'1' if player else '2'}, enter your move by entering row number, followed by enter key, then column number:\")\n    selectedRow = int(input(\"Row:\"))\n    selectedColumn = int(input(\"Column:\"))\n    gameBoard[selectedRow-1][selectedColumn-1] = 'x' if player else 'o'\n    player = not player\n    wincon = checkWincon(gameBoard)\n","repo_name":"kylebiesecker/python-club","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"17601968507","text":"import numpy as np\nimport scipy.optimize\nfrom scipy.integrate import cumulative_trapezoid\n\n\ndef find_first_value_location_in_series(frq_timeseries, frq_desired):\n    if frq_desired < np.min(frq_timeseries):\n        raise Exception(\"Desired frequency out of bounds, lower than min frequency\")\n\n    if frq_desired > np.max(frq_timeseries):\n        raise Exception(\"Desired frequency out of bounds, higher than max frequency\")\n    \"\"\" \n    We traverse the array from the start to find the first location where the i_th value is at or\n    below the desired value while the (i+1)_th value is at or above it, hence locating the desired\n    value somewhere between those two points. We then choose the value closer to the value desired\n    (among i and i+1) and call it the location of the desired value. 
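For example (illustrative\n    values): with frq_timeseries = [10.0, 20.0, 30.0] and frq_desired = 24.0, the bracketing\n    indices are 1 and 2, and index 1 is returned since 20.0 is closer to 24.0 than 30.0 is. 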
\n \"\"\"\n\n for idx, f_value in enumerate(frq_timeseries):\n if idx != len(frq_timeseries) - 1:\n if (\n frq_timeseries[idx] <= frq_desired\n and frq_timeseries[idx + 1] >= frq_desired\n ):\n fr1 = frq_timeseries[idx]\n fr2 = frq_timeseries[idx + 1]\n\n if abs(frq_desired - fr1) <= abs(frq_desired - fr2):\n final_idx = idx\n else:\n final_idx = idx + 1\n break\n return final_idx\n\n\ndef find_last_value_location_in_series(frq_timeseries, frq_desired):\n if frq_desired < np.min(frq_timeseries):\n raise Exception(\"Desired frequency out of bounds, lower than min frequency\")\n\n if frq_desired > np.max(frq_timeseries):\n raise Exception(\"Desired frequency out of bounds, higher than max frequency\")\n \"\"\" \n We reverse the array and traverse it to find the location where the i_th value is more than\n the desired value while the i+1_th value is less, hence locating the desired value somewhere\n between those two points. We then choose the value closer to the value desired (among i and i+1) \n and call it the location of the desired value. \n \"\"\"\n\n reversed_freq_timeseries = frq_timeseries[::-1]\n final_idx = len(reversed_freq_timeseries) - 1\n\n for idx, f_value in enumerate(reversed_freq_timeseries):\n if idx != len(reversed_freq_timeseries) - 1:\n if (\n reversed_freq_timeseries[idx] >= frq_desired\n and reversed_freq_timeseries[idx + 1] <= frq_desired\n ):\n fr1 = reversed_freq_timeseries[idx]\n fr2 = reversed_freq_timeseries[idx + 1]\n\n if abs(frq_desired - fr1) <= abs(frq_desired - fr2):\n final_idx = idx\n else:\n final_idx = idx + 1\n break\n return len(frq_timeseries) - 1 - final_idx\n\n\ndef mismatch_discrete(w1, w2, sample_indices_insp, sample_indices_mr):\n w1_d = w1[sample_indices_insp]\n w2_d = w2[sample_indices_mr] # can't give the same comb to w2\n w2sq = np.square(np.abs(w2_d))\n # w1sq = np.square(np.abs(w2_d)) # another normalising factor can be (w1sq + w2sq) / 2\n diff = np.abs(w1_d - w2_d)\n diffsq = np.square(diff)\n mm = 0.5 * (np.sum(diffsq) / np.sum(w2sq))\n return mm\n\n\ndef align_in_phase(\n inspiral,\n merger_ringdown,\n sample_indices_insp,\n sample_indices_mr,\n t1_index_insp,\n t2_index_insp,\n t1_index_mr,\n t2_index_mr,\n m_mode=2,\n):\n # Function alignes the two waveforms using the phase, optimised over the attachment region\n # m from l,m mode\n def optfn_ph(phaseshift_correction):\n phase_corrected_insp = inspiral * np.exp(1j * m_mode * phaseshift_correction)\n m_d = mismatch_discrete(\n phase_corrected_insp[t1_index_insp : t2_index_insp + 1],\n merger_ringdown[t1_index_mr : t2_index_mr + 1],\n sample_indices_insp,\n sample_indices_mr,\n )\n return m_d\n\n phase_optimizer = scipy.optimize.minimize(optfn_ph, 0)\n phaseshift_required_for_alignment = phase_optimizer.x\n\n inspiral_aligned = inspiral * np.exp(\n 1j * m_mode * phaseshift_required_for_alignment\n )\n\n return inspiral_aligned, phaseshift_required_for_alignment\n\n\ndef blend_series(x1, x2, t1_index_insp, t2_index_insp, t1_index_mr, t2_index_mr):\n assert (\n t1_index_mr - t2_index_mr == t1_index_insp - t2_index_insp\n ), \"Inconsistent indices passed to blending function\"\n\n # blending fn is an array\n blfn_var = np.arange(t1_index_insp, t2_index_insp)\n tau = np.square(\n (\n np.sin(\n (np.pi / 2)\n * (blfn_var - t1_index_insp)\n / (t2_index_insp - t1_index_insp)\n )\n )\n )\n\n x_hyb = (1 - tau) * x1[t1_index_insp:t2_index_insp] + tau * x2[\n t1_index_mr:t2_index_mr\n ]\n return x_hyb\n\n\ndef compute_amplitude(waveform):\n amplitude = np.abs(waveform)\n return amplitude\n\n\ndef 
compute_phase(waveform):\n    phase = np.unwrap(-np.angle(waveform))\n    return phase\n\n\ndef compute_frequency(phase, delta_t):\n    frequency = np.gradient(phase, delta_t) / (2 * np.pi)\n    return frequency\n\n\ndef hybridize_modes(\n    inspiral_modes,\n    merger_ringdown_modes,\n    frq_attach,\n    frq_width=10.0,\n    delta_t=1.0 / 4096,\n    no_sp=8,\n    modes_to_hybridize=[(2, 2), (3, 3), (4, 4)],\n    mode_to_align_by=(2, 2),\n    include_conjugate_modes=True,\n    verbose=True,\n):\n    \"\"\"Hybridize inspiral and merger-ringdown modes\n\n    Inputs\n    ------\n    inspiral_modes: dict\n        Dictionary indexed by (l, m) containing numpy-like arrays of\n        complex-valued mode timeseries.\n    merger_ringdown_modes: dict\n        Dictionary indexed by (l, m) containing numpy-like arrays of\n        complex-valued mode timeseries.\n\n    frq_attach: float\n        Frequency (Hz) at which to align the inspiral and merger-ringdown modes.\n    frq_width: {10.0, float}\n        Frequency (Hz) window around the central attachment frequency over which\n        hybridization of modes is performed.\n    delta_t: {1/4096, float}\n        Sample spacing of the timeseries, in seconds (the inverse of the sample rate).\n    no_sp: {8, int}\n        Number of sample points taken across the attachment window when computing\n        the discrete mismatch used for phase alignment.\n    modes_to_hybridize: {[(2, 2), (3, 3), (4, 4)], list}\n        List of modes as tuples of (l, m) values to hybridize\n    mode_to_align_by: {(2, 2), tuple}\n        One specific mode (l, m) value that is to be treated as baseline for\n        time/phase alignment. We recommend using only the (2, 2) mode for this.\n    include_conjugate_modes: {True, bool}\n        When set to True, we also consider (l, -m) modes in addition to (l, m) ones.\n    verbose: {True, bool}\n        Set this to True to enable logging output.\n    \"\"\"\n    modes_to_hybridize = list(modes_to_hybridize)  # copy: this list is mutated below\n    modes_not_aligned_by = modes_to_hybridize.copy()\n    if include_conjugate_modes:\n        for el, em in modes_to_hybridize.copy():\n            modes_to_hybridize.append((el, -em))\n        for el, em in modes_not_aligned_by.copy():\n            modes_not_aligned_by.append((el, -em))\n    modes_not_aligned_by.remove(mode_to_align_by)\n\n    # Input checks\n    for lm in modes_to_hybridize + [mode_to_align_by]:\n        if lm not in inspiral_modes or lm not in merger_ringdown_modes:\n            raise IOError(\n                \"We cannot hybridize {} mode as it is missing in the input inspiral modes\"\n                \" ({}) or the merger ringdown modes ({})\".format(\n                    lm, lm in inspiral_modes, lm in merger_ringdown_modes\n                )\n            )\n    if verbose:\n        print(\"Hybridizing the following modes: {}\".format(modes_to_hybridize))\n        print(\"By aligning {} mode\".format(mode_to_align_by))\n        print(\n            \"..and inheriting the phase/time shifts for alignment of {} modes\".format(\n                modes_not_aligned_by\n            )\n        )\n\n    # Get amplitude and phase for all modes\n    phase_insp = {}\n    frq_insp = {}\n    phase_mr = {}\n    frq_mr = {}\n    amp_mr = {}\n\n    for el, em in modes_to_hybridize:\n        phase_insp[(el, em)] = compute_phase(inspiral_modes[(el, em)])\n        frq_insp[(el, em)] = compute_frequency(phase_insp[(el, em)], delta_t)\n\n        phase_mr[(el, em)] = compute_phase(merger_ringdown_modes[(el, em)])\n        frq_mr[(el, em)] = compute_frequency(phase_mr[(el, em)], delta_t)\n        amp_mr[(el, em)] = compute_amplitude(merger_ringdown_modes[(el, em)])\n\n        if verbose:\n            print(\n                f\"INSPIRAL mode ({el}, {em}) goes from {frq_insp[(el, em)][0]}Hz to\"\n                f\" {frq_insp[(el, em)][-1]}Hz\"\n            )\n            print(\n                f\"MERGER mode ({el}, {em}) goes from {frq_mr[(el, em)][0]}Hz to\"\n                f\" {frq_mr[(el, em)][-1]}Hz\"\n            )\n\n    \"\"\" first we need to find the attachment region, based on the frequency \"\"\"\n\n    \"\"\" \n    We search left to right in merger-ringdown to avoid frequency fluctuations \n    after the merger, and right to left in inspiral to avoid frequency degeneracy\n    caused by eccentricity \n    
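For example (illustrative numbers only): with frq_attach = 110.0 and\n    frq_width = 10.0, the attachment window spans 105 Hz to 115 Hz in both\n    series.\n    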
\"\"\"\n el, em = mode_to_align_by\n\n t1_index_mr = find_first_value_location_in_series(\n frq_mr[(el, em)], frq_attach - frq_width / 2\n )\n\n t2_index_mr = find_first_value_location_in_series(\n frq_mr[(el, em)], frq_attach + frq_width / 2\n )\n \"\"\" \n For eccentric inspiral, there will be multiple instances of the \n same frequency. Pick the one having the highest index value (i.e. \n the one at the rightmost occurance in time) \n\n \"\"\"\n t2_index_insp = find_last_value_location_in_series(\n frq_insp[(el, em)], frq_attach + frq_width / 2\n )\n\n # another way to define t2_index_mr is through number of points in the inspiral window\n t1_index_insp = t2_index_insp - (t2_index_mr - t1_index_mr)\n \"\"\" \n Theoretically, we NEED a timeshift to align the waveforms in frequency. \n Instead of shifting one of the two waveforms for alignment, we are defining\n the time such that the frequencies are pre-aligned to the best of the \n discrete interval errors. That is: \n deltaT (timeshift) = t1_index_insp - t1_index_mr\n The mathematical way is to optimise the difference in frequencies over the matching \n region and using that to determine deltaT, hence arriving at t1_index_mr. \n \"\"\"\n\n sample_indices_insp = (\n np.linspace(t1_index_insp, t2_index_insp, no_sp).astype(int) - t1_index_insp\n )\n sample_indices_mr = (\n sample_indices_insp # since the attachment region in both has the same length\n )\n \"\"\" alignment using corrective phase addition \"\"\"\n\n inspiral_modes_aligned = {}\n amp_insp_aligned = {}\n phase_insp_aligned = {}\n frq_insp_aligned = {}\n\n inspiral_modes_aligned[(el, em)], phase_correction = align_in_phase(\n inspiral_modes[(el, em)],\n merger_ringdown_modes[(el, em)],\n sample_indices_insp,\n sample_indices_mr,\n t1_index_insp,\n t2_index_insp,\n t1_index_mr,\n t2_index_mr,\n )\n\n amp_insp_aligned[(el, em)] = compute_amplitude(inspiral_modes_aligned[(el, em)])\n phase_insp_aligned[(el, em)] = compute_phase(inspiral_modes_aligned[(el, em)])\n phph = compute_phase(inspiral_modes_aligned[(el, em)])\n\n for el, em in modes_not_aligned_by:\n inspiral_modes_aligned[(el, em)] = inspiral_modes[(el, em)] * np.exp(\n 1j * em * phase_correction\n )\n amp_insp_aligned[(el, em)] = compute_amplitude(inspiral_modes_aligned[(el, em)])\n phase_insp_aligned[(el, em)] = compute_phase(inspiral_modes_aligned[(el, em)])\n\n \"\"\"\n It would be same as frq_mr as the corrected phase factor will be canceled in the derivative, \n defining frq_insp_aligned just for consistency \n \"\"\"\n frq_insp_aligned = frq_insp\n\n \"\"\" Performing attachment using the blending function \"\"\"\n\n amp_hyb_window = {}\n amp_hyb_full = {}\n frq_hyb_window = {}\n phase_hyb_window = {}\n phase_hyb_full = {}\n hybrid_modes = {}\n\n for el, em in modes_to_hybridize:\n amp_hyb_window[(el, em)] = blend_series(\n amp_insp_aligned[(el, em)],\n amp_mr[(el, em)],\n t1_index_insp,\n t2_index_insp,\n t1_index_mr,\n t2_index_mr,\n )\n frq_hyb_window[(el, em)] = blend_series(\n frq_insp_aligned[(el, em)],\n frq_mr[(el, em)],\n t1_index_insp,\n t2_index_insp,\n t1_index_mr,\n t2_index_mr,\n )\n \"\"\" Integrating frq_hyb to obtain phase_hyb and removing discontinuities, \n compiling amp_hyb and phase_hyb to obtain the hybrid waveform. 
\"\"\"\n\n phase_hyb_window[(el, em)] = (2 * np.pi) * cumulative_trapezoid(\n frq_hyb_window[(el, em)], dx=delta_t, initial=0\n )\n\n \"\"\" Right now the phase is integrated only inside the hybrid window, \n need to add constants to preserve phase continuity and compile full IMR phase \"\"\"\n\n def remove_phase_discontinuity(phase_insp_aligned, phase_hyb_window, phase_mr):\n delta1 = phase_insp_aligned[t1_index_insp] - phase_hyb_window[0]\n phase_hyb_1 = np.append(\n phase_insp_aligned[:t1_index_insp], phase_hyb_window + delta1\n )\n delta2 = phase_hyb_1[t2_index_insp - 1] - phase_mr[t2_index_mr - 1]\n phase_hyb_2 = np.append(\n phase_hyb_1[: t2_index_insp - 1], phase_mr[t2_index_mr - 1 :] + delta2\n )\n return phase_hyb_2\n\n for el, em in modes_to_hybridize:\n phase_hyb_full[(el, em)] = remove_phase_discontinuity(\n phase_insp_aligned[(el, em)], phase_hyb_window[(el, em)], phase_mr[(el, em)]\n )\n\n amp_hyb_full[(el, em)] = np.append(\n np.concatenate(\n [amp_insp_aligned[(el, em)][:t1_index_insp], amp_hyb_window[(el, em)]]\n )[: t2_index_insp - 1],\n amp_mr[(el, em)][t2_index_mr - 1 :],\n )\n\n hybrid_modes[(el, em)] = amp_hyb_full[(el, em)] * np.exp(\n -1j * phase_hyb_full[(el, em)]\n )\n\n return (\n hybrid_modes,\n t1_index_insp,\n t1_index_mr,\n t2_index_insp,\n t2_index_mr,\n frq_insp,\n frq_mr,\n frq_insp_aligned,\n frq_hyb_window,\n inspiral_modes_aligned,\n sample_indices_insp,\n sample_indices_mr,\n amp_insp_aligned,\n amp_hyb_window,\n amp_hyb_full,\n phase_insp,\n phase_insp_aligned,\n phase_hyb_window,\n phase_hyb_full,\n phase_correction,\n phph,\n )\n","repo_name":"gwnrtools/gwnrtools","sub_path":"gwnr/waveform/hybridize.py","file_name":"hybridize.py","file_ext":"py","file_size_in_byte":14452,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"72641648876","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"./\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nclass ImageDataGenerator_self(object):\n\n    def __init__(self):\n\n        self.reset()\n\n\n\n    def reset(self):\n\n        self.images = []\n\n        self.labels = []\n\n\n\n    def flow_from_directory(self, directory, classes, batch_size=32):\n\n        # dict used for label encoding (mapping each class name to an int)\n\n        classes = {v: i for i, v in enumerate(sorted(classes))}\n\n        while True:\n\n            # pull the image paths from the directory\n\n            for path in pathlib.Path(directory).iterdir():\n\n                # load the image, convert it to RGB and then to a NumPy array, and store it in self.images\n\n                with Image.open(path) as f:\n\n                    self.images.append(np.asarray(f.convert('RGB'), dtype=np.float32))\n\n                # extract the label from the file name and store it in self.labels\n\n                _, y = path.stem.split('_')\n\n                self.labels.append(to_categorical(classes[y], len(classes)))\n\n\n\n                # keep accumulating until batch_size items are stored in self.images / self.labels\n\n                # once batch_size items are stored, yield them and empty self.images / self.labels\n\n                if len(self.images) == batch_size:\n\n                    inputs = np.asarray(self.images, dtype=np.float32)\n\n                    targets = np.asarray(self.labels, dtype=np.float32)\n\n                    self.reset()\n\n                    yield inputs, targets\n\n\n\n    def flow_from_dir2(self, data_dir, data_list, label_train, classes, batch_size=32):\n\n        # NOTE: this re-reads the labels from the global label_csv (indexed by file name),\n\n        # so the label_train argument passed in is effectively ignored\n\n        label_train = pd.read_csv(label_csv,index_col=0)\n\n        while True:\n\n            for img_path in data_list:\n\n                img = cv2.imread(data_dir + \"/\" + img_path)\n\n                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)\n\n                #img = img.transpose((2, 0, 1))\n\n                #img = img.reshape((1,) + img.shape)\n\n                self.images.append(img)\n\n\n\n                # look up the label for this file name and store it in self.labels\n\n                #_, y = path.stem.split('_')\n\n                y = label_train.loc[img_path,\"has_cactus\"]\n\n\n\n                self.labels.append(keras.utils.to_categorical(classes[y], len(classes)))\n\n\n\n                # keep accumulating until batch_size items are stored in self.images / self.labels\n\n                # once batch_size items are stored, yield them and empty self.images / self.labels\n\n                if len(self.images) == batch_size:\n\n                    inputs = np.asarray(self.images, dtype=np.float32)\n\n                    targets = np.asarray(self.labels, dtype=np.float32)\n\n                    self.reset()\n\n                    yield inputs, targets\nimport csv,os\n\nimport numpy as np\n\nimport pandas as pd\n\nimport keras\n\ntrain_order = os.listdir(\"../input/train/train\")\n\npd_train_order = pd.DataFrame(train_order,columns = [\"id\"])\n\nlabel_train = pd.read_csv(\"../input/train.csv\")\n\ndf_train_label = pd.merge(pd_train_order, label_train)\nimport random\n\ndata_dir=\"../input/train/train\"\n\ndata_list=os.listdir(data_dir)\n\nlabel_csv=\"../input/train.csv\"\n\nclasses=[\"0\",\"1\"]\n\nbatch_size = 10\n\nval_ratio = 0.1\n\n\n\ntrain_list = []\n\nval_list = []\n\n\n\nfor data in data_list:\n\n    if random.random() > val_ratio:\n\n        train_list.append(data)\n\n    else:\n\n        val_list.append(data)\n\nprint(len(train_list))\n\nprint(len(val_list))\n\n\n\ntrain_datagen=ImageDataGenerator_self()\n\ntest_datagen=ImageDataGenerator_self()\n\nfrom matplotlib import pyplot as plt\n\nfrom keras.applications.vgg16 import VGG16\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras.models import Sequential, Model\n\nfrom keras.layers import Input, Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom keras import optimizers\n\nfrom keras.utils import plot_model\n\n
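# Quick sanity-check sketch (illustrative only, using the names defined above):\n# pull a single batch from the custom generator to confirm shapes before training.\n#\n#   gen = train_datagen.flow_from_dir2(data_dir=data_dir, data_list=train_list,\n#                                      label_train=label_train, classes=classes,\n#                                      batch_size=batch_size)\n#   x_batch, y_batch = next(gen)\n#   # x_batch: (batch_size, height, width, 3) float32 images\n#   # y_batch: (batch_size, 2) one-hot labels\n\n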
from keras.callbacks import ModelCheckpoint\n\nimport cv2\n\n\n\nimg_height,img_width,img_channel = cv2.imread(data_dir + \"/\" + data_list[0]).shape\n\nbase_model = VGG16(include_top=False, weights=None, input_tensor=None, input_shape=(img_width,img_height,img_channel))\n\n\n\nn_categories = 2\n\n\n\nx=base_model.output\n\nx=GlobalAveragePooling2D()(x)\n\nx=Dense(1024,activation='relu')(x)\n\nprediction=Dense(n_categories,activation='softmax')(x)\n\nmodel=Model(inputs=base_model.input,outputs=prediction)\n\n\n\nmodel.compile(optimizer=optimizers.SGD(lr=0.0001,momentum=0.9),\n\n              loss='categorical_crossentropy',\n\n              metrics=['accuracy'])\n\n\n\n#model.summary()\nos.mkdir(\"/output\")\n\nfpath = '/output/weights.{epoch:03d}-{loss:.2f}-{acc:.2f}-{val_loss:.2f}-{val_acc:.2f}.hdf5'\n\nmodelCheckpoint = ModelCheckpoint(filepath = fpath,\n\n                                  monitor='loss',\n\n                                  verbose=1,\n\n                                  save_best_only=True,\n\n                                  save_weights_only=False,\n\n                                  mode='min',\n\n                                  period=1)\n\n\n\nmodel.fit_generator(\n\n    generator=train_datagen.flow_from_dir2(data_dir=data_dir, data_list=train_list, label_train=label_train, classes=classes,batch_size=batch_size),\n\n    #generator=train_datagen.flow_from_dir2(data_dir, data_list, label_csv,classes,batchsize),\n\n    steps_per_epoch=int(len(train_list) / batch_size),  # steps should cover the training split, not all of data_list\n\n    epochs=100,\n\n    verbose=2,\n\n    validation_data=test_datagen.flow_from_dir2(data_dir=data_dir, data_list=val_list, label_train=label_train, classes=classes,batch_size=batch_size),\n\n    validation_steps=int(len(val_list) / batch_size),\n\n    callbacks=[modelCheckpoint]\n\n    )\nmodel_json_str = model.to_json()\n\nopen(\"/output/vgg16.json\",\"w\").write(model_json_str)\nimport pathlib\n\nfrom keras.models import model_from_json\n\nweight_file = os.listdir(\"/output\")\n\nlatest_time = 0.0\n\nfor weight in weight_file:\n\n    if \".hdf5\" in weight:\n\n        st = pathlib.Path(\"/output/\" + weight).stat()\n\n        if latest_time < st.st_mtime:\n\n            latest_time = st.st_mtime\n\n            latest_weight = weight\n\nprint(latest_weight)\n\nprint(latest_time)\n\n\n\nmodel = model_from_json(open(\"/output/\" + \"/vgg16.json\").read())\n\nmodel.load_weights(\"/output/\" + latest_weight)\n\nmodel.summary()\noutput = [[\"id\",\"has_cactus\"]]\n\npred_list = os.listdir(\"../input/test/test\")\n\nfor pred_img in pred_list:\n\n    img = cv2.cvtColor(cv2.imread(\"../input/test/test/\" + pred_img), cv2.COLOR_BGR2RGB).astype(np.float32)\n\n    img = img.reshape((1,) + img.shape)\n\n    result = model.predict(img)\n\n    output.append([os.path.basename(pred_img),result[0][1]])\n\n\n\nwith open('/output/pred_result.csv', 'w') as f:\n\n    writer = csv.writer(f)\n\n    writer.writerows(output)\nsub = pd.read_csv('/output/pred_result.csv')\n\nsub.to_csv(\"submission.csv\",index=False)\nout = os.listdir(\"/\")\n\nprint(out)","repo_name":"aorursy/new-nb-7","sub_path":"shotaho_kernel4265908402.py","file_name":"shotaho_kernel4265908402.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"7548922104","text":"import os\n\nimport numpy as np\nimport tensorrt as trt\n\n\nTRT_LOGGER = trt.Logger()\n\n# This script converts an ONNX export to TensorRT, which lets you use the DLA accelerator cores on your device\n# This has to be run on the device that the inference will occur on to get the best actual performance\n\nonnx_file_path = \"/home/robot/yolov5s.onnx\"\nengine_file_path = \"/home/robot/yolov5s.trt\"\n
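\n# Hedged usage sketch (illustrative; not executed by this script): once the\n# engine has been serialized below, it can later be reloaded for inference:\n#\n#   with open(engine_file_path, \"rb\") as f, trt.Runtime(TRT_LOGGER) as runtime:\n#       engine = runtime.deserialize_cuda_engine(f.read())\n#       context = engine.create_execution_context()\n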
\nwith trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n    builder.max_workspace_size = 1 << 30  # 1GB\n    builder.max_batch_size = 1\n\n    # Parse model file\n    if not os.path.exists(onnx_file_path):\n        print('ONNX file {} not found, please export the model to ONNX first to generate it.'.format(onnx_file_path))\n        exit(1)\n\n    print('Loading ONNX file from path {}...'.format(onnx_file_path))\n    with open(onnx_file_path, 'rb') as model:\n        print('Beginning ONNX file parsing')\n        if not parser.parse(model.read()):\n            # surface parser errors instead of silently building a broken network\n            for i in range(parser.num_errors):\n                print(parser.get_error(i))\n            exit(1)\n    print('Completed parsing of ONNX file')\n    print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))\n    engine = builder.build_cuda_engine(network)\n\n    print(\"Completed creating Engine\")\n    with open(engine_file_path, \"wb\") as f:\n        f.write(engine.serialize())","repo_name":"GoodDogAI/bumble","sub_path":"onnx_to_trt.py","file_name":"onnx_to_trt.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"72641809196","text":"import os\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom sklearn import preprocessing\n\nimport xgboost as xgb\n\nimport lightgbm as lgb\n\nimport optuna\n\nimport functools\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import roc_curve,auc,accuracy_score,confusion_matrix,f1_score\ntrain_transaction = pd.read_csv('../input/train_transaction.csv', index_col='TransactionID')\n\ntest_transaction = pd.read_csv('../input/test_transaction.csv', index_col='TransactionID')\n\n\n\ntrain_identity = pd.read_csv('../input/train_identity.csv', index_col='TransactionID')\n\ntest_identity = pd.read_csv('../input/test_identity.csv', index_col='TransactionID')\n\n\n\nsample_submission = pd.read_csv('../input/sample_submission.csv', index_col='TransactionID')\n\n\n\ntrain = train_transaction.merge(train_identity, how='left', left_index=True, right_index=True)\n\ntest = test_transaction.merge(test_identity, how='left', left_index=True, right_index=True)\n\n\n\nprint(train.shape)\n\nprint(test.shape)\n\n\n\ny_train = train['isFraud'].copy()\n\ndel train_transaction, train_identity, test_transaction, test_identity\n\n\n\n# Drop target, fill in NaNs\n\nX_train = train.drop('isFraud', axis=1)\n\nX_test = test.copy()\n\n\n\ndel train, test\n\n\n\nX_train = X_train.fillna(-999)\n\nX_test = X_test.fillna(-999)\n\n\n\n# Label Encoding\n\nfor f in X_train.columns:\n\n    if X_train[f].dtype=='object' or X_test[f].dtype=='object': \n\n        lbl = preprocessing.LabelEncoder()\n\n        lbl.fit(list(X_train[f].values) + list(X_test[f].values))\n\n        X_train[f] = lbl.transform(list(X_train[f].values))\n\n        X_test[f] = lbl.transform(list(X_test[f].values))    \n(X_train,X_eval,y_train,y_eval) = train_test_split(X_train,y_train,test_size=0.2,random_state=0)\ndef opt(X_train, y_train, X_test, y_test, trial):\n\n    #param_list\n\n    n_estimators = trial.suggest_int('n_estimators', 0, 1000)\n\n    max_depth = trial.suggest_int('max_depth', 1, 20)\n\n    min_child_weight = trial.suggest_int('min_child_weight', 1, 20)\n\n    #learning_rate = trial.suggest_discrete_uniform('learning_rate', 0.01, 0.1, 0.01)\n\n    scale_pos_weight = trial.suggest_int('scale_pos_weight', 1, 100)\n\n    subsample = trial.suggest_discrete_uniform('subsample', 0.5, 0.9, 0.1)\n\n    colsample_bytree = trial.suggest_discrete_uniform('colsample_bytree', 0.5, 0.9, 0.1)\n\n\n\n    xgboost_tuna = xgb.XGBClassifier(\n\n        random_state=42, \n\n        tree_method='gpu_hist',\n\n        n_estimators = n_estimators,\n\n        max_depth = max_depth,\n\n        
min_child_weight = min_child_weight,\n\n #learning_rate = learning_rate,\n\n scale_pos_weight = scale_pos_weight,\n\n subsample = subsample,\n\n colsample_bytree = colsample_bytree,\n\n )\n\n xgboost_tuna.fit(X_train, y_train)\n\n tuna_pred_test = xgboost_tuna.predict(X_test)\n\n \n\n return (1.0 - (accuracy_score(y_test, tuna_pred_test)))\nstudy = optuna.create_study()\n\nstudy.optimize(functools.partial(opt, X_train, y_train, X_eval, y_eval), n_trials=100)\nstudy.best_params\nclf = xgb.XGBClassifier(tree_method='gpu_hist',**study.best_params)\n\nclf.fit(X_train, y_train)\nsample_submission['isFraud'] = clf.predict_proba(X_test)[:,1]\n\nsample_submission.to_csv('submission.csv')","repo_name":"aorursy/new-nb-7","sub_path":"snakayama_xgboost-using-optuna.py","file_name":"snakayama_xgboost-using-optuna.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25408636332","text":"\"\"\"Scrapes and summarizes news articles from the New York Times.\"\"\"\n\nfrom newspaper import Article\nimport nltk\n\n\ndef summarize_article(url):\n \"\"\"Summarizes the article and provides valuable information regarding the article metadata, including images and attributions.\"\"\"\n article = Article(url)\n\n # download article and parse\n article.download()\n article.parse()\n\n # download sentence tokenizer to extract/detect words\n nltk.download(\"punkt\")\n\n # allow for natural language processing\n article.nlp()\n\n # get the author(s) of the article\n author_string = \"Author(s): \"\n for author in article.authors:\n author_string += (\n author # adds all authors (if more than 1) to the author string.\n )\n print(author_string)\n\n # get the publishing date of the article\n date = article.publish_date\n print(\"Publish Date: \" + str(date.strftime(\"%m/%d/%Y\")))\n\n # get the top image of the article\n print(\"Top Image URL: \" + str(article.top_image))\n\n # get the article images\n image_string = \"All Images: \"\n for image in article.images:\n image_string += (\n \"\\n\\t\" + image\n ) # adds a newline and a tab before each image is printed\n print(image_string)\n print()\n\n # get the article summary\n print(\"A Quick Article Summary\")\n print(\"----------------------------------------\")\n print(article.summary)\n\n return article.summary\n","repo_name":"minnafeng/media-bias-detector","sub_path":"src/news_scrape.py","file_name":"news_scrape.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73566106157","text":"# -*- coding: UTF8 -*-\n\nimport newrelic.agent\n\nfrom api_helios.base import AbstractHeliosEndpoint\n\nfrom silo_geodata.api import render_place\nfrom silo_geodata.geography.db import Place\n\nfrom sys_base.api import parse_search_string\n\n\nclass GeolocateByName(AbstractHeliosEndpoint):\n\n @newrelic.agent.function_trace()\n def post(self, request, **kwargs):\n \"\"\"\n Finds up to 'max_results' places matching 'search_name'.\n\n ** This endpoint is backed by a high performance datastore and can typically run queries in\n ** under 10ms. It's designed to be called multiple times in rapid succession for implementing\n ** autocomplete functionality.\n\n :param request: HTTP request containing\n\n .. code-block:: javascript\n {\n \"search_str\": \"San Fr\", // String to match against. 
Must be in UTF-8.\n                \"max_results\": \"20\"  // Max number of results to return\n            }\n\n        :return: JSON object\n\n        .. code-block:: javascript\n            {\n                \"data\": {\n\n                    \"viewer_hid\": \"b3665ea5\",\n\n                    \"results\": [\n\n                        {\n                            \"precision\": \"place\",\n                            \"geo_id\": 5128581,\n                            \"name\": \"San Francisco, California, San Francisco County\",\n                            \"latitude\": \"37.77493\",\n                            \"longitude\": \"-122.41942\",\n                            \"country_name\": \"United States\"\n                        },\n                        {\n                            \"precision\": \"place\",\n                            \"geo_id\": 3493146,\n                            \"name\": \"San Francisco de Macoris, Provincia Duarte\",\n                            \"latitude\": \"19.30099\",\n                            \"longitude\": \"-70.25259\",\n                            \"country_name\": \"Dominican Republic\"\n                        }\n                    ]\n                },\n                \"success\": true\n            }\n        \"\"\"\n\n        # We restrict this endpoint to logged-in users to prevent people using us as a free geocoding service\n        target_hid = request.POST.get('target_hid')\n\n        search_str = request.POST.get('search_str')\n        max_results = request.POST.get('max_results', 10)\n\n        upstream = self.get_upstream_for_user(request, target_hid=target_hid)\n\n        if not self.viewer_logged_in(upstream):\n            return self.render_error(request, code='login_required', status=401)\n\n        elif not upstream['target_user']:\n            return self.render_error(request, code='nonexistent_user', status=400)\n\n        if not (upstream['root_active'] or upstream['target_is_viewer']):\n            return self.render_error(request, code='access_denied', status=403)\n\n        # Sanitize search_str\n        # =========================================================================\n\n        tokens = parse_search_string(search_str)\n\n        if len(tokens) < 1:\n            return self.render_error(request, code='search_str_empty', status=400)\n\n        # Sanitize max_results\n        # =========================================================================\n\n        try:\n            max_results = int(max_results)\n\n        except (TypeError, ValueError):\n            return self.render_error(request, code='max_results_not_int', status=400)\n\n        if (max_results < 1) or (max_results > 1000):\n            return self.render_error(request, code='max_results_exceeds_bounds', status=400)\n\n        filters = {\n\n            'fulltext_chain__ft_phrase_startswith': tokens\n        }\n\n        pre = [\n\n            'country',\n            'admin1',\n            'admin2',\n            'admin3',\n            'admin4'\n        ]\n\n        places = list(Place.objects.filter(**filters).prefetch_related(*pre).order_by('-population')[0:max_results])\n\n        # Return JSON response\n        # =======================================================================\n\n        return self.render_response(\n\n            request=request,\n\n            data={\n\n                'viewer_hid': None,\n                'results': [render_place(place) for place in places]\n            }\n        )\n","repo_name":"kairathmann/dating","sub_path":"helios/api_helios/geo/by_name.py","file_name":"by_name.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"43084513280","text":"from django.core.urlresolvers import reverse, resolve\nfrom django.test import TestCase, Client\nfrom timeline.models import Event\n\n\nclass HomeTestCase(TestCase):\n    '''\n    Tests for the Home page.\n    '''\n\n    def setUp(self):\n        self.url = reverse('home')\n        self.response = self.client.get(self.url)\n\n    def test_url(self):\n        '''\n        Ensure the correct URL is used.\n        '''\n        self.assertEqual(self.url, '/')\n\n    def test_loads(self):\n        '''\n        Ensure the page loads OK.\n        '''\n        self.assertEqual(self.response.status_code, 200)\n\n    def test_renders_strings(self):\n        '''\n        Ensure the expected strings are present in the response.\n        '''\n        fragments = [\n            '',\n            '',\n            '
',\n '
',\n '
',\n '