diff --git "a/676.jsonl" "b/676.jsonl" new file mode 100644--- /dev/null +++ "b/676.jsonl" @@ -0,0 +1,654 @@ +{"seq_id":"387475760","text":"\n# https://leetcode.com/problems/path-sum-ii/description/\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def pathSum(self, root, sum):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: List[List[int]]\n \"\"\"\n res = []\n self.dfs(root, sum, res, [])\n return res\n\n @staticmethod\n def dfs(node, remaining, res, path):\n if not node or remaining < node.val:\n return\n\n if remaining == node.val and not node.left and not node.right:\n res.append(path + [node.val])\n return\n\n Solution.dfs(node.left, remaining - node.val, res, path + [node.val])\n Solution.dfs(node.right, remaining - node.val, res, path + [node.val])\n\n\nn5 = TreeNode(5)\nn4 = TreeNode(4)\nn8 = TreeNode(8)\nn11 = TreeNode(11)\nn13 = TreeNode(13)\nn4_2 = TreeNode(4)\nn7 = TreeNode(7)\nn2 = TreeNode(2)\nn5_2 = TreeNode(5)\nn1 = TreeNode(1)\nn5.left, n5.right = n4, n8\nn4.left = n11\nn8.left, n8.right = n13, n4_2\nn11.left, n11.right = n7, n2\nn4_2.left, n4_2.right = n5_2, n1\nprint(Solution().pathSum(n5, 22))\n","sub_path":"leetcode/python/path-sum-ii.py","file_name":"path-sum-ii.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"638011572","text":"import dungeon_decorators\nimport dungeon_logic\nimport dungeon_exception\nimport dungeon_logger\nimport dun_map\nimport dungeon_exception as dexc\n\n\nCOMMANDS = ['r', 'l', 'u', 'd']\nWARNINGS = {'bomb':'Bomb is near you!',\n 'treasue':'Tresure is near you'}\nMENU_COMMANDS = ['save', 'load']\n\n\nclass Character:\n\n def __init__(self, hit_points):\n \"\"\"\n :\n \"\"\"\n\n self.hit_points = hit_points\n self.position = [0, 0]\n\n\nclass Player(Character):\n\n def __init__(self):\n \"\"\"\n :param dun_map: map where player must be created\n \n \"\"\"\n \n Character.__init__(self, 3)\n self.command = \"\"\n self.treasure_picked = 0\n\n\n @dungeon_decorators.debug_time_decor\n @dungeon_decorators.debug_decor\n def get_command(self):\n\n self.command = \"\"\n while self.command not in MENU_COMMANDS and self.command not in COMMANDS:\n\n self.command = input('Enter valid command\\n')\n\n\n @dungeon_decorators.debug_time_decor\n @dungeon_decorators.debug_decor\n def applay_damage(self, damage):\n \"\"\"\n :param damage: damage to applay\n :type damage: int\n \"\"\"\n\n if damage < 0:\n raise dexc.DamageError(f'{damage}')\n self.hit_points -= damage\n\n\n @dungeon_decorators.debug_time_decor\n @dungeon_decorators.debug_decor\n def change_score(self, diff):\n \"\"\"\n :param diff: score difference\n :type diff: int\n \"\"\"\n\n self.treasure_picked += diff\n \n @dungeon_decorators.debug_time_decor\n @dungeon_decorators.debug_decor\n def process_move(self, dung_map):\n \"\"\"\n :param dung_map: map of the game\n :type dung_map: DungeonMap\n \"\"\"\n\n position = self.position\n size = len(dung_map.dun_map)\n command = self.command\n game_map = dung_map.dun_map\n\n if command not in COMMANDS:\n raise dungeon_exception.CommandError('Invalid command was entered!')\n\n dungeon_logger.logger.debug(f\"Move with command: {command}\")\n \n try:\n game_map[position[0]][position[1]] = '0'\n except TypeError as error:\n dungeon_logger.logger.info(f'TypeError occured: {error}')\n move_ret_val = 
+{"seq_id":"276644717","text":"from queue import PriorityQueue\nfrom typing import List\n\n\nclass Solution:\n\n    def constrainedSubsetSum(self, nums: List[int], k: int) -> int:\n        if len(nums)==1: return nums[0]\n        dp = [0]*len(nums)\n        dp[0] = nums[0]\n        max_value = dp[0]\n        pq = PriorityQueue()\n        pq.put((-max_value, 0))\n        for i in range(1, len(nums)):\n            dp[i] = max(max_value, 0) + nums[i]\n\n            if max_value < dp[i]:\n                max_value = dp[i]\n                pq.put((-dp[i], i))\n            if i>=k:\n                v = pq.get()\n                max_value, max_idx = -v[0], v[1]\n                if max_idx <= i-k:\n                    v= pq.get()\n                    max_value = -v[0]\n                    max_idx = v[1]\n\n                pq.put((-max_value, max_idx))\n\n        return max(dp)\n\nif __name__ == '__main__':\n    # pq = PriorityQueue()\n    #\n    # for i in range(3, 0, -1):\n    #     pq.put(i)\n    #\n    # pq = PriorityQueue()\n    #\n    # for i in range(3, 0, -1):\n    #     pq.put(-i)\n    #\n    # print(-pq.get())\n\n    s = Solution()\n    nums = [10, 2, -10, 5, 20]\n    k = 2\n    ans = s.constrainedSubsetSum(nums, k)\n    print(ans)\n    assert ans == 37\n\n    nums = [10, -2, -10, -5, 20]\n    k = 2\n    ans = s.constrainedSubsetSum(nums,k)\n    print(ans)\n    assert ans==23\n\n    nums = [-1, -2, -3]\n    k = 1\n    ans = s.constrainedSubsetSum(nums, k)\n    print(ans)\n    assert ans == -1\n","sub_path":"contests/week186/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"532561352","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nimport json\n# from django.http import HttpResponse\nfrom .models import Site\nfrom .forms import CreateSiteForm\n# Create your views here.\n\n\ndef index(request):\n    if request.method == 'POST':\n        id = request.POST[\"id\"]\n        site = Site.objects.get(pk=id)\n        site.delete()\n        return HttpResponseRedirect('/management/')\n    else:\n        servers = []\n        for site in Site.objects.all():\n            servers.append({\"id\": site.id, \"name\": site.name, \"ip\": site.ip})\n        return render(request, 'management/index.html', {\"servers\": servers})\n\n\ndef createsite(request):\n    if request.method == 'POST':\n        form = CreateSiteForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/management/')\n\n    else:\n        form = CreateSiteForm()\n    return render(request, 'management/createsite.html', {'form': form})\n\n\ndef edit(request, id):\n    site = Site.objects.get(pk=id)\n    if request.method == 'POST':\n        form = CreateSiteForm(request.POST, instance=site)\n        form.save()\n        return HttpResponseRedirect('/management/' + id + '/')\n    form = CreateSiteForm(instance=site)\n    return render(request, 'management/editsite.html', {'form': form, 'id': id})\n\n\ndef detail(request, id):\n    try:\n        site = Site.objects.get(pk=id)\n    except Site.DoesNotExist:\n        return HttpResponseRedirect('/management/')\n    return render(request, 'management/detailsite.html',\n                  {'site': site})\n\n\ndef initsite(request):\n    if request.method == 'POST':\n        initid = request.POST['initid']\n        password = request.POST['password']\n        # print(\"initid is:%s, password is: %s\" % (initid, password))\n        message = \"\"\n        info = {\"message\": \"ok!\" + password}\n        return HttpResponse(json.dumps(info), content_type=\"application/json\")\n","sub_path":"management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"57475223","text":"\"\"\"\n    femagtools.plot.wdg\n    ~~~~~~~~~~~~~~~~~~~\n\n    Creating winding plots\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef mmf(f, title='', ax=0):\n    \"\"\"plot magnetomotive force (mmf) of winding\"\"\"\n    if ax == 0:\n        ax = plt.gca()\n    if title:\n        ax.set_title(title)\n    ax.plot(np.array(f['pos'])/np.pi*180, f['mmf'])\n    ax.plot(np.array(f['pos_fft'])/np.pi*180, f['mmf_fft'])\n    ax.set_xlabel('Position / Deg')\n\n    phi = [f['alfa0']/np.pi*180, f['alfa0']/np.pi*180]\n    y = [min(f['mmf_fft']), 1.1*max(f['mmf_fft'])]\n    ax.plot(phi, y, '--')\n    alfa0 = round(f['alfa0']/np.pi*180, 2)\n    ax.text(phi[0]/2, y[0]+0.05, f\"{alfa0}°\",\n            ha=\"center\", va=\"bottom\")\n    ax.annotate(f\"\", xy=(phi[0], y[0]),\n                xytext=(0, y[0]), arrowprops=dict(arrowstyle=\"->\"))\n    ax.grid()\n\n\ndef mmf_fft(f, title='', mmfmin=1e-2, ax=0):\n    \"\"\"plot winding mmf harmonics\"\"\"\n    if ax == 0:\n        ax = plt.gca()\n    if title:\n        ax.set_title(title)\n    else:\n        ax.set_title('MMF Harmonics (per phase)')\n    ax.grid(True)\n    order, mmf = np.array([(n, m) for n, m in zip(f['nue'],\n                                                  f['mmf_nue']) if m > mmfmin]).T\n    try:\n        markerline1, stemlines1, _ = ax.stem(order, mmf, '-.', basefmt=\" \",\n                                             use_line_collection=True)\n        ax.set_xticks(order)\n    except ValueError: # empty sequence\n        pass\n\n\ndef zoneplan(wdg, ax=0):\n    \"\"\"plot zone plan of winding wdg\"\"\"\n    from matplotlib.patches import Rectangle\n    upper, lower = wdg.zoneplan()\n    Qb = len([n for l in upper for n in l])\n    from femagtools.windings import coil_color\n    rh = 0.5\n    if lower:\n        yl = rh\n        ymax = 2*rh + 0.2\n    else:\n        yl = 0\n        ymax = rh + 0.2\n    if ax == 0:\n        ax = plt.gca()\n    ax.axis('off')\n    ax.set_xlim([-0.5, Qb-0.5])\n    ax.set_ylim([0, ymax])\n    ax.set_aspect(Qb/6+0.3)\n\n    for i, p in enumerate(upper):\n        for x in p:\n            ax.add_patch(Rectangle((abs(x)-1.5, yl), 1, rh,\n                                   facecolor=coil_color[i],\n                                   edgecolor='white', fill=True))\n            s = f'+{i+1}' if x > 0 else f'-{i+1}'\n            ax.text(abs(x)-1, yl+rh/2, s, color='black',\n                    ha=\"center\", va=\"center\")\n    for i, p in enumerate(lower):\n        for x in p:\n            ax.add_patch(Rectangle((abs(x)-1.5, yl-rh), 1, rh,\n                                   facecolor=coil_color[i],\n                                   edgecolor='white', fill=True))\n            s = f'+{i+1}' if x > 0 else f'-{i+1}'\n            ax.text(abs(x)-1, yl-rh/2, s, color='black',\n                    ha=\"center\", va=\"center\")\n\n    yu = yl+rh\n    step = 1 if Qb < 25 else 2\n    if lower:\n        yl -= rh\n    margin = 0.05\n    ax.text(-0.5, yu+margin, f'Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}',\n            ha='left', va='bottom', size=15)\n    for i in range(0, Qb, step):\n        ax.text(i, yl-margin, f'{i+1}', ha=\"center\", va=\"top\")\n\n\ndef winding_factors(wdg, n=8, ax=0):\n    \"\"\"plot winding factors\"\"\"\n    ax = plt.gca()\n    ax.set_title(f'Winding factors Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}')\n    ax.grid(True)\n    order, kwp, kwd, kw = np.array(\n        [(n, k1, k2, k3)\n         for n, k1, k2, k3 in zip(wdg.kw_order(n),\n                                  wdg.kwp(n),\n                                  wdg.kwd(n),\n                                  wdg.kw(n))]).T\n    try:\n        markerline1, stemlines1, _ = ax.stem(order-1, kwp,\n                                             'C1:', basefmt=\" \",\n                                             markerfmt='C1.',\n                                             use_line_collection=True,\n                                             label='Pitch')\n        markerline2, stemlines2, _ = ax.stem(order+1, kwd,\n                                             'C2:', basefmt=\" \",\n                                             markerfmt='C2.',\n                                             use_line_collection=True,\n                                             label='Distribution')\n        markerline3, stemlines3, _ = ax.stem(order, kw,\n                                             'C0-', basefmt=\" \",\n                                             markerfmt='C0o',\n                                             use_line_collection=True,\n                                             label='Total')\n        ax.set_xticks(order)\n        ax.legend()\n    except ValueError: # empty sequence\n        pass\n\n\ndef winding(wdg, ax=0):\n    \"\"\"plot coils of windings wdg\"\"\"\n    from matplotlib.patches import Rectangle\n    from matplotlib.lines import Line2D\n    from femagtools.windings import coil_color\n\n    coil_len = 25\n    coil_height = 4\n    dslot = 8\n    arrow_head_length = 2\n    arrow_head_width = 2\n\n    if ax == 0:\n        ax = plt.gca()\n    z = wdg.zoneplan()\n    xoff = 0\n    if z[-1]:\n        xoff = 0.75\n    yd = dslot*wdg.yd\n    mh = 2*coil_height/yd\n    slots = sorted([abs(n) for m in z[0] for n in m])\n    smax = slots[-1]*dslot\n    for n in slots:\n        x = n*dslot\n        ax.add_patch(Rectangle((x + dslot/4, 1), dslot /\n                               2, coil_len - 2, fc=\"lightblue\"))\n        ax.text(x, coil_len / 2,\n                str(n),\n                horizontalalignment=\"center\",\n                verticalalignment=\"center\",\n                backgroundcolor=\"white\",\n                bbox=dict(boxstyle='circle,pad=0', fc=\"white\", lw=0))\n    line_thickness = [0.6, 1.2]\n    for i, layer in enumerate(z):\n        b = -xoff if i else xoff\n        lw = line_thickness[i]\n        direction = ['right', 'left']\n        d = 1\n        for m, mslots in enumerate(layer):\n            for k in mslots:\n                x = abs(k) * dslot + b\n                xpoints = []\n                ypoints = []\n                if wdg.q >= 1 or wdg.l > 1:\n                    if (i == 0 and (k > 0 or (k < 0 and wdg.l > 1))):\n                        d = 0 # right\n                    else:\n                        d = 1 # left\n                elif d == 0:\n                    d = 1\n                else:\n                    d = 0\n                if direction[d] == 'right':\n                    # first layer, positive dir or neg. dir and 2-layers:\n                    # from right bottom\n                    if x + yd > smax+b:\n                        dx = dslot if yd > dslot else yd/4\n                        xpoints = [x + yd//2 + dx - xoff]\n                        ypoints = [-coil_height + mh*dx]\n                    xpoints += [x + yd//2 - xoff, x, x, x + yd//2-xoff]\n                    ypoints += [-coil_height, 0, coil_len,\n                                coil_len+coil_height]\n                    if x + yd > smax+b:\n                        xpoints += [x + yd//2 + dx - xoff]\n                        ypoints += [coil_len+coil_height - mh*dx]\n                else:\n                    # from left bottom\n                    if x - yd < 0: # and x - yd/2 > -3*dslot:\n                        dx = dslot if yd > dslot else yd/4\n                        xpoints = [x - yd//2 - dx + xoff]\n                        ypoints = [- coil_height + mh*dx]\n                    xpoints += [x - yd//2+xoff, x, x, x - yd/2+xoff]\n                    ypoints += [-coil_height, 0, coil_len,\n                                coil_len+coil_height]\n                    if x - yd < 0: # and x - yd > -3*dslot:\n                        xpoints += [x - yd//2 - dx + xoff]\n                        ypoints += [coil_len + coil_height - mh*dx]\n\n                ax.add_line(Line2D(xpoints, ypoints,\n                                   color=coil_color[m], lw=lw))\n\n                if k > 0:\n                    h = arrow_head_length\n                    y = coil_len * 0.8\n                else:\n                    h = -arrow_head_length\n                    y = coil_len * 0.2\n                ax.arrow(x, y, 0, h,\n                         length_includes_head=True,\n                         head_starts_at_zero=False,\n                         head_length=arrow_head_length,\n                         head_width=arrow_head_width,\n                         fc=coil_color[m], lw=0)\n    if False: # TODO show winding connections\n        m = 0\n        for k in [n*wdg.Q/wdg.p/wdg.m + 1 for n in range(wdg.m)]:\n            if k < len(slots):\n                x = k * dslot + b + yd/2 - xoff\n                ax.add_line(Line2D([x, x],\n                                   [-2*coil_height, -coil_height],\n                                   color=coil_color[m], lw=lw))\n                ax.text(x, -2*coil_height+0.5, str(m+1), color=coil_color[m])\n                m += 1\n    ax.autoscale(enable=True)\n    ax.set_axis_off()\n","sub_path":"src/femagtools/plot/wdg.py","file_name":"wdg.py","file_ext":"py","file_size_in_byte":8724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"258652233","text":"import os, sys, inspect\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\n\n\nfrom bokeh.models import Panel\nfrom bokeh.layouts import gridplot\nfrom scripts.scatterplot import scatterplot_fig_categorical\nfrom scripts.Q_T2_lineplot import q_lineplot_fig, t2_lineplot_fig\n\n\ndef q_t2_tab(\n    cds,\n    categories,\n    Q_col_name,\n    T2_col_name,\n    cat_col_name,\n    index_col_name,\n    Q_low_col_name,\n    Q_up_col_name,\n    T2_low_col_name,\n    T2_up_col_name,\n    **kwargs\n):\n\n    x_axis_label = \"T² statistic\"\n    y_axis_label = \"Q scores\"\n\n    scatter = scatterplot_fig_categorical(\n        cds,\n        categories,\n        x_col_name=T2_col_name,\n        y_col_name=Q_col_name,\n        leg_col_name=cat_col_name,\n        x_label=x_axis_label,\n        y_label=y_axis_label,\n        **kwargs\n    )\n\n    q_line = q_lineplot_fig(\n        cds, Q_col_name, Q_low_col_name, Q_up_col_name, index_col_name, **kwargs\n    )\n\n    t2_line = t2_lineplot_fig(\n        cds, T2_col_name, T2_low_col_name, T2_up_col_name, index_col_name, **kwargs\n    )\n\n    p = gridplot([[t2_line, q_line], [scatter, None]])\n\n    # Note: We need to add the figure p to a panel, which is what we return.\n    tab = Panel(child=p, title=\"Q-T2 Tab\")\n\n    return tab\n","sub_path":"bokeh_app/scripts/Q_T2_tab.py","file_name":"Q_T2_tab.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"384143056","text":"# https://www.hackerrank.com/challenges/s10-the-central-limit-theorem-2/problem\n\n# The central limit theorem (CLT) states that, for a large enough sample (N), \n# the distribution of the sample mean will approach normal distribution. \n# This holds for a sample of independent random variables from any distribution with a finite standard deviation.\n\n# The central limit theorem states that if you have a population with mean μ and standard deviation σ \n# and take sufficiently large random samples from the population with replacement, \n# then the distribution of the sample means will be approximately normally distributed.\n\n# When independent random variables are added, their properly normalized sum tends toward a normal distribution \n# (informally a bell curve) even if the original variables themselves are not normally distributed.\n\nfrom math import erf,sqrt\n\nm = int(input())\nn = int(input())\nmean = float(input())\nstd = float(input())\n\nstd = std/sqrt(n)\ny = m/n\n\ncdf = lambda x: 0.5 * (1 + erf((x-mean)/(std*sqrt(2))))\n\nprint(round(cdf(y),4))\n","sub_path":"Day 6: The Central Limit Theorem II.py","file_name":"Day 6: The Central Limit Theorem II.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"560281188","text":"import pyaudio\nimport socket\nimport time,threading,Queue,sys,os,time\n\n\nglobal frames,CHUNK,CHANNELS,RATE,stream,NUMBER_OF_THREADS\nframes = []\njob_number=[1,2]\nqueue=Queue.Queue()\nNUMBER_OF_THREADS=2\n\nFORMAT = pyaudio.paInt32\nCHANNELS = 2\nRATE = 44100\nCHUNK = 1024\np = pyaudio.PyAudio()\n\nstream = p.open(format = FORMAT,\n\t\t\t\tchannels = CHANNELS,\n\t\t\t\trate = RATE,\n\t\t\t\tinput = True,\n\t\t\t\tframes_per_buffer = CHUNK,\n\t\t\t\t)\n\ndef createThreads():\n\tfor i in range(NUMBER_OF_THREADS):\n\t\tt=threading.Thread(target=doStuff)\n\t\tt.daemon=True\n\t\tt.start()\n\t\t\ndef createStuffToDo():\n\tfor i in job_number:\n\t\tqueue.put(i)\n\tqueue.join()\n\t\n\ndef doStuff():\n\twhile True:\n\t\tx=queue.get()\n\t\tif x == 1:\n\t\t\tudpStream()\n\t\tif x == 2:\n\t\t\trecord()\n\t\tqueue.task_done()\n\ndef udpStream():\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \n\n    while True:\n        if len(frames) > 0:\n            s.sendto(frames.pop(0), (\"127.0.0.1\", 12345))\n\n    s.close()\n\ndef record(): \n    while True:\n        frames.append(stream.read(CHUNK))\n\ncreateThreads()\ncreateStuffToDo()\n","sub_path":"Communication Breakdown/clientvoic.py","file_name":"clientvoic.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"608824381","text":"# -*- coding: utf-8 -*-\n__author__ = 'mateusenricosrecaruso'\n\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom numpy import linspace\n\nmu = 0.5 # asfalto seco e pneu, atrito estatico\nm = 950 # kg, media entre 850 e 1050\ng = 9.8 # m/s^2\nCd = 0.0090 # de um pdf que achei para angulo de ataque 0º e um tipo de geometria de avião\nrho = 1.225 # kg/m^3, em 15ºC e nível do mar\nA = 30 # m^2\nCl = 0.0943 # de um pdf que achei para angulo de ataque 0º e um tipo de geometria de avião\n# Ac= // nao sei se a area do arrasto é a de contato ou a da asa; assumi como asa, mas existe essa variavel se for caso de contato\n\nx0 = 0\ny0 = 0\nvx0 = 0\nvy0 = 0\n\nS0 = [x0, y0, vx0, vy0]\nT = linspace(0, 10, 101)\n\n\ndef func(S, T):\n    dxdt = S[2]\n    dydt = S[3]\n    if (Cl * rho * A * S[2]**2 > m * g):\n        dvydt = (Cl * rho * A * S[2] * S[2] - m * g) / (2 * m)\n        dvxdt = (-1)*(Cd * rho * A * S[2] * S[2]) / (2 * m)\n    else:\n        dvydt = (Cl * rho * A * S[2] * S[2]) / (2 * m)\n        dvxdt = (mu * m * g - Cd * rho * A * S[2] * S[2]) / (2 * m)\n    return dxdt, dydt, dvxdt, dvydt\n\n\nZ = odeint(func, S0, T)\n#plt.plot(T, Z, label='Carro')\nplt.plot(T, Z[:, 0], 'r', label='Espaço no eixo x')\nplt.plot(T, Z[:, 1], 'g', label='Espaço no eixo y')\nplt.plot(T, Z[:, 2], 'y', label='Velocidade no eixo x')\nplt.plot(T, Z[:, 3], 'b', label='Velocidade no eixo y')\nplt.legend(loc='upper right')\nplt.axis([0, max(T), 0, 100])\nplt.xlabel('Tempo em segundos')\nplt.ylabel('Verde e Vermelho: Espaço\\nAzul e Amarelo: Velocidade')\nplt.title('Modelagem Aproximada do Carro Voador')\nplt.show()\n","sub_path":"aviao.py","file_name":"aviao.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"367410390","text":"import unittest\n\nimport numpy as np\n\n\ndef check_mat(x):\n    x[x < 0.0] = 0.0\n\n    return x\n\n\nclass TestNumpy(unittest.TestCase):\n\n    def test(self):\n        a = np.random.uniform(-5, 5, (2, 3))\n        print(a)\n\n        b = (a > 0).astype(int)\n        print(b)\n\n    def test2(self):\n        a = np.arange(12).reshape((3, 4))\n        print(id(a), a)\n\n        b = np.arange(12).reshape((3, 4))\n        print(id(b), b)\n\n    def test_maximum(self):\n        a = np.random.uniform(-1, 1, (3, 3))\n        print(id(a))\n\n        b = a\n        print(id(b))\n\n        c = a.copy()\n        print(id(c))\n\n        d = np.maximum(a, 0.0)\n        print(id(d))\n\n    def test_func(self):\n        a = np.random.uniform(-1, 1, (3, 3))\n        print(a, id(a))\n\n        b = check_mat(a)\n        print(b, id(b))\n\n    def test_reshape(self):\n        a = np.random.uniform(-1, 1, (3, 3))\n        print(a, id(a))\n\n        b = a.reshape((3, 3))\n        print(b, id(b))\n\n    def test_add_vector(self):\n        a = np.arange(12).reshape((4, 3))\n\n        b = np.ones(4)\n        for i in range(4):\n            b[i] = i + 1\n\n        print(a)\n        print(b)\n        print(a + b)\n","sub_path":"test/TestNumpy.py","file_name":"TestNumpy.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"48872989","text":"import os\n#check the filetype under current location\nall_files = os.listdir(os.curdir)# us os.curdir to represent the current dir\ntype_dict={}\n\nfor each_file in all_files:\n\tif os.path.isdir(each_file):\n\t\ttype_dict.setdefault('Folder',0)\n\t\ttype_dict['Folder']+=1\n\telse:\n\t\text =os.path.splitext(each_file)[1]\n\t\ttype_dict.setdefualt(ext,0)\n\t\ttype_dict[ext]+=1\n\nfor each_type in type_dict.keys():\n\tprint('There are %d type of %s files in current dir'%(type_dict[each_type],each_type)) \n","sub_path":"Pyclass/OS/filetype.py","file_name":"filetype.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"18406796","text":"#\n# Based on https://github.com/dmlc/dgl/examples/pytorch/gcn/README.md. \nimport argparse, time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\n#from dgl.data import register_data_args, load_data\nfrom load_data import NodeandEdgeDataset\nimport pandas as pd\n\n#from gcn import GCN\n#from gcn_mp import GCN\nfrom gcn_spmv import GCN\n\n\n\ndef load_dataframe_from_file(filepath: str, delimiter = ','):\n    df = pd.read_csv(filepath, sep = delimiter, header='infer')\n    return df\n\ndef set_training_data(node_features: pd.DataFrame, graph_edgelist: pd.DataFrame, cuda: bool):\n    # load and preprocess dataset, starting with the node features\n    NodeandEdgeDataset_instance = NodeandEdgeDataset()\n    features = NodeandEdgeDataset_instance._load_node_attr(node_features)\n    labels = NodeandEdgeDataset_instance._load_node_labels(node_features)\n    features = torch.FloatTensor(features.todense())\n    labels = torch.LongTensor(labels)\n    # train_mask = torch.ByteTensor(data.train_mask)\n    # val_mask = torch.ByteTensor(data.val_mask)\n    # test_mask = torch.ByteTensor(data.test_mask)\n    in_feats = features.shape[1]\n    n_classes = labels.shape[1]\n    # build the graph\n    graph = NodeandEdgeDataset_instance._load_graph(graph_edgelist, node_features)\n    n_edges = graph.number_of_edges()\n    if cuda:\n        cuda = True\n        torch.cuda.set_device(args.gpu)\n        features = features.cuda()\n        labels = labels.cuda()\n        # train_mask = train_mask.cuda()\n        # val_mask = val_mask.cuda()\n        # test_mask = test_mask.cuda()\n\n    # graph preprocess and calculate normalization factor\n    g = graph\n    # add self loop\n    if args.self_loop:\n        g.remove_edges_from(g.selfloop_edges())\n        g.add_edges_from(zip(g.nodes(), g.nodes()))\n    g = DGLGraph(g)\n    n_edges = g.number_of_edges()\n    # normalization\n    degs = g.in_degrees().float()\n    norm = torch.pow(degs, -0.5)\n    norm[torch.isinf(norm)] = 0\n    if cuda:\n        norm = norm.cuda()\n    g.ndata['norm'] = norm.unsqueeze(1)\n    graph = g\n    print(\"\"\"----Data statistics------'\n      #Edges %d\n      #Classes %d\n      \"\"\" % (n_edges, n_classes))\n    return features, labels, graph, in_feats, n_classes\n\ndef train(graph, features, labels, in_feats, n_epochs, n_edges, n_classes, weight_decay, cuda):\n    # create GCN model\n    model = GCN(graph,\n                in_feats,\n                args.n_hidden,\n                n_classes,\n                args.n_layers,\n                F.relu,\n                args.dropout)\n\n    if cuda:\n        model.cuda()\n    loss_fcn = torch.nn.CrossEntropyLoss()\n\n    # use optimizer\n    optimizer = torch.optim.Adam(model.parameters(),\n                                 lr=args.lr,\n                                 weight_decay=weight_decay)\n\n    # initialize graph\n    dur = []\n    labels = np.argmax(labels,axis=1)\n# for epoch in range(args.n_epochs):\n    for epoch in range(n_epochs):\n        model.train()\n        if epoch >= 3:\n            t0 = time.time()\n        # forward\n        logits = model(features)\n        #loss = loss_fcn(logits[train_mask], labels[train_mask])\n        loss = loss_fcn(logits, labels)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        if epoch >= 3:\n            dur.append(time.time() - t0)\n\n        # acc = evaluate(model, features, labels, val_mask)\n        # print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | \"\n        #       \"ETputs(KTEPS) {:.2f}\". format(epoch, np.mean(dur), loss.item(),\n        #                                      acc, n_edges / np.mean(dur) / 1000))\n        print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | \"\n              \"ETputs(KTEPS) {:.2f}\". format(epoch, np.mean(dur), loss.item(),\n                                             n_edges / np.mean(dur) / 1000))\n    return model \n\ndef evaluate(model, features, labels):\n    features = torch.FloatTensor(features.todense())\n    model.eval()\n    with torch.no_grad():\n        logits = model(features)\n        # logits = logits[mask]\n        # labels = labels[mask]\n        _, indices = torch.max(logits, dim=1)\n        correct = torch.sum(indices == labels)\n        percentage_correct = correct.item() * 1.0 / len(labels)\n        return indices, percentage_correct\n\ndef main(args):\n    cuda = False\n    if args.gpu < 0:\n        cuda = False\n    else:\n        cuda = True\n\n    # Load the node attributes table for training.\n    node_features = load_dataframe_from_file(\"learningData.csv\")\n\n    # Load the graph edgelist.\n    graph_edgelist = load_dataframe_from_file(\"edgeList.csv\")\n\n    # Set training parameters.\n    features = None\n    labels = None\n    graph = None\n    in_feats = None\n    features, labels, graph, in_feats, n_classes = set_training_data(node_features, graph_edgelist, cuda)\n\n    n_epochs = args.n_epochs\n    weight_decay = args.weight_decay\n    n_edges = graph.number_of_edges()\n\n    # Train the model.\n    model = train(graph, features, labels, in_feats, n_epochs, n_edges, n_classes, weight_decay, cuda)\n\n    # Load the test data.\n    test_node_features_df = load_dataframe_from_file(\"testingData.csv\")\n    NodeandEdgeDataset_instance = NodeandEdgeDataset()\n    test_node_features = NodeandEdgeDataset_instance._load_node_attr(test_node_features_df)\n\n    # Load the correct labels for the test data.\n    test_node_labels_df = load_dataframe_from_file(\"testingDatawithLabels.csv\")\n    test_node_labels = NodeandEdgeDataset_instance._load_node_labels(test_node_labels_df)\n\n    # Test the model.\n    indices, acc = evaluate(model, test_node_features, test_node_labels)\n    print(\"Test Accuracy {:.4f}\".format(acc))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='GCN')\n    #register_data_args(parser)\n    parser.add_argument(\"--dropout\", type=float, default=0.5,\n                        help=\"dropout probability\")\n    parser.add_argument(\"--gpu\", type=int, default=-1,\n                        help=\"gpu\")\n    parser.add_argument(\"--lr\", type=float, default=1e-2,\n                        help=\"learning rate\")\n    parser.add_argument(\"--n-epochs\", type=int, default=200,\n                        help=\"number of training epochs\")\n    parser.add_argument(\"--n-hidden\", type=int, default=16,\n                        help=\"number of hidden gcn units\")\n    parser.add_argument(\"--n-layers\", type=int, default=1,\n                        help=\"number of hidden gcn layers\")\n    parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n                        help=\"Weight for L2 loss\")\n    parser.add_argument(\"--self-loop\", action='store_true',\n                        help=\"graph self-loop (default=False)\")\n    parser.set_defaults(self_loop=False)\n    args = parser.parse_args()\n    print(args)\n\n    main(args)\n","sub_path":"nk_gcn_vertex_class/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"611061239","text":"from case.CBaseCase import *\n\nclass T4773_uefi_CheckCPUMaxPerf(CBaseCase):\n    '''\n    [Purpose ]: Make sure CPU is configured in Max Performance mode.\n        First we check MSR 0x1FC bit25; \n        if bit 25 == 0:\n            read MSR 0x1B0 0:3; should be within [0, 3].\n        if bit 25 == 1:\n            read MSR 0xA01 3:6; should be within [0, 3].\n    [Author ]: shark.y.liu@emc.com\n    [Tickets ]: ATOM-1538\n    [Platform]: All\n    [Type ]: Auto\n    '''\n    def __init__(self):\n        CBaseCase.__init__(self, self.__class__.__name__)\n\n\n    def config(self):\n        CBaseCase.config(self)\n\n        #power cycle host and stop at end of POST alphabet:\n        self.enclosure.sp.power_cycle_host()\n\n        ret=self.enclosure.sp.stop_at_end_of_post_alphabet()\n        if ret is not True:\n            errmsg='Fail to stop at end of POST alphabet'\n            raise Exception('FAIL', errmsg)\n    \n\n    def test(self):\n        #read msr by Arium ECM-XDP3 or POST:\n        #pwr_perf_tuning_cfg_mode is bit 25.\n        mask_1fc=0x2000000\n        ret=self.enclosure.sp.msr(0x1fc) & mask_1fc\n        pwr_perf_tuning_cfg_mode = ret>>25\n        if pwr_perf_tuning_cfg_mode == 0:\n            #read msr 0x1b0 bit 0:3\n            mask_1b0=0xf\n            energy_policy=self.enclosure.sp.msr(0x1b0) & mask_1b0\n            if energy_policy not in range(4):\n                errmsg='CPU energy policy is %d, \\\n                    not max performance(0-3) as required.'\n                raise Exception('FAIL', errmsg)\n\n        elif pwr_perf_tuning_cfg_mode == 1:\n            #read msr 0xa01 bit3:6\n            mask_a01=0x78 #0x1111000, bit3:6\n            energy_policy=(self.enclosure.sp.msr(0xa01) & mask_a01)>>3\n            if energy_policy not in range(4):\n                errmsg='CPU energy policy is %d, \\\n                    not max performance(0-3) as required.'\n                raise Exception('FAIL', errmsg)\n\n        self.log('INFO', 'CPU is configured to run at \\\n            Max Performance Mode as required.')\n        return\n\n","sub_path":"case/regression/bios/T4773_uefi_CheckCPUMaxPerf.py","file_name":"T4773_uefi_CheckCPUMaxPerf.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"381120344","text":"\"\"\"\r\nValueError occurs after numerator.\r\nZeroDivisionError occurs after denominator\r\nYou could place an if statement and while loop after denominator that\r\ncould ask you to re-input a number, this time higher than 0\r\n\"\"\"\r\n\r\ntry:\r\n    numerator = int(input(\"Enter the numerator: \"))\r\n    denominator = int(input(\"Enter the denominator: \"))\r\n    fraction = numerator / denominator\r\nexcept ValueError:\r\n    print(\"Numerator and denominator must be valid numbers!\")\r\nexcept ZeroDivisionError:\r\n    print(\"Cannot divide by zero!\")\r\n    print(\"Finished.\")","sub_path":"Prac02/value_error_checking.py","file_name":"value_error_checking.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"155457949","text":"## Euler 52\n##\n## It can be seen that the number, 125874, and its double,\n## 251748, contain exactly the same digits,\n## but in a different order.\n##\n## Find the smallest positive integer, x,\n## such that 2x, 3x, 4x, 5x, and 6x,\n## contain the same digits.\n##\n## Answer: 142857\n## CPU time: 0.91\nimport time\nstart_time = time.time()\n\ndef sort_digits(number):\n    return ''.join(sorted(list(str(number))))\n\nfound = False\nfor i in range(1,1258750):\n    for x in range(2,7):\n        if sort_digits(i) != sort_digits(i*x):\n            break\n        if x == 6:\n            print(i)\n            found = True\n            break\n    if found == True:\n        break\n\n\nend_time = time.time()\nprint('{:0.2F}'.format((end_time - start_time)))\n","sub_path":"finished/052/052.py","file_name":"052.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"466980064","text":"import time\nimport calendar\nimport xml.etree.cElementTree as ET\nimport csv\nimport re\n\n\n\ntree1 = ET.parse(\"D:/export_other_to_crew_bid_MHCC_20170626_080918_latchr.xml\")\n\n\n\ndef get_dates(s):\n    regex = r\"wom:?\\D+(\\d+)\"\n    matches = re.findall(regex,s)\n    s = calendar.monthcalendar(2017,)\n    for match in range (0,len(matches)):\n        print(str(s[match])+\"Jan\"+\"2017\")\n\n\ndef setPoints(n): # returns bid points as a string\n    m=n%10\n    if n>100:\n        return str(100)\n    elif n<10:\n        return str(10)\n    elif (m<5):\n        return str((n-m))\n    elif (m>=5):\n        return str((n+(10-m)))\n\n\ndef setMaxTimesRoster(n): # returns max times roster as string\n    if n in ['1', '2', '3', '4', '5', '6', '7']:\n        return str(n)\n    else:\n        try:\n            maxTimes=int(n)\n            if maxTimes > 7:\n                return 'Max'\n            elif maxTimes< 1:\n                return str(1)\n        except ValueError:\n            return str(1)\n\n\ndef conv_d(d):# returns date in a dd/mm/yy format\n    if d is not None:\n        length = len((str(d).strip()))\n        if length>=8 and length <=9:\n            final_d = time.strptime(d, \"%d-%b-%y\")\n        elif length>=10 and length <=12:\n            final_d = time.strptime(d, \"%d%b%y %H%M\")\n        elif length>=13 and length <=14:\n            final_d = time.strptime(d, \"%d%b%Y %H%M\")\n        return time.strftime(\"%d/%m/%y\",final_d)\n    else:\n        return ''\n\ndef conv_t(d): # returns time in an hh:mm\n    if d is not None:\n        try:\n            final_t = time.strptime(d, \"%d%b%Y %H%M\")\n        except TypeError:\n            final_t = time.strptime(d, \"%d%b%y %H%M\")\n        finally:\n            return time.strftime(\"%H:%M\",final_t)\n    else:\n        return ''\n\n\ndef get_avoid(d): # returns boolean\n    av = str(d)\n    if av in ['AGT', 'AST', 'ATDA']:\n        return True\n    else:\n        return ''\n\ndef get_region(r): # returns region\n    regions ={\n        'AUS':'AUSTRALIA',\n        'NAM':'AMERICA',\n        'ORI':'ASIA',\n        'NZ':'NEW ZEALAND',\n        'PAC':'PACIFIC',\n        'SAM':'AMERICA'}\n    if r is not None:\n        return regions.get(r)\n    else:\n        return ''\n\ndef get_layoverData(tree): # returns layover ports present in the JCR JCB export file in a list\n    layovers=[]\n    for node in tree.iterfind(\".//destinationdata/destinations\"):\n        destinationData = node.findall('destination')\n        for node2 in destinationData:\n            if node2.attrib['layover']==\"true\":\n                layovers.append(node2.attrib['airport'])\n    return layovers\n\n\ndef get_transitData(tree): # returns transit ports present in the JCR JCB export file in a list\n    transits=[]\n    for node in tree.iterfind(\".//destinationdata/destinations\"):\n        destinationData = node.findall('destination')\n        for node2 in destinationData:\n            if node2.attrib['stop']==\"true\":\n                transits.append(node2.attrib['airport'])\n    return transits\n\n\ndef get_transit(t): # checks for transit returns transit\n    transits = get_transitData(tree1)\n    if t is not None:\n        if str(t).strip() in transits:\n            return t\n    else:\n        return ''\n\ndef get_layover(l): # checks for transit returns transit\n    layovers = get_layoverData(tree1)\n    if l is not None:\n        if str(l).strip() in layovers:\n            return l\n    else:\n        return ''\n\n\ndef get_pax(s):\n    px = str(s).strip()\n    if px == \"ON\":\n        return \"PAX to home base\"\n    else:\n        return ''\n\n\ndef group(L):\n    if len(L) == 0:\n        return (0,0)\n    else:\n        first = last = L[0]\n        for n in L[1:]:\n            if (n - 1) == last: # Part of the group, bump the end\n                last = n\n            else: # Not part of the group, yield current group and start a new\n                yield first, last\n                first = last = n\n        yield first, last # Yield the last group\n\n\ndef get_days(s): # returns dow in 3 letter day initials eg. 1 - Mon\n    s_stripped = \"\".join(s.split())\n    if 'dow' in s_stripped:\n        # print(\"I'm in\")\n        regex = r\"dow:?\\D+(\\d+)\"\n        dow = re.findall(regex, s_stripped) # returns array but for this case, array will have 1 element only i.e at index 0\n        dow_list = []\n        for i in dow[0]:\n            dow_list.append(int(i))\n        return dow_list\n    else:\n        return []\n\ndaydict = {\"\": \"\", 1: \"Mon\", 2: \"Tue\", 3: \"Wed\", 4: \"Thu\", 5: \"Fri\", 6: \"Sat\", 7: \"Sun\"}\n\ncsvDReader = csv.DictReader(csvFile)\n\noutputFile = open('output.csv', 'w', newline='')\noutputWriter = csv.writer(outputFile)\nt_count = 0\ncount_gen_pairing = 0\ncount_gof = 0\ncount_day_off = 0\ncount_rule_relax = 0\ncount_time_off = 0\ncount_spec_pairing = 0\n\n\nfor row in csvDReader:\n    # print(row)\n    bid_type = row['.']\n    crewid = row['Number']\n    avoid = str(get_avoid(row['Pref Type']))\n    max_times_roster = setMaxTimesRoster(row['Rqd'])\n    region = get_region(row['Rgn'])\n    layover = get_transit(row['L/O'])\n    transit = get_layover(row['Tod/Port'])\n    max_lo_nt = row['Nt']\n    bid_points = setPoints((int(row['Wt'])))\n    pax = get_pax(row['Px'])\n    dow_to_list = get_days(row['Remarks'])\n    # print(type(dow_to_list))\n    day_range = list(group(dow_to_list))\n    # day_from = int(day_range[0])\n    # day_to = day_range[0][1]\n    # print(day_range)\n    if bid_type == 'GEN_PAIRING' and len(day_range)!=0:\n        date_from = conv_d(row['From'])\n        date_to = conv_d(row['Until'])\n        for i in range(0,len(day_range)):\n            day_from = day_range[i][0]\n            day_to = day_range[i][1]\n            outputWriter.writerow([bid_type, crewid, date_from, date_to, avoid, daydict[day_from], daydict[day_to], max_times_roster, region, layover, max_lo_nt, transit, pax, bid_points])\n            t_count= t_count + 1\n            count_gen_pairing = count_gen_pairing + 1\n    elif bid_type == 'GEN_PAIRING' and len(day_range)==0:\n        date_from = conv_d(row['From'])\n        date_to = conv_d(row['Until'])\n        outputWriter.writerow([bid_type, crewid, date_from, date_to, avoid, daydict[''], daydict[''], max_times_roster, region, layover, max_lo_nt, transit, pax, bid_points])\n        t_count = t_count + 1\n        count_gen_pairing = count_gen_pairing + 1\n    elif bid_type == 'GOLDEN_DO':\n        date_from = conv_d(row['From'])\n        outputWriter.writerow([bid_type, crewid, date_from, bid_points])\n        t_count = t_count + 1\n        count_gof = count_gof + 1\n    elif bid_type == 'SPEC_DO':\n        date_from = conv_d(row['From'])\n        date_to = conv_d(row['Until'])\n        outputWriter.writerow([bid_type, crewid, date_from, date_to, bid_points])\n        t_count = t_count + 1\n        count_day_off = count_day_off + 1\n    elif bid_type == 'GROUP_DAYS':\n        outputWriter.writerow([bid_type, crewid])\n        t_count = t_count + 1\n        count_rule_relax = count_rule_relax + 1\n    elif bid_type == 'WAIVE_WEEK':\n        outputWriter.writerow([bid_type, crewid])\n        t_count = t_count + 1\n        count_rule_relax = count_rule_relax + 1\n    elif bid_type == 'SPEC_TIMEOFF':\n        t_count = t_count + 1\n        count_time_off = count_time_off + 1\n    elif bid_type == 'QUAL_TIMEOFF':\n        t_count = t_count + 1\n        count_time_off = count_time_off + 1\n    elif bid_type == 'GEN_TIMEOFF':\n        t_count = t_count + 1\n        count_time_off = count_time_off + 1\n    elif bid_type == 'SPEC_PAIRING':\n        t_count = t_count + 1\n        count_spec_pairing = count_spec_pairing + 1\n    else:\n        t_count = t_count + 1\n        continue\n\ncount_translated_bids = count_rule_relax + count_day_off + count_gen_pairing + count_gof\n\n\nprint(\"Bids translated : \" + str(count_translated_bids) + \" out of \" + str(t_count)+\"\\n\"\n      +\"Generic pairing bids : \"+ str(count_gen_pairing)+\"\\n\"\n      +\"Golden day off bis : \"+ str(count_gof)+\"\\n\"\n      +\"Day off bids : \"+ str(count_day_off)+\"\\n\")\nprint(\"Bids not translated : \"+str(count_time_off+ count_spec_pairing)+ \" out of \" + str(t_count)+\"\\n\"\n      +\"Specific pairing bids : \"+str(count_spec_pairing)+\"\\n\"\n      +\"Time off bids : \"+str(count_time_off))\n\noutputFile.close()\n\n\n\n\n","sub_path":"JCR2JCB_export/tests/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"249859985","text":"# May 1st 2020 Challenge\n\n# The isBadVersion API is already defined for you.\n# @param version, an integer\n# @return a bool\n# def isBadVersion(version):\n\nclass Solution:\n    def firstBadVersion(self, n):\n        first = 1\n        last = n \n        key = n\n        while(first<=last):\n            mid = int((first+last)/2)\n            if(isBadVersion(mid)==False):\n                first = mid + 1\n            if(isBadVersion(mid)):\n                last = mid - 1\n                if(mid= self.limite:\n            if self.valetCarreau.x >= self.limite:\n                if self.valetTrefle.x >= self.limite:\n                    if self.valetPique.x >= self.limite:\n                        self.carte = cartes.tirerCarte()\n                        if self.carte.couleur == \"coeur\":\n                            self.valetCoeur.reculer()\n                        if self.carte.couleur == \"carreau\":\n                            self.valetCarreau.reculer()\n                        if self.carte.couleur == \"trefle\":\n                            self.valetTrefle.reculer()\n                        if self.carte.couleur == \"pique\":\n                            self.valetPique.reculer()\n                        self.limite = 10\n                        self.isShow = False","sub_path":"riviere.py","file_name":"riviere.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"189311957","text":"from django.conf import settings\n\n\ndef get_backend():\n    from django.conf import settings\n    if hasattr(settings, 'POST_OFFICE_BACKEND'):\n        backend = getattr(settings, 'POST_OFFICE_BACKEND')\n    else:\n        backend = getattr(settings, 'EMAIL_BACKEND',\n                          'django.core.mail.backends.smtp.EmailBackend')\n        # If EMAIL_BACKEND is set to use PostOfficeBackend\n        # and POST_OFFICE_BACKEND is not set, fall back to SMTP\n        if 'post_office.EmailBackend' in backend:\n            backend = 'django.core.mail.backends.smtp.EmailBackend'\n    return backend","sub_path":"post_office/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"329677776","text":"import socket\nimport select\n\nfrom stirling.component.client import Client\n\nclass SocketServer(object):\n    def __init__(self, manager, address):\n        self.manager = manager\n\n        if type(address) == tuple:\n            self.address = address\n        else:\n            self.address = ('localhost', address)\n\n        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        self.socket.bind(self.address)\n        self.socket.listen(5)\n\n    def run_once(self):\n        r, w, e = select.select([self.socket], [], [], .1)\n        if r:\n            new_conn, addr = self.socket.accept()\n            entity = self.manager.new_entity(Client)\n            self.manager.get_component(entity, Client).update(\n                {'addr': addr,\n                 'socket': new_conn})\n        \n        self.manager.run_once()\n\n    def run(self):\n        while True:\n            self.run_once()\n\n","sub_path":"socketserver.py","file_name":"socketserver.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"136732430","text":"from apiclient import errors\nimport time\nimport traceback\nimport logging\n\nSCOPES = ['https://mail.google.com/']\nCREDENTIALS = 'credentials.json'\nCMD_DICT = {'get', 'help', 'has', 'show'}\n\nclass GmailListener:\n    def __init__(self, service,\n                 process_queue, send_queue,\n                 auth_accounts,\n                 debug):\n        self.service = service\n        self.process_queue = process_queue\n        self.send_queue = send_queue\n        self.auth_accounts = auth_accounts\n        self.user_email = service.users().getProfile(userId='me').execute()['emailAddress']\n        self.recent_history_id = -1\n\n        self.__set_recent_id()\n\n        self.debug = debug\n\n    def __set_recent_id(self):\n        results = self.service.users().messages().list(userId='me',labelIds = ['INBOX']).execute()\n        messages = results.get('messages', [])\n        if not messages:\n            print('No messages found.')\n        else:\n            recent_msg = self.service.users().messages().get(userId='me', id = messages[0]['id']).execute()\n            self.recent_history_id = recent_msg['historyId']\n\n    def __get_sender_email_address(self, id):\n        addr = self.service.users().messages().get(userId='me', id = id, format='metadata',\n                                                   metadataHeaders=['From'], fields='payload/headers').execute()\n        print(addr)\n        addr = addr.get('payload').get('headers')[0].get('value')\n        print(addr)\n        check = addr.split('<')\n        if len(check) == 1:\n            return addr\n        else:\n            return addr.split('<')[1].split('>')[0]\n\n    def listen_new_emails(self):\n        print(\"Listening...\")\n        try:\n            while self.debug[0] is not True:\n                results = self.service.users().history().list(userId='me', startHistoryId=self.recent_history_id).execute()\n                messages = results.get('history', [])\n                if not messages:\n                    print('ID {0}: No new messages.'.format(self.recent_history_id))\n                else:\n                    latest_new_message = messages[-1]\n                    id = latest_new_message.get('messages')[0].get('id')\n                    recent_msg = self.service.users().messages().get(userId='me', id = id).execute()\n                    recent_msg_email_addr = self.__get_sender_email_address(id)\n                    recent_msg_body = recent_msg['snippet'].strip()\n                    self.recent_history_id = recent_msg['historyId']\n\n                    print(recent_msg_email_addr)\n                    print(recent_msg_body)\n                    print(recent_msg['historyId'])\n\n                    if recent_msg_email_addr not in self.auth_accounts:\n                        print(\"Request IGNORED: Unauthorized email\")\n                    elif recent_msg_email_addr != self.user_email:\n                        self.process_queue.put((recent_msg_email_addr, recent_msg_body))\n\n                time.sleep(3)\n            print(\"STOP: Listening\")\n        except Exception as e:\n            logging.error(traceback.format_exc())\n","sub_path":"gmail_listener.py","file_name":"gmail_listener.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"640319029","text":"'''\nCreated on 14 июн. 2019 г.\n\n@author: alex\n'''\nfrom config import *\n\nfrom copy import copy\n\n\ndef getColumnID (WB, columnName=EMPLOYERID):\n    \"Ищем номер столбца с заданным именем\"\n    for i in range(1, WB.max_column+1):\n        if str(WB.cell(row=1, column=i).value) == columnName:\n            return i\n\n    \ndef copyCell(cellResult, cellSource):\n    \"Полностью копируем ячейку\"\n    cellResult.data_type = cellSource.data_type\n    cellResult.value = cellSource.value\n    \n    if cellSource.has_style:\n        cellResult.font = copy(cellSource.font)\n        cellResult.border = copy(cellSource.border)\n        cellResult.fill = copy(cellSource.fill)\n        cellResult.number_format = copy(cellSource.number_format)\n        cellResult.protection = copy(cellSource.protection)\n        cellResult.alignment = copy(cellSource.alignment)\n\n    \n\ndef copyTableHeader (WSResult, WSSource):\n    \"Копируем заголовок таблицы и добавляем еще два поля в конце\"\n    for i in range(1, WSSource.max_column+1):\n        copyCell(WSResult.cell(row=1, column=i), WSSource.cell(row=1, column=i))\n    \n    m=WSSource.max_column\n    copyCell(WSResult.cell(row=1, column=m+1),WSSource.cell(row=1, column=m))\n    WSResult.cell(row=1, column=m+1).value=\"ФОТ в предыдущем срезе\"\n\n    copyCell(WSResult.cell(row=1, column=m+2),WSSource.cell(row=1, column=m)) \n    WSResult.cell(row=1, column=m+2).value=\"Δ в %\"\n\n    WSResult.auto_filter.ref = WSResult.dimensions \n    \n    \n    \n","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"27552236","text":"from flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom flask import redirect,url_for\napp=Flask(__name__)\n@app.route(\"/\")\ndef get_index():\n    return(\"hi\");\n@app.route(\"/project\",methods=[\"POST\",\"GET\"])\ndef get_project():\n    if request.method=='POST':\n        p=request.form[\"project\"];\n        return redirect((url_for(\"proj_name\",name=p)));\n        #return(p);\n    \n    return(render_template(\"project.html\"));\n@app.route(\"/project/<name>\")\ndef proj_name(name):\n    s=\"project name is **\"+name;\n    return(s);\n\nif __name__==\"__main__\":\n    app.debug=True;\n    app.run();\n    \n","sub_path":"flask_examples/Html-post/htmlpost.py","file_name":"htmlpost.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"441696593","text":"# Copyright 2017 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport keras.backend as K\nfrom keras.applications import InceptionV3, inception_v3\nfrom keras.applications.imagenet_utils import decode_predictions\nimport numpy as np\nimport tensorflow as tf\n\nfrom pyspark.ml import Transformer\nfrom pyspark.ml.param import Param, Params, TypeConverters\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import (ArrayType, FloatType, StringType, StructField, StructType)\n\nimport sparkdl.graph.utils as tfx\nfrom sparkdl.image.imageIO import resizeImage\nfrom sparkdl.transformers.param import (\n    keyword_only, HasInputCol, HasOutputCol, SparkDLTypeConverters)\nfrom sparkdl.transformers.tf_image import TFImageTransformer\nfrom sparkdl.transformers.utils import (\n    imageInputPlaceholder, InceptionV3Constants)\n\n\nSUPPORTED_MODELS = [\"InceptionV3\"]\n\n\nclass DeepImagePredictor(Transformer, HasInputCol, HasOutputCol):\n    \"\"\"\n    Applies the model specified by its popular name to the image column in DataFrame.\n    The output is a MLlib Vector.\n    \"\"\"\n\n    modelName = Param(Params._dummy(), \"modelName\", \"A deep learning model name\",\n                      typeConverter=SparkDLTypeConverters.supportedNameConverter(SUPPORTED_MODELS))\n    decodePredictions = Param(Params._dummy(), \"decodePredictions\",\n                              \"If true, output predictions in the (class, description, probability) format\",\n                              typeConverter=TypeConverters.toBoolean)\n    topK = Param(Params._dummy(), \"topK\", \"How many classes to return if decodePredictions is True\",\n                 typeConverter=TypeConverters.toInt)\n\n    @keyword_only\n    def __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,\n                 topK=5):\n        \"\"\"\n        __init__(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,\n                 topK=5)\n        \"\"\"\n        super(DeepImagePredictor, self).__init__()\n        self._setDefault(decodePredictions=False)\n        self._setDefault(topK=5)\n        kwargs = self._input_kwargs\n        self.setParams(**kwargs)\n\n    @keyword_only\n    def setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,\n                  topK=5):\n        \"\"\"\n        setParams(self, inputCol=None, outputCol=None, modelName=None, decodePredictions=False,\n                  topK=5)\n        \"\"\"\n        kwargs = self._input_kwargs\n        self._set(**kwargs)\n        return self\n\n    def setModelName(self, value):\n        return self._set(modelName=value)\n\n    def getModelName(self):\n        return self.getOrDefault(self.modelName)\n\n    def _transform(self, dataset):\n        transformer = _NamedImageTransformer(inputCol=self.getInputCol(),\n                                             outputCol=self._getIntermediateOutputCol(),\n                                             modelName=self.getModelName(), featurize=False)\n        transformed = transformer.transform(dataset)\n        if self.getOrDefault(self.decodePredictions):\n            return self._decodeOutputAsPredictions(transformed)\n        else:\n            return transformed.withColumnRenamed(\n                self._getIntermediateOutputCol(), self.getOutputCol())\n\n    def _decodeOutputAsPredictions(self, df):\n        # If we start having different weights than imagenet, we'll need to\n        # move this logic to individual model building in NamedImageTransformer.\n        # Also, we could put the computation directly in the main computation\n        # graph or use a scala UDF for potentially better performance.\n        topK = self.getOrDefault(self.topK)\n        def decode(predictions):\n            pred_arr = np.expand_dims(np.array(predictions), axis=0)\n            decoded = decode_predictions(pred_arr, top=topK)[0]\n            # convert numpy dtypes to python native types\n            return [(t[0], t[1], t[2].item()) for t in decoded]\n        decodedSchema = ArrayType(\n            StructType([StructField(\"class\", StringType(), False),\n                        StructField(\"description\", StringType(), False),\n                        StructField(\"probability\", FloatType(), False)]))\n        decodeUDF = udf(decode, decodedSchema)\n        interim_output = self._getIntermediateOutputCol()\n        return (\n            df.withColumn(self.getOutputCol(), decodeUDF(df[interim_output]))\n            .drop(interim_output)\n        )\n\n    def _getIntermediateOutputCol(self):\n        return \"__tmp_\" + self.getOutputCol()\n\n\n# TODO: give an option to take off multiple layers so it can be used in tuning\n# (could be the name of the layer or int for how many to take off).\nclass DeepImageFeaturizer(Transformer, HasInputCol, HasOutputCol):\n    \"\"\"\n    Applies the model specified by its popular name, with its prediction layer(s) chopped off,\n    to the image column in DataFrame. The output is a MLlib Vector so that DeepImageFeaturizer\n    can be used in a MLlib Pipeline.\n    \"\"\"\n\n    modelName = Param(Params._dummy(), \"modelName\", \"A deep learning model name\",\n                      typeConverter=SparkDLTypeConverters.supportedNameConverter(SUPPORTED_MODELS))\n\n    @keyword_only\n    def __init__(self, inputCol=None, outputCol=None, modelName=None):\n        \"\"\"\n        __init__(self, inputCol=None, outputCol=None, modelName=None)\n        \"\"\"\n        super(DeepImageFeaturizer, self).__init__()\n        kwargs = self._input_kwargs\n        self.setParams(**kwargs)\n\n    @keyword_only\n    def setParams(self, inputCol=None, outputCol=None, modelName=None):\n        \"\"\"\n        setParams(self, inputCol=None, outputCol=None, modelName=None)\n        \"\"\"\n        kwargs = self._input_kwargs\n        self._set(**kwargs)\n        return self\n\n    def setModelName(self, value):\n        return self._set(modelName=value)\n\n    def getModelName(self):\n        return self.getOrDefault(self.modelName)\n\n    def _transform(self, dataset):\n        transformer = _NamedImageTransformer(inputCol=self.getInputCol(),\n                                             outputCol=self.getOutputCol(),\n                                             modelName=self.getModelName(), featurize=True)\n        return transformer.transform(dataset)\n\n\nclass _NamedImageTransformer(Transformer, HasInputCol, HasOutputCol):\n    \"\"\"\n    For internal use only. NamedImagePredictor and NamedImageFeaturizer are the recommended classes\n    to use.\n\n    Applies the model specified by its popular name to the image column in DataFrame. There are\n    two output modes: predictions or the featurization from the model. In either case the output\n    is a MLlib Vector.\n    \"\"\"\n\n    modelName = Param(Params._dummy(), \"modelName\", \"A deep learning model name\",\n                      typeConverter=SparkDLTypeConverters.supportedNameConverter(SUPPORTED_MODELS))\n    featurize = Param(Params._dummy(), \"featurize\",\n                      \"If true, output features. If false, output predictions. Either way the output is a vector.\",\n                      typeConverter=TypeConverters.toBoolean)\n\n    @keyword_only\n    def __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False):\n        \"\"\"\n        __init__(self, inputCol=None, outputCol=None, modelName=None, featurize=False)\n        \"\"\"\n        super(_NamedImageTransformer, self).__init__()\n        kwargs = self._input_kwargs\n        self.setParams(**kwargs)\n        self._inputTensorName = None\n        self._outputTensorName = None\n        self._outputMode = None\n\n    @keyword_only\n    def setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False):\n        \"\"\"\n        setParams(self, inputCol=None, outputCol=None, modelName=None, featurize=False)\n        \"\"\"\n        kwargs = self._input_kwargs\n        self._set(**kwargs)\n        return self\n\n    def setModelName(self, value):\n        return self._set(modelName=value)\n\n    def getModelName(self):\n        return self.getOrDefault(self.modelName)\n\n    def setFeaturize(self, value):\n        return self._set(featurize=value)\n\n    def getFeaturize(self):\n        return self.getOrDefault(self.featurize)\n\n    def _transform(self, dataset):\n        modelGraphSpec = _buildTFGraphForName(self.getModelName(), self.getFeaturize())\n        inputCol = self.getInputCol()\n        resizedCol = \"__sdl_imagesResized\"\n        tfTransformer = TFImageTransformer(inputCol=resizedCol,\n                                           outputCol=self.getOutputCol(),\n                                           graph=modelGraphSpec[\"graph\"],\n                                           inputTensor=modelGraphSpec[\"inputTensorName\"],\n                                           outputTensor=modelGraphSpec[\"outputTensorName\"],\n                                           outputMode=modelGraphSpec[\"outputMode\"])\n        resizeUdf = resizeImage(modelGraphSpec[\"inputTensorSize\"])\n        result = tfTransformer.transform(dataset.withColumn(resizedCol, resizeUdf(inputCol)))\n        return result.drop(resizedCol)\n\n\ndef _buildTFGraphForName(name, featurize):\n    if name == \"InceptionV3\":\n        modelData = _buildInceptionV3Session(featurize)\n    else:\n        raise ValueError(\"%s is not a supported model. Supported models: %s\" % name,\n                         str(SUPPORTED_MODELS))\n\n    sess = modelData[\"session\"]\n    outputTensorName = modelData[\"outputTensorName\"]\n    graph = tfx.strip_and_freeze_until([outputTensorName], sess.graph, sess, return_graph=True)\n\n    modelData[\"graph\"] = graph\n    return modelData\n\n\ndef _buildInceptionV3Session(featurize):\n    sess = tf.Session()\n    with sess.as_default():\n        K.set_learning_phase(0)\n        inputImage = imageInputPlaceholder(nChannels=3)\n        preprocessed = inception_v3.preprocess_input(inputImage)\n        model = InceptionV3(input_tensor=preprocessed, weights=\"imagenet\",\n                            include_top=(not featurize))\n    return dict(inputTensorName=inputImage.name,\n                outputTensorName=model.output.name,\n                session=sess,\n                inputTensorSize=InceptionV3Constants.INPUT_SHAPE,\n                outputMode=\"vector\")\n","sub_path":"python/sparkdl/transformers/named_image.py","file_name":"named_image.py","file_ext":"py","file_size_in_byte":10632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"417688785","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport shutil\nimport argparse\nimport os\nimport json\nimport random\nimport warnings\nfrom termcolor import colored\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.utils.data as data\n\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint, Timer\nfrom ignite.metrics import RunningAverage\nfrom tensorboardX import SummaryWriter\n\nimport imgaug # https://github.com/aleju/imgaug\nfrom imgaug import augmenters as iaa\n\nimport misc\nimport dataset\nfrom net import DenseNet\nfrom config import Config\nfrom focal_loss import FocalLoss\n####\nclass Trainer(Config):\n    ####\n    def view_dataset(self, mode='train'):\n        train_pairs, valid_pairs = dataset.prepare_data_CANCER()\n        if mode == 'train':\n            train_augmentors = self.train_augmentors()\n            ds = dataset.DatasetSerial(train_pairs,\n                                       shape_augs=iaa.Sequential(train_augmentors[0]),\n                                       input_augs=iaa.Sequential(train_augmentors[1]))\n        else:\n            infer_augmentors = self.infer_augmentors()\n            ds = dataset.DatasetSerial(valid_pairs,\n                                       shape_augs=iaa.Sequential(infer_augmentors))\n        dataset.visualize(ds, 1)\n        return\n    ####\n    def train_step(self, net, batch, optimizer, device):\n        net.train() # train mode\n\n        imgs, true = batch # batch is NHWC\n        imgs = imgs.permute(0, 3, 1, 2) # to NCHW\n\n\n\n        # push data to GPUs and convert to float32\n        imgs = imgs.to(device).float()\n        true = true.to(device).long()# not one-hot\n\n        # -----------------------------------------------------------\n        net.zero_grad() # not rnn so not accumulate\n\n        logit = net(imgs) # forward\n        prob = F.softmax(logit, dim=1)\n\n        # has built-int log softmax so accept logit\n        # true = torch.squeeze(true)\n        focal_loss=FocalLoss(gamma=0.5)\n\n        loss = focal_loss(logit, true)\n\n        prob = prob.permute(0, 2, 3, 1) # to NHWC\n        pred = torch.argmax(prob, dim=-1)\n\n        # with ignore index at 0\n        foc = (true > 0).type(torch.float32)\n        acc = (pred == true).type(torch.float32) * foc\n        acc = torch.sum(acc) / torch.sum(foc)\n\n        # gradient update\n        loss.backward()\n        optimizer.step()\n\n        # -----------------------------------------------------------\n        return dict(loss=loss.item(), \n                    acc=acc.item())\n    ####\n    # def infer_step(self, net, batch, device):\n    #     net.eval() # infer mode\n    #\n    #     imgs, true = batch # batch is NHWC\n    #     imgs = imgs.permute(0, 3, 1, 2) # to NCHW\n    #\n    #     # push data to GPUs and convert to float32\n    #     imgs = imgs.to(device).float()\n    #\n    #     # -----------------------------------------------------------\n    #     with torch.no_grad(): # dont compute gradient\n    #         logit = net(imgs) # forward\n    #         prob = nn.functional.softmax(logit, dim=1)\n    #         prob = prob.permute(0, 2, 3, 1) # to NHWC\n    #         return dict(prob=prob.cpu().numpy(),\n    #                     true=true.numpy())\n    ####\n    def run_once(self):\n        \n        log_dir = self.log_dir\n\n        misc.check_manual_seed(self.seed)\n        train_pairs, valid_pairs = dataset.prepare_data_CANCER()\n        print(len(train_pairs))\n        # --------------------------- Dataloader\n\n        train_augmentors = self.train_augmentors()\n        train_dataset = dataset.DatasetSerial(train_pairs[:],\n                                              shape_augs=iaa.Sequential(train_augmentors[0]),\n                                              input_augs=iaa.Sequential(train_augmentors[1]))\n\n        infer_augmentors = self.infer_augmentors()\n        infer_dataset = dataset.DatasetSerial(valid_pairs[:],\n                                              shape_augs=iaa.Sequential(infer_augmentors))\n\n        train_loader = data.DataLoader(train_dataset, \n                                       num_workers=self.nr_procs_train, \n                                       batch_size=self.train_batch_size, \n                                       shuffle=True, drop_last=True)\n\n        valid_loader = data.DataLoader(infer_dataset, \n                                       num_workers=self.nr_procs_valid, \n                                       batch_size=self.infer_batch_size, \n                                       shuffle=True, drop_last=False)\n\n        # --------------------------- Training Sequence\n\n        if self.logging:\n            misc.check_log_dir(log_dir)\n\n        device = 'cuda'\n\n        # networks\n        input_chs = 3 \n        net = DenseNet(input_chs, self.nr_classes)\n        net = torch.nn.DataParallel(net).to(device)\n        # print(net)\n\n        # optimizers\n        optimizer = optim.Adam(net.parameters(), lr=self.init_lr)\n        scheduler = optim.lr_scheduler.StepLR(optimizer, self.lr_steps)\n\n        # load pre-trained models\n        if self.load_network:\n            saved_state = torch.load(self.save_net_path)\n            net.load_state_dict(saved_state)\n        #\n        trainer = Engine(lambda engine, batch: self.train_step(net, batch, optimizer, 'cuda'))\n        inferer = Engine(lambda engine, batch: self.infer_step(net, batch, 'cuda'))\n\n        train_output = ['loss', 'acc']\n        infer_output = ['prob', 'true']\n        ##\n\n        if self.logging:\n            checkpoint_handler = ModelCheckpoint(log_dir, self.chkpts_prefix, \n                                                 save_interval=1, n_saved=120, require_empty=False)\n            # adding handlers using `trainer.add_event_handler` method API\n            trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler,\n                                      to_save={'net': net}) \n\n        timer = Timer(average=True)\n        timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n                     pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n        timer.attach(inferer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,\n                     pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)\n\n        # attach running average metrics computation\n        # decay of EMA to 0.95 to match tensorpack default\n        RunningAverage(alpha=0.95, output_transform=lambda x: x['loss']).attach(trainer, 'loss')\n        RunningAverage(alpha=0.95, output_transform=lambda x: x['acc']).attach(trainer, 'acc')\n\n        # attach progress bar\n        pbar = ProgressBar(persist=True)\n        pbar.attach(trainer, metric_names=['loss'])\n        pbar.attach(inferer)\n\n        # adding handlers using `trainer.on` decorator API\n        @trainer.on(Events.EXCEPTION_RAISED)\n        def handle_exception(engine, e):\n            if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):\n                engine.terminate()\n                warnings.warn('KeyboardInterrupt caught. 
Exiting gracefully.')\n checkpoint_handler(engine, {'net_exception': net})\n else:\n raise e\n\n # writer for tensorboard logging\n if self.logging:\n writer = SummaryWriter(log_dir=log_dir)\n json_log_file = log_dir + '/stats.json'\n with open(json_log_file, 'w') as json_file:\n json.dump({}, json_file) # create empty file\n\n @trainer.on(Events.EPOCH_STARTED)\n def log_lrs(engine):\n if self.logging:\n lr = float(optimizer.param_groups[0]['lr'])\n writer.add_scalar(\"lr\", lr, engine.state.epoch)\n # advance scheduler clock\n scheduler.step()\n\n ####\n def update_logs(output, epoch, prefix, color):\n # print values and convert\n max_length = len(max(output.keys(), key=len))\n for metric in output:\n key = colored(prefix + '-' + metric.ljust(max_length), color)\n print('------%s : ' % key, end='')\n print('%0.7f' % output[metric])\n if 'train' in prefix:\n lr = float(optimizer.param_groups[0]['lr'])\n key = colored(prefix + '-' + 'lr'.ljust(max_length), color)\n print('------%s : %0.7f' % (key, lr))\n\n if not self.logging:\n return\n\n # create stat dicts\n stat_dict = {}\n for metric in output:\n metric_value = output[metric] \n stat_dict['%s-%s' % (prefix, metric)] = metric_value\n\n # json stat log file, update and overwrite\n with open(json_log_file) as json_file:\n json_data = json.load(json_file)\n\n current_epoch = str(epoch)\n if current_epoch in json_data:\n old_stat_dict = json_data[current_epoch]\n stat_dict.update(old_stat_dict)\n current_epoch_dict = {current_epoch : stat_dict}\n json_data.update(current_epoch_dict)\n\n with open(json_log_file, 'w') as json_file:\n json.dump(json_data, json_file)\n\n # log values to tensorboard\n for metric in output:\n writer.add_scalar(prefix + '-' + metric, output[metric], current_epoch)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_train_running_results(engine):\n \"\"\"\n running training measurement\n \"\"\"\n training_ema_output = engine.state.metrics #\n update_logs(training_ema_output, engine.state.epoch, prefix='train-ema', color='green')\n\n ####\n def get_init_accumulator(output_names):\n return {metric : [] for metric in output_names}\n\n import cv2\n def process_accumulated_output(output):\n def uneven_seq_to_np(seq, batch_size=self.infer_batch_size):\n if self.infer_batch_size == 1:\n return np.squeeze(seq)\n \n item_count = batch_size * (len(seq) - 1) + len(seq[-1])\n cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)\n for idx in range(0, len(seq)-1):\n cat_array[idx * batch_size : \n (idx+1) * batch_size] = seq[idx] \n cat_array[(idx+1) * batch_size:] = seq[-1]\n return cat_array\n #\n prob = uneven_seq_to_np(output['prob'])\n true = uneven_seq_to_np(output['true'])\n\n # cmap = plt.get_cmap('jet')\n # epi = prob[...,1]\n # epi = (cmap(epi) * 255.0).astype('uint8')\n # cv2.imwrite('sample.png', cv2.cvtColor(epi, cv2.COLOR_RGB2BGR))\n\n pred = np.argmax(prob, axis=-1)\n true = np.squeeze(true)\n\n # deal with ignore index\n pred = pred.flatten()\n true = true.flatten()\n pred = pred[true != 0] - 1\n true = true[true != 0] - 1\n\n acc = np.mean(pred == true)\n inter = (pred * true).sum()\n total = (pred + true).sum()\n dice = 2 * inter / total\n #\n proc_output = dict(acc=acc, dice=dice)\n return proc_output\n\n # @trainer.on(Events.EPOCH_COMPLETED)\n # def infer_valid(engine):\n # \"\"\"\n # inference measurement\n # \"\"\"\n # inferer.accumulator = get_init_accumulator(infer_output)\n # inferer.run(valid_loader)\n # output_stat = process_accumulated_output(inferer.accumulator)\n # 
update_logs(output_stat, engine.state.epoch, prefix='valid', color='red')\n\n @inferer.on(Events.ITERATION_COMPLETED)\n def accumulate_outputs(engine):\n batch_output = engine.state.output\n for key, item in batch_output.items():\n engine.accumulator[key].extend([item])\n ###\n #Setup is done. Now let's run the training\n trainer.run(train_loader, self.nr_epochs)\n return\n ####\n\n####\nif __name__ == '__main__':\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = \"0,1\"\n trainer = Trainer()\n trainer.run_once()\n # trainer.view_dataset()","sub_path":"segmentation/train_focal.py","file_name":"train_focal.py","file_ext":"py","file_size_in_byte":12387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"650176327","text":"import asyncio\nimport datetime\nimport json\nimport logging\nimport os\nimport re\nfrom datetime import timezone, timedelta, datetime\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import *\nfrom urllib import request\nfrom zipstream import AioZipStream\n\nimport aiofiles\nimport telethon\nfrom aiodav import Client\nfrom telethon.events import NewMessage, CallbackQuery\nfrom telethon.tl.custom import Message, Button, MessageButton\n\nimport upload\nfrom download import download_url\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',\n level=logging.WARNING)\n\nif __name__ == '__main__':\n\n loop = asyncio.get_event_loop()\n\n admin_id: str\n api_id: int\n api_hash: str\n bot_token: str\n auth_users: dict\n zipping: dict\n users_channel: int\n users_post_id: int\n\n\n async def load_env():\n global admin_id, api_id, api_hash, bot_token, auth_users, users_channel, users_post_id\n if \"env.json\" in os.listdir('.'):\n with open('env.json', 'r') as env_doc:\n env: dict = json.load(env_doc)\n admin_id = env['ADMIN_ID']\n api_id = int(env['API_ID'])\n api_hash = env['API_HASH']\n bot_token = env['BOT_TOKEN']\n users_channel = int(env['USERS_CHANNEL'])\n users_post_id = int(env['USERS_POST_ID'])\n else:\n admin_id = os.getenv('ADMIN_ID')\n api_id = int(os.getenv('API_ID'))\n api_hash = os.getenv('API_HASH')\n bot_token = os.getenv('BOT_TOKEN')\n users_channel = int(os.getenv('USERS_CHANNEL'))\n users_post_id = int(os.getenv('USERS_POST_ID'))\n\n\n async def load_users():\n global auth_users, bot, users_channel, users_post_id\n users_message: Message = await bot.get_messages(users_channel, ids=users_post_id)\n file = await bot.download_media(users_message, 'users/users.json')\n with open(file, 'r') as doc:\n auth_users = json.load(doc)\n\n\n asyncio.get_event_loop().run_until_complete(load_env())\n bot = telethon.TelegramClient('bot', api_id=api_id, api_hash=api_hash).start(bot_token=bot_token)\n asyncio.get_event_loop().run_until_complete(load_users())\n up_lock_dict = {}\n down_lock_dict = {}\n tasks_dict = {}\n zipping = {}\n upload_path = '/TG Uploads/'\n slow_time = 5\n\n\n # region users\n\n @bot.on(NewMessage(pattern='/start'))\n async def start(event: NewMessage.Event):\n chatter = str(event.chat_id)\n if chatter not in auth_users.keys() and chatter != admin_id:\n return\n if 'username' not in auth_users[chatter].keys():\n await event.respond('Please type /login')\n return\n await event.respond('Send me a file and I will upload it to your owncloud server')\n\n\n @bot.on(NewMessage(pattern=r'/login'))\n async def login(event: Union[NewMessage, Message]):\n chatter = str(event.chat_id)\n if chatter not in auth_users.keys() and 
chatter != admin_id:\n return\n\n async with bot.conversation(event.chat_id) as conv:\n try:\n await conv.send_message('Please select your NextCLoud server\\n'\n '/UCLV\\n'\n '/UO')\n resp: Message = await conv.get_response(timeout=60)\n if resp.raw_text == '/UCLV':\n auth_users[chatter]['cloud'] = \"https://nube.uclv.cu\"\n elif resp.raw_text == '/UO':\n auth_users[chatter]['cloud'] = \"https://nube.uo.edu.cu\"\n else:\n await conv.send_message('Invalid server, please try again')\n return\n await conv.send_message('Please send your nextcloud username')\n resp = await conv.get_response(timeout=60)\n auth_users[chatter]['username'] = resp.raw_text\n await conv.send_message('Now send your password please')\n resp = await conv.get_response(timeout=60)\n auth_users[chatter]['password'] = resp.raw_text\n await save_auth_users()\n await conv.send_message('User saved correctly, you may start using the bot')\n except:\n await conv.send_message('Login failed')\n\n\n @bot.on(NewMessage())\n async def file_handler(event: Union[NewMessage.Event, Message]):\n chatter = str(event.chat_id)\n if not event.file or event.sticker or event.voice or zipping.get(chatter):\n return\n if chatter not in auth_users.keys():\n return\n if not auth_users[chatter]['username']:\n await event.respond('Please type /login')\n return\n user_tasks = get_user_task_dict(chatter)\n reply: Message = await event.reply('File download queued')\n button = [Button.inline('Cancel', b'cancel_task=' + str(reply.id).encode())]\n await reply.edit('File download queued', buttons=button)\n user_tasks[reply.id] = loop.create_task(file_task(event, reply, chatter, button))\n try:\n await user_tasks[reply.id]\n finally:\n pass\n\n\n @bot.on(NewMessage(pattern=r'/link\\s([^\\s]+)(?:\\s+\\|\\s+)?([^\\s].*)?'))\n async def link_handler(event: Union[NewMessage, Message]):\n chatter = str(event.chat_id)\n if chatter not in auth_users.keys() or zipping.get(chatter):\n raise\n if not auth_users[chatter]['username']:\n await event.respond('Please type /login')\n raise\n user_tasks = get_user_task_dict(chatter)\n url = event.pattern_match.group(1)\n filename = None\n try:\n if event.pattern_match.group(2).strip():\n filename = str(event.pattern_match.group(2)).strip()\n except:\n filename = None\n reply: Message = await event.respond(f'{filename if filename else url} download queued')\n button = [Button.inline('Cancel', b'cancel_task=' + str(reply.id).encode())]\n await reply.edit(f'{filename if filename else url} download queued', buttons=button)\n user_tasks[reply.id] = loop.create_task(link_task(event, reply, chatter, url, filename, button))\n try:\n await user_tasks[reply.id]\n finally:\n pass\n\n\n @bot.on(NewMessage(pattern=r'/zip\\s(.+)'))\n async def zip_handler(event: Union[NewMessage.Event, Message]):\n global zipping\n chatter = str(event.chat_id)\n if chatter not in auth_users.keys() or zipping.get(chatter):\n raise\n if not auth_users[chatter]['username']:\n await event.respond('Please type /login')\n raise\n user_tasks = get_user_task_dict(chatter)\n zip_name = event.pattern_match.group(1)\n folder_path: Path = get_down_path(chatter).joinpath(zip_name)\n zipping[chatter] = True\n try:\n async with bot.conversation(event.chat_id) as conv:\n r: Message = await conv.send_message('Start sending me files and i\\'ll zip and upload them'\n '\\n/stop to start zipping\\n/cancel to cancel', reply_to=event)\n m: Message = await conv.get_response()\n m_download_list: List[Message] = []\n while not m.raw_text.startswith(('/cancel', '/stop')):\n if not 
m.file or m.sticker or m.voice:\n m = await conv.get_response()\n continue\n m_download_list.append(m)\n m = await conv.get_response()\n zipping[chatter] = False\n if m.raw_text.startswith('/cancel'):\n await conv.send_message('Ok, cancelled', reply_to=m)\n return\n except:\n zipping[chatter] = False\n raise\n try:\n button = [Button.inline('Cancel', b'cancel_task=' + str(r.id).encode())]\n await r.edit(f'{zip_name} download queued', buttons=button)\n user_tasks[r.id] = loop.create_task(\n zip_task(m_download_list, folder_path, r, zip_name, chatter, event, button))\n try:\n await user_tasks[r.id]\n finally:\n pass\n except:\n raise\n\n @bot.on(CallbackQuery(data=re.compile(b'cancel_task=(\\d+)')))\n async def cancel_handler(event: CallbackQuery):\n chatter = str(event.chat_id)\n task_to_cancel = int(event.data_match.group(1).decode())\n user_tasks = get_user_task_dict(chatter)\n user_tasks[task_to_cancel].cancel()\n await bot.edit_message(int(chatter), message=task_to_cancel, text='Cancelled')\n\n\n # endregion\n\n # region admin\n\n @bot.on(NewMessage(pattern=r'/add_user_(-?\\d+)'))\n async def add_user(event: Union[NewMessage.Event, Message]):\n chatter = str(event.chat_id)\n if chatter != admin_id:\n return\n user = event.pattern_match.group(1)\n auth_users[user] = {}\n await save_auth_users()\n await event.respond('User added')\n\n\n @bot.on(NewMessage(pattern=r'/del_user_(-?\\d+)'))\n async def del_user(event: Union[NewMessage.Event, Message]):\n chatter = str(event.chat_id)\n if chatter != admin_id:\n return\n user = event.pattern_match.group(1);\n auth_users.pop(user)\n await save_auth_users()\n await event.respond('User deleted')\n\n\n @bot.on(NewMessage(pattern='/broadcast'))\n async def broadcast(event: Union[NewMessage, Message]):\n chatter = str(event.chat_id)\n if chatter != admin_id or event.reply_to_msg_id is None:\n return\n bc: Message = await event.get_reply_message()\n for user in auth_users.keys():\n try:\n if user != admin_id:\n await bot.send_message(int(user), message=bc)\n except:\n continue\n\n\n # endregion\n\n # region tasks\n\n\n async def zip_task(message_download_list: List[Message], folder_path: Path, reply_message: Message, zip_name: str,\n chatter: str, event, button):\n\n files: List[{}] = []\n for mes in message_download_list:\n if not mes.file.name:\n filename = str(message_download_list.index(mes)) + mes.file.ext\n else:\n filename = mes.file.name\n async with get_down_lock(chatter):\n files.append({'file': await tg_download(mes, reply_message, filename=filename,\n download_path=folder_path, button=button)})\n zip_path = str(folder_path) + '.zip'\n await reply_message.edit('Zipping...', buttons=button)\n await zip_async(zip_path, files, slow(slow_time)(\n partial(refresh_progress_status, zip_name, reply_message, 'Zipped', button)))\n await reply_message.edit(f'{zip_name} upload queued', buttons=button)\n async with get_up_lock(chatter):\n await cloud_upload(zip_path, reply_message, event, button=button)\n\n\n async def file_task(event, reply_message, chatter, button):\n async with get_down_lock(chatter):\n try:\n downloaded_file = await tg_download(event=event, reply=reply_message,\n download_path=get_down_path(chatter), button=button)\n except:\n return\n await reply_message.edit(f'{os.path.basename(downloaded_file)} upload queued', buttons=button)\n async with get_up_lock(chatter):\n try:\n await cloud_upload(downloaded_file, reply_message, event, button)\n except Exception as exc:\n raise exc\n\n\n async def link_task(event, reply_message, chatter, 
url, filename, button):\n async with get_down_lock(chatter):\n filepath = await url_download(reply_message, url, filename, get_down_path(chatter), button=button)\n await reply_message.edit(f'{os.path.basename(filepath)} upload queued', buttons=button)\n async with get_up_lock(chatter):\n await cloud_upload(filepath, reply_message, event, button=button)\n\n\n # endregion\n\n # region funcs\n\n async def tg_download(event: Union[NewMessage.Event, Message], reply, filename: str = None,\n download_path: Path = Path('./downloads'), button=None) -> str:\n\n if not filename:\n if not event.file.name:\n async with bot.conversation(event.chat_id) as conv:\n s: Message = await conv.send_message('File has no filename. Please Provide one.'\n '\\nNote that extension is not needed.'\n '\\nThis option expires in 1 min.'\n '\\nYou can cancel using /cancel.')\n e = Exception()\n try:\n resp: Message = await conv.get_response(s, timeout=60)\n if resp.raw_text == '/cancel':\n await s.edit('Cancelled')\n e = Exception('que loco')\n raise\n else:\n filename = f'{resp.raw_text}{event.file.ext}'\n await s.edit(f'File name set to {filename}')\n except Exception as efe:\n if efe is e:\n raise\n await s.edit('File name was never provided. File could not be processed.')\n raise\n else:\n filename = event.file.name\n os.makedirs(download_path, exist_ok=True)\n if filename in os.listdir(download_path):\n await reply.edit(f'{filename} already downloaded', buttons=button)\n return str(download_path.joinpath(filename))\n else:\n await reply.edit(f'{filename} being downloaded', buttons=button)\n\n try:\n filepath = await event.download_media(download_path, progress_callback=slow(slow_time)(\n partial(refresh_progress_status, filename, reply, 'Downloaded', button)))\n await reply.edit(f'{filename} downloaded', buttons=button)\n except Exception as exc:\n print(exc)\n await reply.edit(f'{filename} could not be downloaded\\n{exc}')\n raise\n return filepath\n\n\n async def cloud_upload(filepath: str, reply: Message, event: Union[NewMessage.Event, Message], button=None):\n filename = os.path.basename(filepath)\n uppath = upload_path + filename\n user = auth_users[str(event.chat_id)]\n await reply.edit(f'{filename} being uploaded', buttons=button)\n\n try:\n async with Client(f'{user[\"cloud\"]}/remote.php/webdav', login=user['username'],\n password=user['password'], chunk_size=1024 * 1024) as cloud_client:\n if not await cloud_client.exists(upload_path):\n await cloud_client.create_directory(upload_path)\n file_cloud_name = filename\n while await cloud_client.exists(upload_path + file_cloud_name):\n uppath += 'copy'\n file_cloud_name += 'copy'\n async with aiofiles.open(filepath, 'rb') as file:\n await upload.upload_to(cloud_client, path=uppath, buffer=file,\n buffer_size=os.path.getsize(filepath),\n progress=slow(slow_time)(\n partial(refresh_progress_status, filename, reply, 'Uploaded', button)))\n await reply.edit(f'{filename} uploaded correctly')\n except Exception as exc:\n print(exc)\n await reply.edit(f'{filename} could not be uploaded')\n raise exc\n\n\n async def url_download(reply, url: str, filename: str = None, download_path: Path = Path('./downloads'),\n button=None) -> str:\n try:\n req = request.Request(url)\n req.add_header(\"User-Agent\",\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0\")\n try:\n httpResponse = request.urlopen(req)\n except:\n await reply.edit('An error occurred accessing the url')\n raise\n path = Path(httpResponse.url)\n if not filename:\n if 
str(httpResponse.status).startswith('2'):\n if not path.name or not path.suffix:\n await reply.edit('File has no file name, please provide one in the link upload request\n'\n '/link | ')\n raise Exception('No file name')\n else:\n filename = str(path.name)\n else:\n await reply.edit(f'Request error with code {httpResponse.status}.')\n raise Exception(f'Request error with code {httpResponse.status}.')\n if '.' not in filename:\n filename = filename + ''.join(path.suffixes)\n file_size = httpResponse.length\n if not file_size:\n await reply.edit(\"Invalid file, has no filesize\")\n raise Exception(\"Invalid file, has no filesize\")\n await reply.edit(f'Downloading {filename}', buttons=button)\n async with aiofiles.open(download_path.joinpath(filename), 'wb') as o_file:\n await download_url(o_file, url, file_size,\n callback=slow(slow_time)(\n partial(refresh_progress_status, filename, reply, 'Downloaded', button)))\n await reply.edit(\"Link downloaded\", buttons=button)\n return str(download_path.joinpath(filename))\n except Exception as e:\n await reply.respond(str(e))\n await reply.edit('Cannot access url')\n raise\n\n\n async def save_auth_users():\n with open('users/users.json', 'w') as doc:\n json.dump(auth_users, doc)\n await bot.edit_message(entity=users_channel, message=users_post_id, file='users/users.json')\n\n\n def get_up_lock(user: str) -> asyncio.Lock:\n if not up_lock_dict.get(user):\n up_lock_dict[user] = asyncio.Lock()\n return up_lock_dict[user]\n\n\n def get_down_lock(user: str) -> asyncio.Lock:\n if not down_lock_dict.get(user):\n down_lock_dict[user] = asyncio.Lock()\n return down_lock_dict[user]\n\n\n def get_user_task_dict(user: str):\n if not tasks_dict.get(user):\n tasks_dict[user] = {}\n return tasks_dict[user]\n\n\n def get_down_path(user: str) -> Path:\n os.makedirs(f'./downloads/{user}', exist_ok=True)\n return Path(f'./downloads/{user}/')\n\n\n def slow(secs):\n def dec(f):\n t = {'last_update': datetime.now(timezone.utc) - timedelta(minutes=1)}\n\n async def wrapper(*args, **kwargs):\n now = datetime.now(timezone.utc)\n if now - t['last_update'] < timedelta(seconds=secs):\n return\n t['last_update'] = now\n return await f(*args, **kwargs)\n\n return wrapper\n\n return dec\n\n\n async def refresh_progress_status(name: str, reply: Message, operation: str, button: List, transferred_bytes: int,\n total_bytes: int):\n try:\n await reply.edit(\n f\"{name}:\\n\"\n f\"{operation} {sizeof_fmt(transferred_bytes)} out of {sizeof_fmt(total_bytes)}\"\n f\"(\\n{round(transferred_bytes * 100 / total_bytes, 2)}%)\", buttons=button)\n finally:\n return\n\n\n async def zip_async(zipname, files, callback=None):\n chunk_size = 32 * 1024\n aio_zip = AioZipStream(files, chunksize=chunk_size)\n size = 0\n current = 0\n for file in files:\n size += os.path.getsize(file['file'])\n async with aiofiles.open(zipname, mode='wb') as z:\n async for chunk in aio_zip.stream():\n if callback:\n current += len(chunk)\n await callback(current, size)\n await z.write(chunk)\n\n\n def sizeof_fmt(num, suffix='B'):\n for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n\n # endregion\n\n @bot.on(NewMessage(pattern='/save'))\n async def savexd(event: Union[Message, NewMessage]):\n c_id: int = event.chat_id\n m_id: int = event.reply_to_msg_id\n await event.respond(f'{c_id}, {m_id}')\n\n\n 
loop.run_forever()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"522334641","text":"# OSM devops/charms - Ansible charm inside OSM devops\n#\n# Copyright 2017-2018 Universidad Carlos III de Madrid\n# Copyright 2018 Altran\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom charmhelpers.core.hookenv import (\n action_get,\n action_fail,\n action_set,\n config,\n status_set,\n)\n\nfrom charms.reactive import (\n remove_state as remove_flag,\n set_state as set_flag,\n when,\n)\nimport charms.sshproxy\n\nfrom subprocess import (\n Popen,\n CalledProcessError,\n PIPE,\n)\n\n#from charms.ansible import apply_playbook\nimport os, fnmatch\nimport subprocess\n\ncfg = config()\n\n\n# Sets the status of the charm to show in OSM: configured\n@when('config.changed')\ndef config_changed():\n set_flag('ansible-charm.configured')\n status_set('active', 'ready!')\n return\n\n\n# Edits ansible config files and executes ansible-playbook\n@when('ansible-charm.configured')\n@when('actions.ansible-playbook')\ndef ansible_playbook():\n try:\n # Retrieve the ssh parameter\n cfg = config()\n # edit ansible hosts file with the VNF parameters\n h = open(\"/etc/ansible/hosts\", \"wt\")\n h.write(\"[test]\\n\")\n h1 = \"{} ansible_connection=ssh ansible_ssh_user={} ansible_ssh_pass={} ansible_python_interpreter=/usr/bin/python3\\n\".format(cfg['ssh-hostname'],cfg['ssh-username'],cfg['ssh-password'])\n h.write(h1)\n h.close()\n # edit ansible config to enable ssh connection with th VNF\n c = open(\"/etc/ansible/ansible.cfg\", \"wt\")\n c.write(\"[defaults]\\n\")\n c.write(\"host_key_checking = False\\n\")\n c.close()\n # execute the ansible playbook\n path = find('playbook.yaml','/var/lib/juju/agents/')\n call = ['ansible-playbook', path]\n subprocess.check_call(call)\n except Exception as e:\n action_fail('command failed: {}, errors: {}'.format(e, e.output))\n remove_flag('actions.ansible-playbook')\n return\n finally:\n remove_flag('actions.ansible-playbook')\n\n\n# Function to find the playbook path\ndef find(pattern, path):\n result = ''\n for root, dirs, files in os.walk(path):\n for name in files:\n if fnmatch.fnmatch(name, pattern):\n result = os.path.join(root, name)\n return result\n","sub_path":"charms/layers/ansible-charm/reactive/ansible_charm.py","file_name":"ansible_charm.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"264998589","text":"from math import sqrt\n\ndef isPrime(n):\n if n <= 3:\n return n >= 2\n if n % 2 == 0 or n % 3 == 0:\n return False\n for i in range(5, int(n ** 0.5) + 1, 6):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n return True\n\nprimes = [2, 3]\ncomposites = set()\nsearch = 8000\n\nfor i in range(5, search, 2):\n if isPrime(i):\n primes.append(i)\n\nfor i in primes:\n for j in range(int(sqrt(search))):\n composites.add(i + 2 
* j * j)\n\nfor i in range(3, search, 2):\n if i not in composites:\n print(i)\n break\n","sub_path":"46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605645650","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ncap = cv2.VideoCapture(0)\nface_cascade = cv2.CascadeClassifier(\"haar-cascade-files-master/haarcascade_frontalface_alt.xml\")\n\nwhile True:\n ret, frame = cap.read()\n if ret ==False:\n continue\n faces = face_cascade.detectMultiScale(frame,1.3,5)\n\n key_pressed = cv2.waitKey(1) & 0xFF\n if key_pressed == ord('q'):\n break\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n cv2.imshow(\"Video Frame\", frame)\ncap.release()\ncv2.destroyAllWindows()","sub_path":"justfacedetection.py","file_name":"justfacedetection.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"528847046","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = \"https://www.imdb.com/chart/top\"\r\nresponse = requests.get(url)\r\nhtml_icerigi = response.content\r\nsoup = BeautifulSoup(html_icerigi,\"html.parser\")\r\n\r\nx = float(input(\"Rating: \"))\r\n\r\nbasliklar = soup.find_all(\"td\",{\"class\":\"titleColumn\"})\r\nratingler = soup.find_all(\"td\",{\"class\":\"ratingColumn imdbRating\"})\r\n\r\nfor baslik,rating in zip(basliklar,ratingler):\r\n baslik = baslik.text\r\n rating = rating.text\r\n\r\n baslik = baslik.strip()\r\n baslik = baslik.replace(\"\\n\",\"\")\r\n\r\n rating = rating.strip()\r\n rating = rating.replace(\"\\n\",\"\")\r\n\r\n if(float(rating) > x):\r\n print(\"------------------------------------------------------\")\r\n print(\"Film: {} \\nFilmin Ratingi: {}\".format(baslik,rating))","sub_path":"PyQt5/imbd.py","file_name":"imbd.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47677086","text":"import cv2\nimport numpy as np \n\ncounter = 0\npos1=()\npos2=()\ncropped = np.zeros((), np.uint8,)\nflag =0\n\ndef lol(self):\n pass\n\ndef mouse(event,x,y,z,w):\n \n global counter, pos1,pos2\n # pixel = frame[y,x]\n \n if pos1 and pos2:\n if event == cv2.EVENT_LBUTTONDOWN:\n global flag\n pixel=cropped[y,x]\n print(pixel)\n cv2.setTrackbarPos('Lower_Hue','Control',pixel[0]-25)\n cv2.setTrackbarPos('Lower_Saturation','Control',pixel[1]-50)\n cv2.setTrackbarPos('Lower_Value','Control',pixel[2]-50)\n cv2.setTrackbarPos('Upper_Hue','Control',pixel[0]+25)\n cv2.setTrackbarPos('Upper_Saturation','Control',pixel[1]+50)\n cv2.setTrackbarPos('Upper_Value','Control',pixel[2]+50)\n \n flag =1\n\n\n if event == cv2.EVENT_LBUTTONDOWN:\n counter = counter+1\n\n if counter%2 != 0:\n pos1=(y,x)\n \n if counter%2 == 0:\n pos2=(y,x)\n template1(pos1,pos2) \n \n \n\ndef template1(pos1,pos2): \n cv2.rectangle(frame,pos1,pos2,(0,255,0),2) \n \n x1,y1=pos1\n x2,y2=pos2\n global cropped\n cropped = frame[x1:x2, y1:y2]\n \n\ncap = cv2.VideoCapture(0)\n\n#Trackbar 
Initialisation\ncv2.namedWindow('Control')\ncv2.createTrackbar('Lower_Hue','Control',0,180,lol)\ncv2.createTrackbar('Lower_Saturation','Control',0,255,lol)\ncv2.createTrackbar('Lower_Value','Control',0,255,lol)\ncv2.createTrackbar('Upper_Hue','Control',0,180,lol)\ncv2.createTrackbar('Upper_Saturation','Control',0,255,lol)\ncv2.createTrackbar('Upper_Value','Control',0,255,lol)\n\n#Mouse function\ncounter = 0\ncv2.namedWindow('frame')\ncv2.setMouseCallback('frame',mouse)\n\ncv2.namedWindow('cropped')\ncv2.setMouseCallback('cropped',mouse)\n\nwhile True:\n\n\n _,frame = cap.read()\n \n \n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n \n \n if pos1 and pos2 : \n cv2.imshow('cropped',cropped) \n cropped_gray=cv2.cvtColor(cropped,cv2.COLOR_BGR2GRAY)\n \n #setting HSV values using slider\n LH = cv2.getTrackbarPos('Lower_Hue','Control') \n UH = cv2.getTrackbarPos('Upper_Hue','Control') \n LS = cv2.getTrackbarPos('Lower_Saturation','Control')\n US = cv2.getTrackbarPos('Upper_Saturation','Control')\n LV = cv2.getTrackbarPos('Lower_Value','Control')\n UV = cv2.getTrackbarPos('Upper_Value','Control')\n \n #lower and upper limit of the colour range\n lower_colour=np.array([LH,LS,LV])\n upper_colour=np.array([UH,US,UV]) \n \n mask10 = cv2.inRange(cropped,lower_colour,upper_colour) \n res1 = cv2.bitwise_and(cropped,cropped,mask = mask10)\n res1_gray= cv2.cvtColor(res1,cv2.COLOR_BGR2GRAY)\n cv2.imshow('mask',res1)\n\n if flag==1:\n \n template = res1_gray\n cv2.imwrite('mask.jpg',res1)\n width = template.shape[0]\n height = template.shape[1]\n res = cv2.matchTemplate(frame_gray,template,cv2.TM_CCOEFF_NORMED)\n \n threshold = 0.8\n loc = np.where(res>=threshold)\n \n for pt in zip(*loc[::-1]):\n # print(pt)\n cv2.rectangle(frame,(pt),(pt[0]+height,pt[1]+width),(0,0,255),1)\n \n cv2.imshow('frame',frame) \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"IP/Learning/Template_Matching/Template_Test_v1.1.py","file_name":"Template_Test_v1.1.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"277077172","text":"\"\"\" Tool for finding the PageRank of heroes \"\"\"\nimport numpy as np\nfrom scipy.sparse import csc_matrix\nfrom overcrawl import get_counters\n\nCOUNTERS = get_counters()\nALL_HEROES = list(COUNTERS.keys())\n\nMATRIX = [[COUNTERS[c].get(h, 0) for c in ALL_HEROES] for h in ALL_HEROES]\n\n\ndef page_rank(G, s=0.85, maxerr=0.0000000000001):\n \"\"\"\n Computes the pagerank for each of the n states.\n Used in webpage ranking and text summarization using unweighted\n or weighted transitions respectively.\n Args\n ----------\n G: matrix representing state transitions\n Gij can be a boolean or non negative real number representing the\n transition weight from state i to j.\n Kwargs\n ----------\n s: probability of following a transition. 1-s probability of teleporting\n to another state. Defaults to 0.85\n maxerr: if the sum of pageranks between iterations is below this we will\n have converged. 
Defaults to 1e-13\n \"\"\"\n # pylint: disable=invalid-name, too-many-locals\n n = G.shape[0]\n\n # transform G into markov matrix M\n M = csc_matrix(G, dtype=float)\n rsums = np.array(M.sum(1))[:, 0]\n ri, _ = M.nonzero()\n M.data /= rsums[ri]\n\n # bool array of sink states\n sink = rsums == 0\n\n # Compute pagerank r until we converge\n ro, r = np.zeros(n), np.ones(n)\n while True:\n ro = r.copy()\n # calculate each pagerank one at a time\n for i in range(0, n):\n # inlinks of state i\n Ii = np.array(M[:, i].todense())[:, 0]\n # account for sink states\n Si = sink / float(n)\n # account for teleportation to state i\n Ti = np.ones(n) / float(n)\n\n r[i] = ro.dot(Ii*s + Si*s + Ti*(1-s))\n r_norm = r / sum(r)\n ro_norm = ro / sum(ro)\n if np.sum(np.abs(r_norm - ro_norm)) <= maxerr:\n break\n\n # return normalized pagerank\n return r/sum(r)\n\n\ndef get_rankings():\n \"\"\" Get ranking for each hero in order \"\"\"\n graph = np.array(MATRIX)\n res = page_rank(graph)\n res = list(res)\n return dict(zip(ALL_HEROES, res))\n\n\ndef main():\n \"\"\" Main function \"\"\"\n ranks = get_rankings()\n heroes = sorted(ALL_HEROES, key=lambda n: ranks[n])[::-1]\n\n for hero in heroes:\n print(\"{:16s} {}\".format(hero, int(1000*ranks[hero])))\n\nif __name__ == '__main__':\n main()\n","sub_path":"oversite/oversiteapp/overrank.py","file_name":"overrank.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"286201035","text":"\n\nfrom xai.brain.wordbase.nouns._jitney import _JITNEY\n\n#class header\nclass _JITNEYS(_JITNEY, ):\n\tdef __init__(self,): \n\t\t_JITNEY.__init__(self)\n\t\tself.name = \"JITNEYS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"jitney\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_jitneys.py","file_name":"_jitneys.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"109361568","text":"\"\"\"myprj1 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nimport students.views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n #Students\n url(r'^$', students.views.students_list, name=\"home\"),\n url(r'^students/add$', students.views.students_add, name=\"students_add\"),\n url(r'^students/(?P\\d+)/edit/$', students.views.students_edit, name=\"students_edit\"),\n url(r'^students/(?P\\d+)/del/$', students.views.students_del, name=\"students_del\"),\n\n #Groups\n url(r'^groups/$', students.views.groups_list, name=\"groups\"),\n url(r'^groups/add/$', students.views.groups_add, name=\"groups_add\"),\n url(r'^groups/(?P\\d+)/del/$', students.views.groups_del, name=\"groups_del\"),\n url(r'^groups/(?P\\d+)/edit/$', students.views.groups_edit, name=\"groups_edit\"),\n\n #Journal\n url(r'^journal/$', students.views.journal, name=\"journal\"),\n\n]\n","sub_path":"myprj1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303457529","text":"class Queue():\n def __init__(self):\n self.queue = []\n def enqueue(self, value):\n self.queue.append(value)\n def dequeue(self):\n if self.size() > 0:\n return self.queue.pop(0)\n else:\n return None\n def size(self):\n return len(self.queue)\nclass Graph:\n \"\"\"Represent a graph as a dictionary of vertices mapping labels to edges.\"\"\"\n def __init__(self):\n self.vertices = {}\n def add_vertex(self, vertex_id):\n \"\"\"\n Add a vertex to the graph.\n \"\"\"\n if vertex_id in self.vertices:\n print(\"WARNING: That vertex already exists\")\n else:\n self.vertices[vertex_id] = set()\n def add_edge(self, v1, v2):\n \"\"\"\n Add a directed edge to the graph.\n \"\"\"\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].add(v2)\n else:\n raise IndexError(\"That vertex does not exist!\")\n \n def get_neighbors(self, vertex_id):\n \"\"\"\n Get all neighbors (edges) of a vertex.\n \"\"\"\n return self.vertices[vertex_id]\n\ndef bfs(graph, starting_vertex):\n earliest_an = None\n # create queue\n q = Queue()\n # adding the starting_vertex the queue\n initital_path = [starting_vertex]\n # calling it q.enqueue\n q.enqueue(initital_path)\n # To store visited nodes\n visited = set()\n # When the queue is not empty\n while q.size() > 0:\n # dequeue, the first vertex\n path = q.dequeue()\n # lenght path\n path_length = len(path)\n # grabbing the last number from the path\n last_vert = path[-1]\n \n if last_vert not in visited:\n visited.add(last_vert)\n # loop over each neighbor in the graphs vertices at index of vert\n for v in graph.vertices[last_vert]:\n # make a copy of the path\n path_copy = path[:]\n # append vertex to the coppied path\n path_copy.append(v)\n # then enqueue the copied path\n q.enqueue(path_copy)\n\n if len(path_copy) > path_length:\n earliest_an = path_copy\n if earliest_an:\n return earliest_an[-1]\n return -1\n\ndef earliest_ancestor(ancestors, starting_node):\n # instantiate a new graph object\n graph = Graph()\n # loop over all pairs in ancestors\n for pair in ancestors:\n # add pair[0] and pair[1] to the graph\n graph.add_vertex(pair[0])\n graph.add_vertex(pair[1])\n # Create child to parent relationship \n graph.add_edge(pair[1], pair[0])\n\n return bfs(graph, starting_node) 
","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479588966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImport and extensions of the core Mesh class.\n\"\"\"\n\nfrom ._pygimli_ import (cat, HexahedronShape, Line, Mesh, MeshEntity, Node,\n PolygonFace, TetrahedronShape, TriangleFace)\nfrom .logger import deprecated, error, info, warn\nfrom ..meshtools import mergePLC, exportPLC\n\n\ndef __Mesh_str(self):\n st = \"Mesh: Nodes: \" + str(self.nodeCount()) + \" Cells: \" + str(\n self.cellCount()) + \" Boundaries: \" + str(self.boundaryCount())\n if (self.secondaryNodeCount() > 0):\n st += \" secNodes: \" + str(self.secondaryNodeCount())\n\n return st\nMesh.__str__ = __Mesh_str\n\n\ndef __addPLCs__(self, other):\n if self.isGeometry() and other.isGeometry():\n return mergePLC([self, other])\n else:\n error(\"Addition is only supported for PLCs, i.e. meshs without cells.\")\nMesh.__add__ = __addPLCs__\n\n\ndef __MeshEntity_str(self):\n \"\"\"Give mesh entity infos.\"\"\"\n s = self.__repr__()\n s += '\\tID: ' + str(self.id()) + \\\n ', Marker: ' + str(self.marker()) + \\\n ', Size: ' + str(self.size()) + '\\n'\n\n if isinstance(self, PolygonFace) and len(self.nodes()) > 5:\n s += '\\t' + str(self.nodeCount()) + \" Nodes.\\n\"\n else:\n for n in self.nodes():\n s += '\\t' + str(n.id()) + \" \" + str(n.pos()) + \"\\n\"\n return s\nMeshEntity.__str__ = __MeshEntity_str\n\n\ndef __Node_str(self):\n \"\"\"Give node infos.\"\"\"\n s = self.__repr__()\n s += '\\tID: ' + str(self.id()) + \\\n ', Marker: ' + str(self.marker())\n s += '\\t' + str(self.pos()) + '\\n'\n return s\nNode.__str__ = __Node_str\n\n# For Jupyer Notebook use.. 
check me\n# Node.__repr__ = Node_str\n# Mesh.__repr__ = Mesh_str\n# MeshEntity.__repr__ = MeshEntity_str\n\n\ndef __Mesh_setVal(self, key, val):\n \"\"\"Index access to the mesh data\"\"\"\n self.addData(key, val)\nMesh.__setitem__ = __Mesh_setVal\n\n\ndef __Mesh_getVal(self, key):\n \"\"\"Index access to the mesh data\"\"\"\n if self.haveData(key):\n return self.data(key)\n else:\n error('The mesh does not have the requested data:', key)\nMesh.__getitem__ = __Mesh_getVal\n\n\ndef __MeshBoundingBox__(self):\n bb = self.boundingBox()\n mi = [bb.min()[i] for i in range(self.dim())]\n ma = [bb.max()[i] for i in range(self.dim())]\n return [mi, ma]\nMesh.bb = __MeshBoundingBox__\n\n\ndef __MeshGetCellMarker__(self):\n deprecated(msg='Mesh::cellMarker()', hint='Mesh::cellMarkers()')\n return self.cellMarkers()\n\n\ndef __MeshSetCellMarker__(self, m):\n deprecated(msg='Mesh::setCellMarker()', hint='Mesh::setCellMarkers()')\n return self.setCellMarkers(m)\n\n\ndef __MeshHoleMarkers__(self):\n return self.holeMarker()\n\nMesh.cellMarker = __MeshGetCellMarker__\nMesh.setCellMarker = __MeshSetCellMarker__\nMesh.holeMarkers = __MeshHoleMarkers__\n\n\ndef __createSecondaryNodes__(self, n=3, verbose=False):\n \"\"\"Create `n` equally distributed secondary nodes on the mesh boundaries.\n This is useful to increase the accuracy of traveltime calculations.\n\n Parameters\n ----------\n n : int\n Number of secondary nodes (the default is 3).\n verbose : bool\n Optionally output number of added nodes.\n\n Returns\n -------\n pg.Mesh\n Copy of the given mesh with secondary nodes.\n \"\"\"\n self.createNeighborInfos()\n\n if self.boundary(0).nodeCount() != self.boundary(0).allNodeCount():\n warn(\"Mesh already contains secondary nodes. Not adding any more.\")\n else:\n if self.dim() == 2:\n for b in self.boundaries():\n A = b.node(0).pos()\n B = b.node(1).pos()\n line = Line(A, B)\n for i in range(n):\n sn = self.createSecondaryNode(line.at((i + 1) / (n + 1)))\n b.addSecondaryNode(sn)\n elif self.dim() == 3:\n for b in self.boundaries():\n bs = b.shape()\n for sx in range(n):\n nMax = n\n if isinstance(b, TriangleFace):\n nMax = n - sx\n for sy in range(nMax):\n if isinstance(b, TriangleFace):\n pos = bs.xyz([(sx + 1) / (n + 2),\n (sy + 1) / (n + 2)])\n else:\n pos = bs.xyz([(sx + 1) / (n + 1),\n (sy + 1) / (n + 1)])\n\n sn = self.createSecondaryNode(pos)\n b.addSecondaryNode(sn)\n\n for c in self.cells():\n # add secondary nodes to the edges of 3 Entities\n\n edges = []\n if isinstance(c.shape(), HexahedronShape):\n # 7------6\n # /| /|\n # 4------5 |\n # | 3----|-2\n # |/ |/\n # 0------1\n edges.append([c.shape().node(0), c.shape().node(1)])\n edges.append([c.shape().node(1), c.shape().node(2)])\n edges.append([c.shape().node(2), c.shape().node(3)])\n edges.append([c.shape().node(3), c.shape().node(0)])\n\n edges.append([c.shape().node(0), c.shape().node(4)])\n edges.append([c.shape().node(1), c.shape().node(5)])\n edges.append([c.shape().node(2), c.shape().node(6)])\n edges.append([c.shape().node(3), c.shape().node(7)])\n\n edges.append([c.shape().node(4), c.shape().node(5)])\n edges.append([c.shape().node(5), c.shape().node(6)])\n edges.append([c.shape().node(6), c.shape().node(7)])\n edges.append([c.shape().node(7), c.shape().node(4)])\n elif isinstance(c.shape(), TetrahedronShape):\n edges.append([c.shape().node(0), c.shape().node(1)])\n edges.append([c.shape().node(0), c.shape().node(2)])\n edges.append([c.shape().node(0), c.shape().node(3)])\n\n edges.append([c.shape().node(1), c.shape().node(2)])\n 
edges.append([c.shape().node(2), c.shape().node(3)])\n edges.append([c.shape().node(3), c.shape().node(1)])\n else:\n print(c)\n warn('cell type unknown')\n\n for e in edges:\n line = Line(e[0].pos(), e[1].pos())\n for i in range(n):\n sn = self.createSecondaryNode(line.at((i+1)/(n+1)),\n tol=1e-6)\n c.addSecondaryNode(sn)\n else:\n warn(\"Unknown dimension. Don't know what to do.\")\n\n if verbose:\n info(\"Added %d secondary nodes.\" % self.secondaryNodeCount())\n\n\ndef __createMeshWithSecondaryNodes__(self, n=3, verbose=False):\n m = Mesh(self)\n m.createSecondaryNodes(n, verbose)\n return m\nMesh.createSecondaryNodes = __createSecondaryNodes__\nMesh.createMeshWithSecondaryNodes = __createMeshWithSecondaryNodes__\n\n\n__Mesh_deform__ = Mesh.deform\ndef __deform__(self, eps, mag=1.0):\n v = None\n dof = self.nodeCount()\n if hasattr(eps, 'ndim') and eps.ndim == 1:\n v = eps\n elif len(eps) == self.dim():\n if len(eps[0]) == dof:\n if self.dim() == 2:\n v = cat(eps[0], eps[1])\n elif self.dim() == 3:\n v = cat(cat(eps[0], eps[1]), eps[2])\n else:\n v = eps[0]\n else:\n print(self)\n print(len(eps), len(eps[0]))\n error('Size of displacement does not match mesh nodes size.')\n\n return __Mesh_deform__(self, v, mag)\n\nMesh.deform = __deform__\n\n\nMesh.exportPLC = exportPLC\n\n# just to keep backward compatibility 20191120\nMesh.createNeighbourInfos = Mesh.createNeighborInfos\nMesh.xmin = Mesh.xMin\nMesh.ymin = Mesh.yMin\nMesh.zmin = Mesh.zMin\nMesh.xmax = Mesh.xMax\nMesh.ymax = Mesh.yMax\nMesh.zmax = Mesh.zMax\n","sub_path":"pygimli/core/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":8027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"524111518","text":"import os\n\nimport consoleiotools as cit\nimport consolecmdtools as cct\n\nimport classes.macphraser\nimport classes.msphraser\nimport classes.jsonphraser\nimport classes.txtphraser\nimport classes.htmlphraser\nimport classes.phraser\n\n__version__ = \"2.1.0\"\n\nPROJECT_DIR = cct.get_path(__file__, parent=True)\nGENERATED_DIR = os.path.join(PROJECT_DIR, \"GeneratedUDP\")\nPHRASES_DIR = os.path.join(PROJECT_DIR, \"Phrases\")\nAVAIL_PHRASER = {phraser.name: phraser for phraser in (\n classes.jsonphraser.JsonPhraser,\n classes.macphraser.MacPhraser,\n classes.txtphraser.TxtPhraser,\n classes.msphraser.MsPhraser,\n classes.htmlphraser.HtmlPhraser,\n)}\n\n\ndef get_phrases_files(dir: str = PHRASES_DIR) -> list:\n \"\"\"Get all phrases files.\n\n Args:\n dir (str, optional): Phrases files directory. 
Defaults to PHRASES_DIR.\n\n Returns:\n List[str]: Phrases files list.\n \"\"\"\n all_files = os.listdir(dir)\n phrases_files = [filename for filename in all_files if filename.startswith('UDP-') and filename.endswith('.json')]\n return [os.path.join(dir, filename) for filename in phrases_files]\n\n\ndef load_phrases_from_json(filepath: str) -> list:\n \"\"\"Load phrases from given json file.\n\n Args:\n filepath (str): Phrases file path.\n\n Returns:\n list: Phrases list.\n \"\"\"\n cit.info(f\"Parsing `{cct.get_path(filepath, basename=True)}`\")\n phraser = classes.jsonphraser.JsonPhraser()\n phraser.from_file(filepath)\n return phraser.phrases\n\n\ndef load_all_phrases(files: list) -> list:\n \"\"\"Load all phrases from given files.\n\n Args:\n files (list): Phrases files list.\n\n Returns:\n list: Phrases list.\n \"\"\"\n phrases = [phrase for filepath in files for phrase in load_phrases_from_json(filepath)]\n cit.info(f'Loaded {len(phrases)} phrases')\n return phrases\n\n\ndef make_phraser(phraser_name: str, phrases: list) -> classes.phraser.Phraser:\n \"\"\"Make phraser instance.\n\n Args:\n phraser_name (str): Phraser name.\n phrases (list): Phrases list.\n\n Returns:\n classes.phraser.Phraser: Phraser instance.\n \"\"\"\n if phraser_name not in AVAIL_PHRASER:\n raise Exception(f\"Phraser `{phraser_name}` is not available!\")\n Phraser = AVAIL_PHRASER[phraser_name]\n return Phraser(phrases)\n\n\ndef check_file_existance(filepath: str):\n if os.path.exists(filepath):\n cit.ask(f\"'{filepath}' already exists. Overwrite it?\")\n if cit.get_choice(['Yes', 'No']) == 'Yes':\n os.remove(filepath)\n return True\n else:\n return False\n else:\n return None\n\n\ndef generate_files(phraser: classes.phraser.Phraser, filename: str):\n dir = os.path.join(GENERATED_DIR, phraser.name)\n if not os.path.exists(dir):\n os.makedirs(dir)\n filepath = os.path.join(dir, filename)\n if check_file_existance(filepath) is not False:\n phraser.to_file(filepath)\n cit.info(f\"'{filepath}' is generated, {len(phraser.phrases)} phrases.\")\n else:\n cit.warn(f\"'{filepath}' is not overwritten. 
No file generated.\")\n\n\n@cit.as_session\ndef assembly_line(phraser_name: str, separate: bool = False):\n phrases_files = get_phrases_files()\n if separate:\n for phrases_file in phrases_files:\n phrases = load_phrases_from_json(phrases_file)\n phraser = make_phraser(phraser_name, phrases)\n filename = cct.get_path(phrases_file, basename=True).replace(cct.get_path(phrases_file, ext=True), phraser.ext)\n generate_files(phraser, filename)\n else:\n phrases = load_all_phrases(phrases_files)\n phraser = make_phraser(phraser_name, phrases)\n filename = \"UserDefinedPhrase.\" + phraser.ext\n generate_files(phraser, filename)\n\n\ndef create_jobs():\n cit.ask(\"Which one you wanna convert?\")\n phraser_names = cit.get_choices(list(AVAIL_PHRASER.keys()), allable=True, exitable=True)\n if not phraser_names:\n cit.bye()\n cit.ask(\"Generate into separate files?\")\n separate = cit.get_choice(['Yes', 'No']) == 'Yes'\n for name in phraser_names:\n assembly_line(name, separate=separate)\n cct.show_in_file_manager(GENERATED_DIR, ask=True)\n\n\nif __name__ == \"__main__\":\n cit.info(f\"Output folder: {GENERATED_DIR}\")\n cit.info(f\"Phrases folder: {PHRASES_DIR}\")\n for filepath in get_phrases_files():\n cit.markdown(f\"|-- `{filepath}`\")\n create_jobs()\n","sub_path":"user_defined_phraser.py","file_name":"user_defined_phraser.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"126712860","text":"import numpy as np\nimport json\n#import sys\nimport time\n\n\nstart_time = time.time() \ndef prodmv(c, B, tam):\n a = list()\n for i in range(tam):\n sum = 0\n for j in range(tam):\n sum += B[i][j] * c[j]\n a.append(sum)\n #print(a)\n\n\n#mat = np.array(json.loads(sys.argv[1]))\nmat = np.loadtxt('mat.txt',dtype=np.int)\n#vec=np.array(json.loads(sys.argv[2]))\nvec=np.loadtxt('vec.txt',dtype=np.int)\n\nprodmv(vec, mat, len(vec))\n\nend_time = time.time()\n\n#print('Tiempo: {:,.2f}'.format(float(end_time - start_time)))\nprint(\"Tiempo: \",float(end_time - start_time))\n","sub_path":"ExaSecuencial.py","file_name":"ExaSecuencial.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"337411058","text":"\n\n# 检测进程个数\nCONFIG_PROCESS_COUNT = 1\n# 检测时间间隔, 单位: s 秒\nCONFIG_DETECT_INTERVAL = 120\n# 检测当前任务是否完成的时间间隔, 单位: s 秒\nCONFIG_DETECT_QUENE_INTERVAL = 20\n\n# 消息类型, test 测试消息类型, ipc 网络摄像机, video 本地测试视频\nMESSAGE_TYPE = 'ipc'\n# 本地测试录像路径\nLOCAL_VIDEO_AND_CONFIG_PATH = './temp' # 当消息类型为‘video’时该参数有效\n\n# 红绿灯检测算法参数\ntraffic_light_detect_count = 17 # 检测帧数\ntraffic_light_broken_count = 15 # 连续 traffic_light_broken_count 检测如果都发现问题, 即认定红绿灯出现问题\n\n# 平台接口配置信息\nplatform_interface = {\n \"get_device\": \"http://192.168.100.137:8089/devices\",\n \"update_device_state\": \"http://192.168.100.137:8088/devicestates\"\n}\n\n\ndef config_print():\n \"\"\"\n 打印平台配置信息\n :return:\n \"\"\"\n print('\\n------------------ running parameters ------------------')\n print('- concurrent worker number :', CONFIG_PROCESS_COUNT,\n '|', 'message type :', MESSAGE_TYPE)\n print('- detect task interval :', CONFIG_DETECT_INTERVAL,\n '|', 'detect quene interval :', CONFIG_DETECT_QUENE_INTERVAL)\n print('--------------------------------------------------------')\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"182224139","text":"\nimport sys\nfrom pathlib import PosixPath, PureWindowsPath\n\nif sys.version_info < (3, 2):\n import subprocess32 as sp\nelse:\n import subprocess as sp\n\n\ndef posix2windows(posixpath):\n # type: (PosixPath) -> PureWindowsPath\n wslpath = sp.check_output(\n [\"wslpath\", \"-w\", str(posixpath)],\n universal_newlines=True,\n )\n return PureWindowsPath(wslpath)\n\n\ndef call_explorer(posixpath):\n # type: (PosixPath) -> int\n return sp.call(\n [\"explorer.exe\", str(posix2windows(posixpath))],\n )\n\n\ndef main():\n # type: () -> None\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Windows Explorer\"\n )\n\n parser.add_argument(\n \"posixpath\",\n nargs='?',\n default='.',\n )\n\n args = parser.parse_args()\n\n call_explorer(\n posixpath=PosixPath(args.posixpath)\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"explorer_from_wsl.py","file_name":"explorer_from_wsl.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"127064525","text":"#!/usr/bin/env pybricks-micropython\nfrom Initialize import *\n\ndef gyroForward(desiredDistance, speed):\n print(\"desiredDistance:\" + str(desiredDistance))\n print(\"Speed:\" + str(speed))\n \n initialGyro = 0\n # This is where the variables are defined.\n\n # Here is the main code.\n robot.reset()\n\n #Setting the initial value for distance\n distanceTraveled = robot.distance()\n\n p4GSensor.reset_angle(0) #resets the gyro angle to zero\n\n while distanceTraveled < desiredDistance: #while the current distance of the robot is less than the distance that we want to go:\n distanceTraveled = robot.distance() #we define the distance traveled variable again, as this is a while loop, and the distance of the robot from its starting point is always changing\n # This is the Gyro Part of our code.\n ang = p4GSensor.angle()\n\n # If the angle that the gyro is sensing is NOT equal to the initial gyro (0)\n if ang != initialGyro:\n # Turn that angle to GET to the value of the initial gyro.\n robot.turn(ang)\n\n # Print values for debug\n print(\"speed:\" + str(speed))\n print(\"Angle:\" + str(ang))\n print(\"Distance:\" + str(distanceTraveled))\n\n # Drive the robot\n robot.drive(speed, 0)\n robot.stop(Stop.COAST)","sub_path":"RobotCode/GyroF.py","file_name":"GyroF.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"125440929","text":"import os,sys\nimport logging\nimport json\n\ndef prepare_cards(name):\n cards = ''\n logging.basicConfig(level=logging.DEBUG)\n if not os.path.isfile('cards_map_2016.json'):\n logging.warning('cards_map not exist, please check')\n with open('cards_map_2016.json', 'r') as f:\n jsons = json.load(f)\n f.close()\n \n for region in jsons:\n logging.info('Preparing cards for region {region}'.format(region=region))\n region = jsons[region]\n for bin in region:\n bin = region[bin]\n logging.info('Preparing cards for {tag}'.format(tag=bin['tag']))\n cards = cards + '{tag}={file} '.format(tag=bin['tag'], file=bin['file'])\n \n os.system('combineCards.py {cards} >& {name}.txt'.format(cards=cards, name=name))\n os.system('combineCards.py -S {cards} >& {name}_shape.txt'.format(cards=cards, name=name))\n pass\n\nif __name__ == '__main__':\n name = 'test16'\n prepare_cards(name)\n \n codes = '''\ncombine -M Significance --expectSignal=1 -t -1 {name}.txt > result_{name}.txt\ncombine -M 
Significance --expectSignal=1 -t -1 {name}.txt --freezeParameters all > result_freezeAll{name}.txt\n\ncombine -M FitDiagnostics -t -1 --expectSignal=1 -d {name}_shape.txt -m 125 --saveShapes --saveWithUncertainties\ntext2workspace.py {name}_shape.txt -m 125\ncombineTool.py -M Impacts -d {name}_shape.root -t -1 --expectSignal=1 -m 125 --doInitialFit --robustFit 1\ncombineTool.py -M Impacts -d {name}_shape.root -t -1 --expectSignal=1 -m 125 --robustFit 1 --doFits --parallel 4\ncombineTool.py -M Impacts -d {name}_shape.root -t -1 --expectSignal=1 -m 125 -o impacts_{name}.json\nplotImpacts.py -i impacts_{name}.json -o impacts_{name}\n    '''\n\n    os.system(codes.format(name=name))","sub_path":"HiggsCombination/2016/combine_16.py","file_name":"combine_16.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103021083","text":"# The Bomberman Game https://www.hackerrank.com/challenges/bomber-man/problem\n\ndef bomberMan(n, grid):\n    r, c = len(grid), len(grid[0])  # grid dimensions (implicit globals in the original)\n    if n == 1:\n        return grid\n\n    if n % 2 == 0:\n        return ['O' * c for i in range(r)]\n\n    n //= 2\n\n    for q in range((n+1) % 2 + 1):\n        newGrid = [['O'] * c for i in range(r)]\n\n        def set(x, y):\n            if 0 <= x < r and 0 <= y < c:\n                newGrid[x][y] = '.'\n\n        xi = [0, 0, 0, 1, -1]\n        yi = [0, -1, 1, 0, 0]\n\n        for x in range(r):\n            for y in range(c):\n                if grid[x][y] == 'O':\n\n                    for i, j in zip(xi, yi):\n                        set(x + i, y + j)\n\n        grid = newGrid\n\n\n    return [\"\".join(x) for x in grid]\n\n\n\n","sub_path":"hackerrank/other/bomber-man.py","file_name":"bomber-man.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"200042394","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/1/25 9:00 PM\n# @Author : Mat\n# @Email : mat_wu@163.com\n# @File : web.py\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport json\nfrom tornado.options import define, options\n\nfrom nlp.NBclassifier import NBclassifier\nfrom words.Words import Words\n\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n    def data_received(self, chunk):\n        pass\n\n    def get(self):\n        text = self.get_argument('text', '')\n        cut_word = self.get_argument('cut_word', 0)\n        word = Words()\n        word_str = word.cut_words(text)\n\n        if int(cut_word) == 1:\n            result = {'msg': 'ok', 'data': word_str}\n            result = json.dumps(result, ensure_ascii=False)\n            self.write(result)\n            return\n\n        clf_path = \"./datasets/trainModel/clf.m\"\n        vec_path = \"./datasets/trainModel/vec.m\"\n        # create the Naive Bayes classifier\n        nbclassifier = NBclassifier(clf_path, vec_path)\n        data_list = [word_str]\n        predictList = nbclassifier.predict(data_list)\n        predictList = list(predictList)\n        predict_class = \"\".join(predictList)\n        result = {'msg': 'ok', 'data': predict_class}\n        result = json.dumps(result, ensure_ascii=False)\n        self.write(result)\n\n\nif __name__ == \"__main__\":\n    tornado.options.parse_command_line()\n    app = tornado.web.Application(handlers=[(r\"/nlp\", IndexHandler)])\n    http_server = tornado.httpserver.HTTPServer(app)\n    http_server.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"ai/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"543143008","text":"__author__ = 'KO CE06'\n\n# Write a program that reads numbers from the user into a list until the user enters 0.\n# When the user enters zero, find the even and odd numbers among the values entered (excluding 0).\n\n\nnumbers = []\neven = []\nodd = []\n\nn = int(input(\"Number: \"))\nwhile n > 0:\n    numbers.append(n)\n    n = int(input(\"Number: \"))\n\nfor i in numbers:\n    if i % 2 == 0:\n        even.append(i)\n    else:\n        odd.append(i)\n\nprint(\"Even: \", even)\nprint(\"Odd: \", odd)\n","sub_path":"LAB404.py","file_name":"LAB404.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"383226979","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nn=0 # total article count\r\nfor year in range(2019,2021):\r\n    for month in range(1,13):\r\n        day=29\r\n        if year==2019 and month!=12:\r\n            continue\r\n        if year==2020 and month>6:\r\n            break\r\n        if year==2020 and month<=6:\r\n            if month==1 or month==3 or month==5 or month==7 or month==8 or month==10 or month==12:\r\n                day=31\r\n            if month==4 or month==6 or month==9 or month==11:\r\n                day=30\r\n            if month==2:\r\n                day=29 # only 2020 is considered\r\n        for i in range(1,day+1):\r\n            tempDay=str(i)\r\n            tempMonth=str(month)\r\n            tempYear=str(year)\r\n            if i<10:\r\n                tempDay=str(0)+tempDay\r\n            if month<10:\r\n                tempMonth=str(0)+tempMonth\r\n            url='http://news.sina.com.cn/head/news'+tempYear+tempMonth+tempDay+'am.shtml'\r\n\r\n\r\n            html = requests.get(url)\r\n            soup=BeautifulSoup(html.content,\"html.parser\")\r\n\r\n\r\n            data=soup.find_all('h1')\r\n            for k in range(len(data)):\r\n                try:\r\n                    if data[k].a.string.find('疫情')!=-1 or data[k].a.string.find('新冠')!=-1 or data[k].a.string.find('确诊')!=-1 or data[k].a.string.find('隔离')!=-1 or data[k].a.string.find('武汉')!=-1:\r\n                        n=n+1\r\n                        with open(r'E:\\\Desktop\\\spider\\\urls.txt','a') as ff:\r\n                            ff.write(data[k].a['href']+'\\n')\r\n                        print(data[k].a['href'])\r\n                except :\r\n                    continue\r\n            data = soup.find_all('li')\r\n            for k in range(len(data)):\r\n                try:\r\n                    if data[k].a.string.find('疫') != -1 or data[k].a.string.find('新冠') != -1 or data[k].a.string.find(\r\n                            '确诊') != -1 or data[k].a.string.find('隔离') != -1 or data[k].a.string.find('武汉') != -1:\r\n                        n = n + 1\r\n                        # with open(r'E:\\\Desktop\\\spider\\\urls.txt', 'a') as ff:\r\n                        #     ff.write(data[k].a['href'] + '\\n')\r\n                        print(data[k].a['href'])\r\n                except:\r\n                    continue\r\n\r\nprint(n)\r\n\r\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"641358248","text":"\"\"\"\nInterfaces with Crow sensors.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.Crow/\n\"\"\"\nimport logging\n\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\n\nfrom ..crow import HUB as hub, SIGNAL_CROW_UPDATE\nfrom homeassistant.const import TEMP_CELSIUS\nfrom homeassistant.helpers.entity import Entity\n\n_LOGGER = logging.getLogger(__name__)\n\nINTERFACE_TEMPERATURE = 32533\nINTERFACE_HUMIDITY = 32532\nINTERFACE_AIR_PRESSURE = 32535\n\niface_labels = {\n    INTERFACE_TEMPERATURE: 'Temperature',\n    INTERFACE_HUMIDITY: 'Humidity',\n    INTERFACE_AIR_PRESSURE: 'Air Pressure'\n}\n\n\ndef get_iface_value(iface, data):\n    if iface == INTERFACE_AIR_PRESSURE:\n        return data['air_pressure']\n    if iface == INTERFACE_TEMPERATURE:\n        return round(data['temperature'] / 10) / 10\n    if iface == INTERFACE_HUMIDITY:\n        return round(data['humidity'] / 10) / 10\n    return None\n\n\ndef get_iface_unit(iface):\n    if iface == 
INTERFACE_AIR_PRESSURE:\n return 'hPa'\n if iface == INTERFACE_TEMPERATURE:\n return TEMP_CELSIUS\n if iface == INTERFACE_HUMIDITY:\n return '%'\n return 'Unknown'\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Crow platform.\"\"\"\n sensors = []\n measurements = hub.get_measurements()\n _LOGGER.debug(\"Setup crow sensors: %s\", measurements)\n\n sensor_defs = hub.get(measurements, '$..values.*')\n for sensor in sensor_defs:\n sensor['name'] = measurements.get(str(hub.get_first(sensor, '$._id.device_id'))).get('name')\n\n sensors.extend([CrowSensor(hass, sensor) for sensor in sensor_defs])\n\n add_devices(sensors)\n\n\nclass CrowSensor(Entity):\n \"\"\"Representation of a Crow thermometer.\"\"\"\n\n def __init__(self, hass, sensor):\n \"\"\"Initialize the sensor.\"\"\"\n _LOGGER.debug(\"Init crow sensor: %s\", sensor)\n self.hass = hass\n self.value = None\n self._panel_mac = hub.get_first(sensor, '$._id.control_panel')\n self._device_id = hub.get_first(sensor, '$._id.device_id')\n self._interface_type = hub.get_first(sensor, '$._id.dect_interface')\n self._device_label = \"{} - {}\".format(sensor['name'], iface_labels.get(self._interface_type))\n self.value = get_iface_value(self._interface_type, sensor)\n\n async def async_added_to_hass(self):\n \"\"\"Subscribe to sensors events.\"\"\"\n async_dispatcher_connect(self.hass, SIGNAL_CROW_UPDATE, self.async_update_callback)\n\n @callback\n def async_update_callback(self, msg):\n if msg.get('type') != 'info':\n return\n if self._panel_mac != hub.get_first(msg, '$.data._id.control_panel') \\\n or self._device_id != hub.get_first(msg, '$.data._id.device_id') \\\n or self._interface_type != hub.get_first(msg, '$.data._id.dect_interface'):\n return\n self.value = get_iface_value(self._interface_type, msg.get('data', {}))\n _LOGGER.debug(\"Set %s value to %s\", self._device_label, self.value)\n self.async_schedule_update_ha_state(True)\n\n @property\n def force_update(self):\n \"\"\"Return True if state updates should be forced.\n\n If True, a state change will be triggered anytime the state property is\n updated, not just when the value changes.\n \"\"\"\n return True\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n @property\n def name(self):\n \"\"\"Return the name of the device.\"\"\"\n return self._device_label\n\n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n return self.value\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return self.value is not None\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of this entity.\"\"\"\n return get_iface_unit(self._interface_type)\n\n # # pylint: disable=no-self-use\n # def update(self):\n # \"\"\"Update the sensor.\"\"\"\n # data = hub.get_first(hub.get_measurements(),\n # '$.%d.values.[?(@._id.dect_interface==%d)]',\n # self._device_id, self._interface_type)\n # self.value = get_iface_value(self._interface_type, data)\n\n","sub_path":"crow/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358578107","text":"import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hex2x_backend.settings')\nimport django\n\ndjango.setup()\n\nfrom hex2x_backend.snapshot.contracts_interaction import check_snapshot_contract_amounts\nfrom hex2x_backend.snapshot.models import HexUser\n\nif __name__ 
== '__main__':\n    all_users_count = 1  # sentinel so the loop body runs at least once (it was 0, so the loop never ran)\n    while all_users_count > 0:\n        try:\n            all_users = HexUser.objects.filter(tx_checked=False).order_by('id')\n            all_users_count = all_users.count()\n            check_snapshot_contract_amounts(all_users)\n        except Exception as e:\n            print('process exited with error:', e, flush=True)\n            print('restarting', flush=True)\n","sub_path":"check_snapshot.py","file_name":"check_snapshot.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"564267033","text":"\"\"\"\nGiven an odd natural number n, write a program that prints an isosceles\nstar triangle with a base equal to n, following the example:\n\n*\n**\n***\n****\n***\n**\n*\nInput format\nThe program receives a single odd natural number.\n\nOutput format\nThe program must print the triangle as described above.\n\nNote: use a nested for loop.\n\nSample Input 1:\n\n3\nSample Output 1:\n\n*\n**\n*\nSample Input 2:\n\n5\nSample Output 2:\n\n*\n**\n***\n**\n*\n\"\"\"\n# -------------------------------------------------------------------------------------------------\n\n# Variant 1\nnum = int(input())\nfor i in range(num // 2):\n    for j in range(i + 1):\n        print('*', end='')\n    print()\nfor k in range(num // 2 + 1):\n    print('*', end='')\nprint()\nfor i in range(num // 2 + 1, 1, -1):\n    for j in range(i - 1):\n        print('*', end='')\n    print()\n# -------------------------------------------------------------------------------------------------\n\n# Variant 2\nnum = int(input())\ncount = 0\nstep = 1\nfor _ in range(num):\n    if count == num//2 + 1:\n        step = -1\n    count += step\n    print('*' * count)\n# -------------------------------------------------------------------------------------------------\n\n# Variant 3\nnum = int(input())\nfor i in range(1, num + 1):\n    print('*' * min(i, num - i + 1))","sub_path":"Python/for and while/star_triangle.py","file_name":"star_triangle.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438236581","text":"#!/usr/bin/env python\n# coding=utf8\n\n\ndef setting_from_object(obj):\n    settings = dict()\n    for key in dir(obj):\n        if key.isupper():\n            settings[key.lower()] = getattr(obj, key)\n    return settings\n\n\nclass ObjectDict(dict):\n    def __getattr__(self, key):\n        if key in self:\n            return self[key]\n        return None\n\n    def __setattr__(self, key, value):\n        self[key] = value\n\n\nclass cached_property(object):\n    def __init__(self, func, name=None, doc=None):\n        self.__name__ = name or func.__name__\n        self.__module__ = func.__module__\n        self.__doc__ = doc or func.__doc__\n        self.func = func\n\n    def __get__(self, obj, type=None):\n        if obj is None:\n            return self\n        value = obj.__dict__.get(self.__name__, None)\n        if value is None:\n            value = self.func(obj)\n            obj.__dict__[self.__name__] = value\n        return value\n\n\ndef find_subclasses(klass, include_self=False):\n    accum = []\n    for child in klass.__subclasses__(): # walk the subclasses of db.Model, i.e. the defined models\n        accum.extend(find_subclasses(child, True))\n    if include_self:\n        accum.append(klass)\n    return accum\n","sub_path":"lib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"437871679","text":"from normal_item import Normal_item\n\n\nclass Aged_brie(Normal_item):\n    def __init__(self, name, sell_in, 
quality):\n        self.name = name\n        assert isinstance(name, str)\n        self.sell_in = sell_in\n        assert isinstance(sell_in, int)\n        self.quality = quality\n        assert isinstance(quality, int)\n\n    def update_quality(self):\n        if self.sell_in > 0:\n            self.quality += 1\n        else:\n            self.quality += 2\n        return self.quality\n\n    def update_item(self):\n        Aged_brie.update_quality(self)\n        Normal_item.update_sell_in(self)\n        Normal_item.check_quality(self)\n","sub_path":"app/aged_brie.py","file_name":"aged_brie.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"493272877","text":"\n\n#class header\nclass _JINGLE():\n\tdef __init__(self,): \n\t\tself.name = \"JINGLE\"\n\t\tself.definitions = [u'to make a repeated gentle ringing sound, or to make things do this: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_jingle.py","file_name":"_jingle.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438460410","text":"import os\nCDV = None\nif os.environ.get('FLASK_COVERAGE'):\n    import coverage\n    CDV = coverage.coverage(branch=True, include='app/*')\n    CDV.start()\n\nif os.path.exists('.env'):\n    print('Importing environment variables...')\n    for line in open('.env'):\n        var = line.strip().split('=')\n        if len(var) == 2:\n            os.environ[var[0]] = var[1]\n\nfrom flask_migrate import Migrate, MigrateCommand, upgrade\nfrom flask_script import Manager, Shell\nfrom app import create_app, db\nfrom app.models import User, Role, Post, Comment, Follow\n\n\ndef make_shell_context():\n    return {'app': app, 'db': db, 'User': User, 'Follow': Follow,\n            'Role': Role, 'Post': Post, 'Comment': Comment}\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\nmanager.add_command('db', MigrateCommand)\nmanager.add_command('shell', Shell(make_context=make_shell_context))\n\n\n@manager.command\ndef dev():\n    \"\"\"Reload when a modification happens.\"\"\"\n    from livereload import Server\n    live_server = Server(app.wsgi_app)\n    live_server.watch('**/*.*')\n    live_server.serve()\n\n\n@manager.command\ndef test(coverage=False):\n    \"\"\"Run the unit tests.\"\"\"\n    if coverage and not os.environ.get('FLASK_COVERAGE'):\n        import sys\n        os.environ['FLASK_COVERAGE'] = '1'\n        os.execvp(sys.executable, [sys.executable] + sys.argv)\n    import unittest\n    tests = unittest.TestLoader().discover('tests')\n    unittest.TextTestRunner(verbosity=2).run(tests)\n    if CDV:\n        CDV.stop()\n        CDV.save()\n        print('Coverage summary')\n        CDV.report()\n        basedir = os.path.abspath(os.path.dirname(__file__))\n        covdir = os.path.join(basedir, 'tmp/coverage')\n        CDV.html_report(directory=covdir)\n        print('HTML version: file://%s/index.html' %covdir)\n        CDV.erase()\n\n\n@manager.command\ndef profile(length=25, profile_dir=None):\n    \"\"\"Run the app under the profiler.\"\"\"\n    from werkzeug.contrib.profiler import ProfilerMiddleware\n    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length], profile_dir=profile_dir)\n    app.run()\n\n\n@manager.command\ndef deploy():\n    \"\"\"Run deployment tasks.\"\"\"\n    upgrade()\n    Role.insert_roles()\n    User.add_self_follows()\n\n\n@manager.command\ndef reset():\n    \"\"\"Reset the database.\"\"\"\n    print('Starting database reset...')\n    print('Dropping all tables...')\n    db.drop_all()\n    print('Creating all tables...')\n    db.create_all()\n    print('Generating roles...')\n    
Role.insert_roles()\n    print('Generating myself...')\n    u = User(wow_faction='联盟', wow_race='暗夜精灵', wow_class='德鲁伊', username='Yiz', email='562124140@qq.com', password='1',\n             confirmed=True, name='野蛮角斗士', location='试炼之环', about_me='非著名猫德')\n    u2 = User(wow_faction='部落', wow_race='兽人', wow_class='萨满祭司', username='萨尔', email='181826029@qq.com', password='1',\n              role=Role.query.filter_by(name='官员').first(), confirmed=True, name='神出鬼没的', location='奥格瑞玛', about_me='部落精神领袖,世界萨')\n    u3 = User(username='冬泉信使', email='freyizg@gmail.com', password='1',\n              role=Role.query.filter_by(name='官员').first(), confirmed=True, name='游荡的', location='冬泉谷', about_me='这封信上写的什么?')\n    db.session.add_all((u, u2, u3))\n    db.session.commit()\n    print('Generating fake users...')\n    User.generate_fake(200)\n    print('Generating posts...')\n    Post.generate_fake(200)\n    print('Generating comments...')\n    Comment.generate_fake(5, 15)\n    print('Generating follows...')\n    Follow.generate_fake(5, 20)\n    print('Generating self-follows...')\n    User.add_self_follows()\n\n    def generate_likes_and_collections():\n        from random import randint\n        for i in range(1000):\n            u = User.query.get(randint(1, User.query.count()))\n            u2 = User.query.get(randint(1, User.query.count()))\n            c = Comment.query.get(randint(1, Comment.query.count()))\n            p = Post.query.get(randint(1, Post.query.count()))\n            if c not in u.comments_like:\n                u.comments_like.append(c)\n                c.likes += 1\n                db.session.add(u)\n            if p not in u2.posts_collected:\n                u2.posts_collected.append(p)\n                p.collects += 1\n                db.session.add(u2)\n                db.session.add(p)\n        db.session.commit()\n\n    print('Generating likes and collections...')\n    generate_likes_and_collections()\n    print('Database reset complete. Thanks for using!')\n    quit()\n\nif __name__ == '__main__':\n    manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593145024","text":"# coding:gbk\n\nimport threading\nimport time\n\ntotal = 5 # total number of tickets\n\nlock = threading.RLock() # create a reentrant lock\n\n\ndef sale():\n    global total\n\n    lock.acquire()\n    time.sleep(1)\n    print('Selling ticket No.%s\\n' % total)\n    time.sleep(1)\n\n    total -= 1\n\n    lock.release()\n\n\nif __name__ == '__main__':\n    threads = []\n\n    for i in range(5): # create 5 threads, one per ticket window\n        t = threading.Thread(target=sale, args=())\n        threads.append(t)\n\n    for t in threads: # start selling\n        t.start()\n","sub_path":"multithreading_multiprocess/sell_ticket_with_lock.py","file_name":"sell_ticket_with_lock.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520364345","text":"#!/usr/bin/env python3\n\n# Problem 6: Sum square difference\n\n# The sum of the squares of the first ten natural numbers is,\n\n# 1^2 + 2^2 + ... + 10^2 = 385\n\n# The square of the sum of the first ten natural numbers is,\n\n# (1 + 2 + ... 
+ 10)^2 = 55^2 = 3025\n\n# Hence the difference between the sum of the squares of the first ten\n# natural numbers and the square of the sum is 3025 - 385 = 2640.\n\n# Find the difference between the sum of the squares of the first one\n# hundred natural numbers and the square of the sum.\n\ndef problem6():\n n = 100\n return (3*n**4 - 3*n**2 + 2*n**3 - 2*n) // 12\n\nif __name__ == '__main__':\n print(problem6())\n","sub_path":"problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560915079","text":"import torch\nimport torchvision.datasets as datasets\nimport torchvision.transforms as Transform\nfrom torch.optim import lr_scheduler\nfrom PIL import Image\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nimport time\nimport re\nimport datetime\nimport os\n\nfrom .base import FeatureExtractor\n\n\nclass AutoEncoderExtractor(FeatureExtractor):\n type = 'autoencoder'\n\n def __init__(self, params):\n self.input_shape = params['input_shape']\n self.pre_process = self.preprocess(self.input_shape)\n self.device = params['device']\n self.model = AutoEncoder(input_shape=self.input_shape)\n self.output_path = params['output_dir']\n if params['train']:\n self.train(params)\n elif params['model_path'] is not None and params['model_path'] != '':\n self.model.load_state_dict(torch.load(params['model_path'], map_location=lambda storage, location: storage))\n self.model.to(self.device)\n else:\n raise Exception('no model specified: use model_path to give path of trained model or set -train')\n self.model.eval()\n\n def get_training_helpers(self, params, model: nn.Module):\n num_epochs = params['epochs']\n milestone_ratio = params['milestone_step_ratio']\n learning_rate = params['learning_rate']\n weight_decay = params['weight_decay']\n gamma = params['gamma']\n loss = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(x * num_epochs) for x in milestone_ratio],\n gamma=gamma)\n return loss, optimizer, scheduler\n\n def preprocess(self, input_shape)->Transform:\n transform = Transform.Compose([Transform.Resize(input_shape[:2]),\n Transform.ToTensor(),\n Transform.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])])\n return transform\n\n def prepare_data(self, input_folder: str, pre_process: Transform, batch_size: int=64)->DataLoader:\n trainset = datasets.ImageFolder(input_folder, transform=pre_process)\n dataloader = DataLoader(trainset, batch_size=batch_size, shuffle=False, num_workers=4)\n return dataloader\n\n def train(self, params):\n print('training starts..')\n input_folder = params['input_train']\n input_shape = params['input_shape']\n batch_size = params['batch_size']\n num_epochs = params['epochs']\n model_directory = os.path.join(self.output_path, 'model')\n\n model = AutoEncoder(input_shape=input_shape)\n model = model.to(self.device)\n\n model.train(True)\n\n distance, optimizer, scheduler = self.get_training_helpers(params, model)\n data_loader = self.prepare_data(input_folder, self.pre_process, batch_size)\n\n for epoch in range(num_epochs):\n start_time = time.time()\n scheduler.step()\n epoch_loss = 0.0\n for data in data_loader:\n imgs, _ = data\n imgs = imgs.to(self.device)\n # ===================forward=====================\n encoded_outputs, decoded_outputs = model(imgs)\n loss = 
distance(decoded_outputs, imgs)\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n epoch_loss += loss.data * imgs.size(0)\n # ===================log========================\n epoch_time = time.time() - start_time\n epoch_loss /= len(data_loader.dataset)\n if epoch % 100 == 0:\n print('saving checkpoint')\n time_string = re.sub(string='_'.join(str(datetime.datetime.now()).split(':')[:-1]), pattern='[ -]',\n repl='_')\n model_save_name = \"model_\" + str(epoch) + '_' + time_string + \".pt\"\n torch.save(model.state_dict(),\n os.path.join(model_directory, model_save_name))\n print('epoch [{}/{}], run_loss:{:.4f}, epoch_loss:{:.4f}, time:{:.4f}s'.format(epoch + 1, num_epochs,\n loss.data, epoch_loss,\n epoch_time))\n torch.save(model.state_dict(), os.path.join(model_directory, \"model_final.pt\"))\n self.model = model\n\n def extract_features(self, img: Image):\n img = self.pre_process(img).unsqueeze(0)\n img = img.to(self.device)\n feature_vector, _ = self.model(img)\n return feature_vector.cpu().detach().numpy().flatten()\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self, input_shape=(224, 224, 3)):\n super(AutoEncoder, self).__init__()\n\n self.input_shape = input_shape\n\n self.encoder_conv = nn.Sequential(nn.Conv2d(input_shape[2], 32, kernel_size=5, stride=2, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=5, stride=2, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=0),\n nn.ReLU(inplace=True))\n self.feature_reduction = (input_shape[0] // 2 // 2 // 2 // 2 - 1) // 2\n lin_features_len = ((input_shape[0] // 2 // 2 // 2 // 2 - 1) // 2) * (\n (input_shape[0] // 2 // 2 // 2 // 2 - 1) // 2) * 128\n\n self.encoder_linear = nn.Linear(lin_features_len, 2048)\n\n self.decoder_layer = nn.Linear(2048, lin_features_len)\n\n out_pad = 1 if input_shape[0] // 2 // 2 // 2 // 2 % 2 == 0 else 0\n\n self.decoder_conv = nn.Sequential(nn.ReLU(inplace=True),\n nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=0,\n output_padding=out_pad),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(64, 64, kernel_size=5, stride=2, padding=2,\n output_padding=out_pad),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(64, 32, kernel_size=5, stride=2, padding=2,\n output_padding=out_pad),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(32, 32, kernel_size=5, stride=2, padding=2,\n output_padding=out_pad),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(32, 3, kernel_size=5, stride=2, padding=2,\n output_padding=out_pad)) # ,\n\n # nn.ReLU(inplace=True),\n # nn.Sigmoid())\n\n def forward(self, input):\n # encoder\n encoded_features = self.encoder_conv(input)\n encoded_features = encoded_features.view(encoded_features.size(0), -1)\n encoded = self.encoder_linear(encoded_features)\n\n # decoder\n decoded_linear = self.decoder_layer(encoded)\n decoded = decoded_linear.view(decoded_linear.size(0), 128, self.feature_reduction, self.feature_reduction)\n decoded = self.decoder_conv(decoded)\n return encoded, decoded\n\n","sub_path":"extractors/AutoEncoder.py","file_name":"AutoEncoder.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447015266","text":"from Position import Position\nfrom ChessPiece import ChessPiece\n\nclass 
Queen(ChessPiece):\n\n    def __init__(self, position, aligment):\n        ChessPiece.__init__(self, position, aligment)\n\n    def getSignature(self):\n        return 'BQ' if self.getAligment() == ChessPiece.BLACK else 'WQ'\n    \n    def getPossibleMovements(self, chessBoard):\n\n        #moving left\n        possibleMovements = self.__getValidLinearMovements(range(self.getPosition().x() - 1, -1, -1), chessBoard)\n\n        #moving right\n        possibleMovements += self.__getValidLinearMovements(range(self.getPosition().x() + 1, 8), chessBoard)\n\n        #moving up\n        possibleMovements += self.__getValidLinearMovements(range(self.getPosition().y() -1, -1, -1), chessBoard, False)\n\n        #moving down\n        possibleMovements += self.__getValidLinearMovements(range(self.getPosition().y() + 1, 8), chessBoard, False)\n\n        #moving toward left upper diagonal\n        possibleMovements += self.__getValidDiagonalMovements(-1, -1, chessBoard, lambda x, y: 0 <= x and 0 <= y)\n\n        #moving toward right upper diagonal\n        possibleMovements += self.__getValidDiagonalMovements(-1, 1, chessBoard, lambda x, y: 0 <= x and y < 8)\n\n        #moving toward left lower diagonal\n        possibleMovements += self.__getValidDiagonalMovements(1, -1, chessBoard, lambda x, y: x < 8 and 0 <= y)\n\n        #moving toward right lower diagonal\n        possibleMovements += self.__getValidDiagonalMovements(1, 1, chessBoard, lambda x, y: x < 8 and y < 8)\n\n        return possibleMovements\n\n\n    def __getValidLinearMovements(self, possibleRange, chessBoard, varyingX=True):\n\n        validPositions = []\n\n        for p in possibleRange:\n            \n            possiblePosition = Position(p, self.getPosition().y()) if varyingX else Position(self.getPosition().x(), p)\n            pieceInPosition = chessBoard.getPieceInPosition(possiblePosition)\n\n            if(pieceInPosition == None):\n                validPositions.append(possiblePosition)\n            else:\n                if(pieceInPosition.getAligment() != self.getAligment()):\n                    validPositions.append(possiblePosition)\n                break\n            \n        return validPositions\n\n    def __getValidDiagonalMovements(self, xStep, yStep, chessBoard, constraintCheck):\n        \n        validMovements = []\n        newX, newY = self.getPosition().x() + xStep, self.getPosition().y() + yStep\n\n        while(constraintCheck(newX, newY)):\n            possiblePosition = Position(newX, newY)\n            pieceInPosition = chessBoard.getPieceInPosition(possiblePosition)\n\n            if(pieceInPosition == None):\n                validMovements.append(possiblePosition)\n            else:\n                if(pieceInPosition.getAligment() != self.getAligment()):\n                    validMovements.append(possiblePosition)\n                break\n            \n            newX += xStep\n            newY += yStep\n        \n        return validMovements\n\n    def copy(self):\n        return Queen(self.getPosition().copy(), self.getAligment())\n\n    def getValue(self):\n        return ChessPiece.QUEEN_WORTH","sub_path":"chess/Queen.py","file_name":"Queen.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40599207","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, render_template, request, redirect, session, flash\nimport connexion\nimport calendar\nimport dateutil.parser  # calendar and dateutil are required by date_format below\nfrom models import model\n\n\ndef get_ticker_price(ticker='aapl'):\n    return model.apiget(ticker)\n\n# app = connexion.App(__name__, specification_dir='./')\n\napp = Flask(__name__)\n\n# app.add_api('swagger.yml')\n\napp.secret_key = 'the session needs this'\n\ndef date_format(datestring):\n    date = dateutil.parser.parse(datestring)\n    week_day_index = date.weekday()\n    clock_time = date.strftime(\"%H:%M:%S\")\n    date_string = f\"{calendar.day_abbr[week_day_index]} {date.day} {calendar.month_name[date.month]} \"+ clock_time\n    return date_string\n    \n# context_processor does not 
work with connexion\n# @app.context_processor\n# def context_processor():\n# return dict(date_format = date_format)\n\n\n\n@app.route('/', methods=['GET'])\ndef send_to_login():\n return redirect('/login')\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login(): \n if request.method == 'GET':\n print(session)\n if 'username' in session:\n return render_template('login_logged.html')\n return render_template('login.html')\n else:\n try:\n username = request.form['username']\n password = request.form['password']\n user_object = model.set_user_object(username)\n except:\n flash(\"Invalid Login\")\n return redirect('/login')\n if user_object.check_password(user_object.pass_hash, password):\n session['username'] = username\n flash(f'User {username} successfully logged in!')\n return redirect('/login')\n else:\n flash(\"Invalid Login\")\n return redirect('/login')\n\n\nif __name__=='__main__':\n app.debug = True\n app.run(port=8080)\n \n","sub_path":"myapp/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518353718","text":"\"\"\"Easee charger switch.\"\"\"\n\nfrom homeassistant.components.switch import SwitchEntity\nfrom homeassistant.const import CONF_MONITORED_CONDITIONS\n\nfrom .entity import ChargerEntity, convert_units_funcs\nfrom .const import DOMAIN, EASEE_ENTITIES\nimport logging\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n \"\"\"Setup switch platform.\"\"\"\n config = hass.data[DOMAIN][\"config\"]\n chargers_data = hass.data[DOMAIN][\"chargers_data\"]\n monitored_conditions = config.options.get(CONF_MONITORED_CONDITIONS, [\"status\"])\n entities = []\n for charger_data in chargers_data._chargers:\n for key in monitored_conditions:\n data = EASEE_ENTITIES[key]\n entity_type = data.get(\"type\", \"sensor\")\n\n if entity_type == \"switch\":\n _LOGGER.debug(\n \"Adding entity: %s (%s) for charger %s\",\n key,\n entity_type,\n charger_data.charger.name,\n )\n entities.append(\n ChargerSwitch(\n charger_data=charger_data,\n name=key,\n state_key=data[\"key\"],\n units=data[\"units\"],\n convert_units_func=convert_units_funcs.get(\n data[\"convert_units_func\"], None\n ),\n attrs_keys=data[\"attrs\"],\n icon=data[\"icon\"],\n state_func=data.get(\"state_func\", None),\n switch_func=data.get(\"switch_func\", None),\n )\n )\n\n chargers_data._entities.extend(entities)\n async_add_entities(entities)\n\n\nclass ChargerSwitch(ChargerEntity, SwitchEntity):\n \"\"\"Easee switch class.\"\"\"\n\n async def async_turn_on(self, **kwargs): # pylint: disable=unused-argument\n \"\"\"Turn on the switch.\"\"\"\n _LOGGER.debug(\"%s Switch turn on\" % self._entity_name)\n function_call = getattr(self.charger_data.charger, self._switch_func)\n await function_call(True)\n await self.charger_data.async_refresh()\n await self.async_update()\n\n async def async_turn_off(self, **kwargs): # pylint: disable=unused-argument\n \"\"\"Turn off the switch.\"\"\"\n _LOGGER.debug(\"%s Switch turn off\" % self._entity_name)\n function_call = getattr(self.charger_data.charger, self._switch_func)\n await function_call(False)\n await self.charger_data.async_refresh()\n await self.async_update()\n\n @property\n def is_on(self):\n \"\"\"Return true if the switch is on.\"\"\"\n _LOGGER.debug(\"Getting state of %s\" % self._entity_name)\n return 
self._state\n","sub_path":"custom_components/easee/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"363904952","text":"t_prompt = \"- Text: \"\nfilename = input(\"Name of your file: \")\ntry:\n with open(filename, \"r\") as f:\n working_text = f.read()\n print(\"The file content is: \\n\", working_text)\nexcept IOError:\n print(\"Your file has been created.\")\n working_text = \"\"\n\n\ndef f_line_break():\n return \"\\n\"\n\n\ndef f_plain():\n return input(\"- Text: \")\n\n\ndef f_bold():\n return f\"**{input(t_prompt)}**\"\n\n\ndef f_italic():\n return f\"*{input(t_prompt)}*\"\n\n\ndef f_inline_code():\n return f\"`{input(t_prompt)}`\"\n\n\ndef f_header():\n h_level = int(input(\"- Level: \"))\n user_text = input(t_prompt)\n return h_level * \"#\" + f\" {user_text}\\n\"\n\n\ndef f_link():\n l_label = input(\"- Label: \")\n url = input(\"- URL: \")\n return f\"[{l_label}]({url})\"\n\n\ndef f_list():\n while True:\n rows = int(input(\"- Number of rows: \"))\n if rows > 0:\n break\n print(\"The number of rows should be greater than zero\")\n user_text = \"\"\n for i in range(rows):\n if user_input == \"ordered-list\":\n counter = f\"{i + 1}. \"\n elif user_input == \"unordered-list\":\n counter = \"* \"\n user_text += counter + input(f\"- Row #{i+1}\") + \"\\n\"\n return user_text\n\n\navailable_formatters = {'plain': f_plain, 'bold': f_bold, 'italic': f_italic, 'header': f_header,\n 'ordered-list': f_list, 'unordered-list': f_list, 'link': f_link,\n 'inline-code': f_inline_code, 'line-break': f_line_break}\n\nwhile True:\n user_input = input(\"- Choose a formatter: \")\n if user_input == \"!help\":\n print(\"Available formatters: \", \" \".join(available_formatters))\n print(\"Special commands: !help !done\")\n\n elif user_input == \"!done\":\n with open(filename, \"w\") as f:\n f.write(working_text)\n break\n elif user_input not in available_formatters:\n print(\"Unknown formatting type or command. 
Please try again\")\n elif user_input in available_formatters:\n working_text += available_formatters[f\"{user_input}\"]()\n print(working_text)\n","sub_path":"markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"435892039","text":"# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries\n# SPDX-License-Identifier: MIT\n\n# Simple demo of the LSM9DS1 accelerometer, magnetometer, gyroscope.\n# Will print the acceleration, magnetometer, and gyroscope values every second.\nimport time\nimport board\nimport adafruit_lsm9ds1\n\n# Create sensor object, communicating over the board's default I2C bus\ni2c = board.I2C() # uses board.SCL and board.SDA\n# i2c = board.STEMMA_I2C() # For using the built-in STEMMA QT connector on a microcontroller\nsensor = adafruit_lsm9ds1.LSM9DS1_I2C(i2c)\n\n# SPI connection:\n# from digitalio import DigitalInOut, Direction\n# spi = board.SPI()\n# csag = DigitalInOut(board.D5)\n# csag.direction = Direction.OUTPUT\n# csag.value = True\n# csm = DigitalInOut(board.D6)\n# csm.direction = Direction.OUTPUT\n# csm.value = True\n# sensor = adafruit_lsm9ds1.LSM9DS1_SPI(spi, csag, csm)\n\n# Main loop will read the acceleration, magnetometer, gyroscope, Temperature\n# values every second and print them out.\nwhile True:\n # Read acceleration, magnetometer, gyroscope, temperature.\n accel_x, accel_y, accel_z = sensor.acceleration\n mag_x, mag_y, mag_z = sensor.magnetic\n gyro_x, gyro_y, gyro_z = sensor.gyro\n temp = sensor.temperature\n # Print values.\n print(\n \"Acceleration (m/s^2): ({0:0.3f},{1:0.3f},{2:0.3f})\".format(\n accel_x, accel_y, accel_z\n )\n )\n print(\n \"Magnetometer (gauss): ({0:0.3f},{1:0.3f},{2:0.3f})\".format(mag_x, mag_y, mag_z)\n )\n print(\n \"Gyroscope (rad/sec): ({0:0.3f},{1:0.3f},{2:0.3f})\".format(\n gyro_x, gyro_y, gyro_z\n )\n )\n print(\"Temperature: {0:0.3f}C\".format(temp))\n # Delay for a second.\n time.sleep(1.0)\n","sub_path":"examples/lsm9ds1_simpletest.py","file_name":"lsm9ds1_simpletest.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"503727485","text":"from machine import I2C\nimport struct\nimport time\nimport utime\n\nclass Clock:\n \"\"\" CLOCK is a HT1382 I2C/3-Wire Real Time Clock with a 32 kHz crystal \"\"\"\n def __init__(self, i2, a = 0x68):\n self.i2 = i2\n self.a = a\n \n def set(self, tt = None):\n \"\"\" tt is (year, month, mday, hour, minute, second, weekday, yearday), as used\n by utime. 
\"\"\"\n if tt is None:\n tt = utime.localtime()\n (year, month, mday, hour, minute, second, weekday, yearday) = tt\n def bcd(x):\n return (x % 10) + 16 * (x // 10)\n self.i2.writeto_mem(self.a, 7, bytes([0]))\n self.i2.writeto_mem(self.a, 0, bytes([\n bcd(second),\n bcd(minute),\n 0x80 | bcd(hour), # use 24-hour mode\n bcd(mday),\n bcd(month),\n 1 + weekday,\n bcd(year % 100)]))\n\n def regrd(self, addr, fmt = \"B\"):\n b = self.i2.readfrom_mem(self.a, addr, struct.calcsize(fmt))\n return struct.unpack(fmt, b)\n\n def read(self):\n (ss,mm,hh,dd,MM,ww,yy) = self.regrd(0, \"7B\")\n def dec(x):\n return (x % 16) + 10 * (x // 16)\n return (\n 2000 + dec(yy),\n dec(MM),\n dec(dd),\n dec(hh & 0x7f),\n dec(mm),\n dec(ss),\n dec(ww) - 1)\n\ndef main():\n i2 = I2C(1, freq = 100000)\n\n d = Clock(i2)\n\n # Set the clock to 2010-2-10 14:45:00\n d.set((2019, 2, 10, 14, 45, 0, 0, 1))\n\n while True:\n print('year=%4d month=%2d mday=%2d time=%02d:%02d:%02d weekday=%d' % d.read())\n time.sleep(1)\n","sub_path":"micropython/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"587503877","text":"import sys\n\n\n\n\n\n\n\ndef revenge(N):\n\tflipcount = 0\n\n\tif len(N) != 1:\n\t\tdish = N\n\t\toptimal_dish = \"+\"*len(N)\n\t\tng_optimal_dish = \"-\"*len(N)\n\n\t\t#print(dish, optimal_dish, dish == optimal_dish)\n\n\t\twhile dish != optimal_dish:\n\t\t\ti = 0\n\t\t\tps = dish[0]\n\t\t\tflipped = False\n\n\t\t\twhile i < len(dish) and flipped == False:\n\t\t\t\tif dish[i] != ps:\n\t\t\t\t\tdish = flip(dish,i)\n\t\t\t\t\tflipcount+=1\n\t\t\t\t\tflipped = True\n\t\t\t\telif dish == ng_optimal_dish:\n\t\t\t\t\tdish = flip(dish, len(dish))\n\t\t\t\t\tflipcount+=1\n\t\t\t\t\tflipped = True\n\t\t\t\telse:\n\t\t\t\t\ti+=1\n\telse:\n\t\tif N[0] != \"+\":\n\t\t\tflipcount = 1\n\n\treturn flipcount\n\n\ndef flip(dish, i):\n\t\"\"\"\n\tflip the pancakes from top upto position i\n\t\"\"\"\n\tif dish[0] == \"-\":\n\t\tdish = \"+\"*i + dish[i:]\n\telse:\n\t\tdish = \"-\"*i + dish[i:]\n\treturn dish\n\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) > 1:\n\t\tfilename = sys.argv[1]\n\n\tfilename = \"B-large.in\"\n\n\n\tanswer = \"\"\n\twith open(filename, \"r\") as f:\n\t\tmaxcase = f.readline()\n\t\tcase = 1\n\t\twhile case < int(maxcase)+1:\n\t\t\tnumberofflips = revenge(f.readline().strip())\n\t\t\tanswer += \"Case #\" + str(case) + \": \" + str(numberofflips) + \"\\n\"\n\t\t\tcase+=1\n\t\n\twith open(\"B-large.out\",\"w\") as f:\n\t\tf.write(answer)","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_ganzinam_revengeofthepancakes.py","file_name":"16_0_2_ganzinam_revengeofthepancakes.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"606376597","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport os\nimport yaml\nfrom tools.times import timestamp\nfrom config.conf import ELEMENT_PATH, LOCATE_MODE\n\n\ndef inspect_element():\n \"\"\"审查所有的元素是否正确\"\"\"\n start_time = timestamp()\n for i in os.listdir(ELEMENT_PATH):\n _path = os.path.join(ELEMENT_PATH, i)\n if os.path.isfile(_path):\n with open(_path, encoding='utf-8') as f:\n data = yaml.safe_load(f)\n for k in data.values():\n pattern, value = k.split('==')\n if pattern not in LOCATE_MODE:\n raise AttributeError('【%s】路径中【%s]元素没有指定类型' % (i, k))\n if pattern == 'xpath':\n assert '//' in value, '【%s】路径中【%s]元素xpath类型与值不配' % (\n i, k)\n if 
pattern == 'css':\n assert '//' not in value, '【%s】路径中【%s]元素css类型与值不配' % (\n i, k)\n if pattern in ('id', 'name', 'class'):\n assert value, '【%s】路径中【%s]元素类型与值不匹配' % (i, k)\n end_time = timestamp()\n print(\"校验元素done!用时%.9f秒!\" % (end_time - start_time))\n\n\nif __name__ == '__main__':\n inspect_element()\n","sub_path":"common/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291235363","text":"import socket\n\nsckt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nbuffSize = 4096\n\nhost = socket.gethostname()\nport = 1026\n\nprint(\"Host: \" + host)\n\nsckt.bind((\"\", port))\n\naddress = (\"\", port)\n\nfile = open(\"duck.jpg\", \"wb\")\n\ndata, address = sckt.recvfrom(buffSize)\n\nprint(\"\\nConnected with: \" + str(address) + \"\\n\")\n\nwhile data:\n\n file.write(data)\n\n print(\"Received \" + str(file.tell()) + \" bytes...\")\n\n if data == b\"done\":\n print(\"\\nFile received!\\n\")\n file.close()\n sckt.close()\n break\n\n data, address = sckt.recvfrom(buffSize)\n","sub_path":"server/udpserver.py","file_name":"udpserver.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"315214186","text":"import operator\n\nimport numpy as np\n\n\ndef autoNorm(dataset):\n attributes = dataset.shape[1]\n for i in range(0, attributes):\n dataset[:, i] = (dataset[:, i] - dataset[:, i].mean()) / dataset[:, i].std()\n return dataset\n\ndef knn(dataset, label, k, inX):\n lines = dataset.shape[0]\n distance = np.sqrt(((np.tile(inX, (lines, 1)) - dataset)**2).sum(axis=1))\n sortedDistance = distance.argsort()[0:k]\n classLabel = {}\n for item in range(k):\n l = label[sortedDistance[item]]\n classLabel[l] = classLabel.get(l, 0)+1\n key = sorted(classLabel.items(), key=operator.itemgetter(1), reverse=True)\n return key[0][0]\n\n\ndef test(data, label, test_ratio=0.2):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n train_data = data[test_indices]\n train_label = label[train_indices]\n error = 0\n for i in test_indices:\n pred = knn(train_data, train_label, 20, data[i][0:3])\n if pred != label[i]:\n error += 1\n print(error, test_set_size)\n print(error / test_set_size)\n\n\nsource = np.loadtxt(\"../datasets/datingTestSet2.txt\")\n\ndata = autoNorm(source[:, 0:3])\nlabel = source[:, -1]\nlabel_prediction = knn(data, label, 3, [1,2,3])\ntest(data, label)\n\n\n","sub_path":"others/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"52069794","text":"import dict\nimport os\nimport os.path\ndef tf_dict(dir):\n dicts = {}\n for root,dirs,files in os.walk(dir):\n for name in files:\n path = os.path.join(root,name)\n D = dict.dict(path)\n for d in D:\n if dicts.has_key(d):\n dicts[d] = int(dicts[d]) + int(D[d])\n else:\n dicts[d] = int(D[d])\n tf_dict = {}\n tf_down = sum(dicts.values())\n for d in dicts.keys():\n tf_up = dicts[d]\n tf = float(tf_up)/float(tf_down)\n tf_dict[d] = tf\n tf_dict = sorted(tf_dict.items(),key=lambda x:x[1],reverse=True)\n # tf_dict = tf_dict[:200]\n tf_dict_1 = {}\n k=0\n for l in tf_dict:\n if len(l[0])>3:\n tf_dict_1[l[0]]=l[1]\n k+=1\n if k == 100:\n break\n\n # for l in 
tf_dict:\n # tf_dict_1[l[0]]=l[1]\n\n\n # for l in list:\n # tf_dict[l[0]] = l[1]\n return tf_dict_1\n# if __name__ == '__main__':\n# for root,dirs,files in os.walk('/Users/baiweili/Desktop/output1'):\n# if not root is '/Users/baiweili/Desktop/output1':\n# tf_dict = tf_dict(root)\n# name = root.split('/')[-1]\n# result_file = '/Users/baiweili/Desktop/result/'+name\n# f = open(result_file,'wb')\n# for d in tf_dict.keys():\n# f.write(d)\n# f.write('\\n')\n# f.close()","sub_path":"PythonProject/tf-idf/tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"67010600","text":"import logging\nimport sys\nfrom os import path\nfrom typing import Optional\n\nfrom ..common import run\nfrom ..common.run_status import RunStatus\n\n\ndef clone_student(student: str, base_url: str):\n logging.debug(\"Cloning {}'s repository\".format(student))\n if not path.exists(student):\n clone_url('{}/{}.git'.format(base_url, student))\n\n\ndef clone_url(url: str, into: Optional[str] = None):\n if into:\n logging.info('cloning {} into {}'.format(url, into))\n status, output, _ = run(['git', 'clone', '--quiet', url, into])\n else:\n logging.info('cloning {}'.format(url))\n status, output, _ = run(['git', 'clone', '--quiet', url])\n\n if status is RunStatus.CALLED_PROCESS_ERROR:\n if 'Permission denied (publickey)' in output:\n print('Permission denied when cloning from {}'.format(url), file=sys.stderr)\n print('Make sure that this SSH key is registered with StoGit.', file=sys.stderr)\n sys.exit(1)\n\n if 'The project you were looking for could not be found.' in output:\n print('Could not find repository {}'.format(url), file=sys.stderr)\n sys.exit(1)\n","sub_path":"stograde/student/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"532836862","text":"import time\nimport argparse\nimport asyncio\nimport vapoursynth\nimport os\nfrom functools import partial\n\n\"\"\"\nReworke by Infi\nOriginal Author: kageru https://gist.github.com/kageru/549e059335d6efbae709e567ed081799\nThanks: BluBb_mADe, FichteFoll, stux!, Frechdachs\n\"\"\"\n\ncore = vapoursynth.core\ncore.add_cache = False\nimwri = getattr(core, \"imwri\", getattr(core, \"imwrif\", None))\noutput_dir = os.path.splitext(os.path.basename(__file__))[0]\n\n\nclass GetNative:\n def __init__(self, src, kernel=None, b=None, c=None, taps=None, ar=None, approx=None, min_h=None, max_h=None,\n frame=None, img_out=None, plot_scaling=None, plot_format=None, show_plot=None):\n self.plot_format = plot_format\n self.plot_scaling = plot_scaling\n self.src = src\n self.min_h = min_h\n self.max_h = max_h\n self.ar = ar\n self.b = b\n self.c = c\n self.taps = taps\n self.approx = approx\n self.kernel = kernel\n self.frame = frame\n self.img_out = img_out\n self.txt_output = \"\"\n self.show_plot = show_plot\n self.resolutions = []\n self.filename = self.get_filename()\n\n async def run(self):\n # change format to GrayS with bitdepth 32 for descale\n src = self.src[self.frame]\n matrix_s = '709' if src.format.color_family == vapoursynth.RGB else None\n src_luma32 = core.resize.Point(src, format=vapoursynth.YUV444PS, matrix_s=matrix_s)\n src_luma32 = core.std.ShufflePlanes(src_luma32, 0, vapoursynth.GRAY)\n src_luma32 = core.std.Cache(src_luma32)\n\n # descale each individual frame\n resizer = descale_approx if self.approx else 
descale_accurate\n clip_list = []\n for h in range(self.min_h, self.max_h + 1):\n clip_list.append(resizer(src_luma32, self.getw(h), h, self.kernel, self.b, self.c, self.taps))\n full_clip = core.std.Splice(clip_list, mismatch=True)\n full_clip = upscale(full_clip, self.getw(src.height), src.height, self.kernel, self.b, self.c, self.taps)\n if self.ar != src.width / src.height:\n src_luma32 = upscale(src_luma32, self.getw(src.height), src.height, self.kernel, self.b, self.c, self.taps)\n expr_full = core.std.Expr([src_luma32 * full_clip.num_frames, full_clip], 'x y - abs dup 0.015 > swap 0 ?')\n full_clip = core.std.CropRel(expr_full, 5, 5, 5, 5)\n full_clip = core.std.PlaneStats(full_clip)\n full_clip = core.std.Cache(full_clip)\n\n tasks_pending = set()\n futures = {}\n vals = []\n full_clip_len = len(full_clip)\n for frame_index in range(len(full_clip)):\n print(f\"{frame_index}/{full_clip_len}\", end=\"\\r\")\n fut = asyncio.ensure_future(asyncio.wrap_future(full_clip.get_frame_async(frame_index)))\n tasks_pending.add(fut)\n futures[fut] = frame_index\n while len(tasks_pending) >= core.num_threads * (2 if self.approx else 1) + 2:\n tasks_done, tasks_pending = await asyncio.wait(tasks_pending, return_when=asyncio.FIRST_COMPLETED)\n vals += [(futures.pop(task), task.result().props.PlaneStatsAverage) for task in tasks_done]\n\n tasks_done, _ = await asyncio.wait(tasks_pending)\n vals += [(futures.pop(task), task.result().props.PlaneStatsAverage) for task in tasks_done]\n vals = [v for _, v in sorted(vals)]\n ratios, vals, best_value = self.analyze_results(vals)\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n self.save_plot(vals)\n if self.img_out:\n self.save_images(src_luma32)\n\n self.txt_output += 'Raw data:\\nResolution\\t | Relative Error\\t | Relative difference from last\\n'\n for i, error in enumerate(vals):\n self.txt_output += f'{i + self.min_h:4d}\\t\\t | {error:.10f}\\t\\t\\t | {ratios[i]:.2f}\\n'\n\n with open(f\"{output_dir}/{self.filename}.txt\", \"w\") as file_open:\n file_open.writelines(self.txt_output)\n\n return best_value\n\n def getw(self, h, only_even=True):\n w = h * self.ar\n w = int(round(w))\n if only_even:\n w = w // 2 * 2\n\n return w\n\n def analyze_results(self, vals):\n ratios = [0.0]\n for i in range(1, len(vals)):\n last = vals[i - 1]\n current = vals[i]\n ratios.append(current and last / current)\n sorted_array = sorted(ratios, reverse=True) # make a copy of the array because we need the unsorted array later\n max_difference = sorted_array[0]\n\n differences = [s for s in sorted_array if s - 1 > (max_difference - 1) * 0.33][:5]\n\n for diff in differences:\n current = ratios.index(diff)\n # don't allow results within 20px of each other\n for res in self.resolutions:\n if res - 20 < current < res + 20:\n break\n else:\n self.resolutions.append(current)\n\n bicubic_params = self.kernel == 'bicubic' and f'Scaling parameters:\\nb = {self.b:.2f}\\nc = {self.c:.2f}\\n' or ''\n best_values = f\"{'p, '.join([str(r + self.min_h) for r in self.resolutions])}p\"\n self.txt_output += f\"Resize Kernel: {self.kernel}\\n{bicubic_params}Native resolution(s) (best guess): \" \\\n f\"{best_values}\\nPlease check the graph manually for more accurate results\\n\\n\"\n\n return ratios, vals, f\"Native resolution(s) (best guess): {best_values}\"\n\n def save_plot(self, vals):\n import matplotlib as mpl\n if not self.show_plot:\n mpl.use('Agg')\n import matplotlib.pyplot\n\n matplotlib.pyplot.style.use('dark_background')\n 
matplotlib.pyplot.plot(range(self.min_h, self.max_h + 1), vals, '.w-')\n matplotlib.pyplot.title(self.filename)\n matplotlib.pyplot.ylabel('Relative error')\n matplotlib.pyplot.xlabel('Resolution')\n matplotlib.pyplot.yscale(self.plot_scaling)\n matplotlib.pyplot.savefig(f'{output_dir}/{self.filename}.{self.plot_format}')\n if self.show_plot:\n matplotlib.pyplot.show()\n matplotlib.pyplot.clf()\n\n # Original idea by Chibi_goku http://recensubshq.forumfree.it/?t=64839203\n # Vapoursynth port by MonoS @github: https://github.com/MonoS/VS-MaskDetail\n def mask_detail(self, clip, final_width, final_height):\n resizer = descale_approx if self.approx else descale_accurate\n temp = resizer(clip, final_width, final_height, self.kernel, self.b, self.c, self.taps)\n temp = upscale(temp, clip.width, clip.height, self.kernel, self.b, self.c, self.taps)\n mask = core.std.Expr([clip, temp], 'x y - abs dup 0.015 > swap 16 * 0 ?').std.Inflate()\n mask = upscale(mask, final_width, final_height, \"spline36\", self.b, self.c, taps=self.taps)\n\n return change_bitdepth(mask, dither_type=\"none\")\n\n # TODO: use PIL for output\n def save_images(self, src_luma32):\n resizer = descale_approx if self.approx else descale_accurate\n src = src_luma32\n first_out = imwri.Write(change_bitdepth(src), 'png', f'{output_dir}/{self.filename}_source%d.png')\n first_out.get_frame(0) # trick vapoursynth into rendering the frame\n for r in self.resolutions:\n r += self.min_h\n image = self.mask_detail(src, self.getw(r), r)\n mask_out = imwri.Write(change_bitdepth(image), 'png', f'{output_dir}/{self.filename}_mask_{r:d}p%d.png')\n mask_out.get_frame(0)\n descale_out = resizer(src, self.getw(r), r, self.kernel, self.b, self.c, self.taps)\n descale_out = imwri.Write(change_bitdepth(descale_out), 'png', f'{output_dir}/{self.filename}_{r:d}p%d.png')\n descale_out.get_frame(0)\n\n def get_filename(self):\n return ''.join([\n f\"f_{self.frame}\",\n f\"_k_{self.kernel}\",\n f\"_ar_{self.ar:.2f}\",\n f\"_{self.min_h}-{self.max_h}\",\n f\"_b_{self.b:.2f}_c_{self.c:.2f}\" if self.kernel == \"bicubic\" else \"\",\n f\"_taps_{self.taps}\" if self.kernel == \"lanczos\" else \"\",\n f\"_[approximation]\" if self.approx else \"\",\n ])\n\n\n# TODO Check fmtc kernel befor calling fmtc here\ndef upscale(src, width, height, kernel, b, c, taps):\n resizer = getattr(src.resize, kernel.title())\n if not resizer:\n return src.fmtc.resample(width, height, kernel=kernel, a1=b, a2=c, taps=taps)\n if kernel == 'bicubic':\n resizer = partial(resizer, filter_param_a=b, filter_param_b=c)\n elif kernel == 'lanczos':\n resizer = partial(resizer, filter_param_a=taps)\n\n return resizer(width, height)\n\n\ndef descale_accurate(src, width, height, kernel, b, c, taps):\n descale = getattr(src, 'descale_getnative', None)\n if descale is None:\n descale = getattr(src, 'descale')\n descale = getattr(descale, 'De' + kernel)\n if kernel == 'bicubic':\n descale = partial(descale, b=b, c=c)\n elif kernel == 'lanczos':\n descale = partial(descale, taps=taps)\n\n return descale(width, height)\n\n\ndef descale_approx(src, width, height, kernel, b, c, taps):\n return src.fmtc.resample(width, height, kernel=kernel, taps=taps, a1=b, a2=c, invks=True, invkstaps=taps)\n\n\ndef change_bitdepth(src, bits=8, dither_type='error_diffusion'):\n src_f = src.format\n out_f = core.register_format(src_f.color_family,\n vapoursynth.INTEGER,\n bits,\n src_f.subsampling_w,\n src_f.subsampling_h)\n\n return core.resize.Point(src, format=out_f.id, dither_type=dither_type)\n\n # r39+\n # 
return src.resize.Point(format=src.format.replace(bits_per_sample=bits, dither_type=dither_type))\n\n\ndef to_float(str_value):\n if set(str_value) - set(\"0123456789./\"):\n raise argparse.ArgumentTypeError(\"Invalid characters in float parameter\")\n try:\n return eval(str_value) if \"/\" in str_value else float(str_value)\n except (SyntaxError, ZeroDivisionError, TypeError, ValueError):\n raise argparse.ArgumentTypeError(\"Exception while parsing float\") from None\n\n\ndef get_attr(obj, attr, default=None):\n for ele in attr.split('.'):\n obj = getattr(obj, ele, default)\n if obj == default:\n return default\n return obj\n\n\ndef get_source_filter(args):\n ext = os.path.splitext(args.input_file)[1].lower()\n if imwri and (args.img or ext in {\".png\", \".tif\", \".tiff\", \".bmp\", \".jpg\", \".jpeg\", \".webp\", \".tga\", \".jp2\"}):\n print(\"Using imwri as source filter\")\n return imwri.Read\n source_filter = get_attr(core, 'ffms2.Source')\n if source_filter:\n print(\"Using ffms2 as source filter\")\n return source_filter\n source_filter = get_attr(core, 'lsmas.LWLibavSource')\n if source_filter:\n print(\"Using lsmas.LWLibavSource as source filter\")\n return source_filter\n source_filter = get_attr(core, 'lsmas.LSMASHVideoSource')\n if source_filter:\n print(\"Using lsmas.LSMASHVideoSource as source filter\")\n return source_filter\n raise ValueError(\"No source filter found.\")\n\n\nparser = argparse.ArgumentParser(description='Find the native resolution(s) of upscaled material (mostly anime)')\nparser.add_argument(dest='input_file', type=str, help='Absolute or relative path to the input file')\nparser.add_argument('--frame', '-f', dest='frame', type=int, default=None, help='Specify a frame for the analysis. Random if unspecified')\nparser.add_argument('--kernel', '-k', dest='kernel', type=str.lower, default='bilinear', help='Resize kernel to be used')\nparser.add_argument('--bicubic-b', '-b', dest='b', type=to_float, default=\"1/3\", help='B parameter of bicubic resize')\nparser.add_argument('--bicubic-c', '-c', dest='c', type=to_float, default=\"1/3\", help='C parameter of bicubic resize')\nparser.add_argument('--lanczos-taps', '-t', dest='taps', type=int, default=3, help='Taps parameter of lanczos resize')\nparser.add_argument('--aspect-ratio', '-ar', dest='ar', type=to_float, default=0, help='Force aspect ratio. Only useful for anamorphic input')\nparser.add_argument('--approx', '-ap', dest=\"approx\", action=\"store_true\", help='Use fmtc instead of descale [faster, loss of accuracy]')\nparser.add_argument('--min-height', '-min', dest=\"min_h\", type=int, default=500, help='Minimum height to consider')\nparser.add_argument('--max-height', '-max', dest=\"max_h\", type=int, default=1000, help='Maximum height to consider')\nparser.add_argument('--use', '-u', help='Use specified source filter e.g. (lsmas.LWLibavSource)')\nparser.add_argument('--is-image', '-img', dest='img', action=\"store_true\", help='Force image input')\nparser.add_argument('--generate-images', '-img-out', dest='img_out', action=\"store_true\", help='Save detail mask as png')\nparser.add_argument('--plot-scaling', '-ps', dest='plot_scaling', type=str.lower, default='log', help='Scaling of the y axis. Can be \"linear\" or \"log\"')\nparser.add_argument('--plot-format', '-pf', dest='plot_format', type=str.lower, default='svg', help='Format of the output image. 
Can be svg, png, pdf, rgba, jp(e)g, tif(f), and probably more')\nparser.add_argument('--show-plot-gui', '-pg', dest='show_plot', action=\"store_true\", help='Show an interactive plot gui window.')\n\n\ndef getnative():\n starttime = time.time()\n args = parser.parse_args()\n\n if (args.img or args.img_out) and imwri is None:\n raise ValueError(\"imwri not found.\")\n\n if args.approx:\n if not hasattr(core, 'fmtc'):\n raise ValueError('fmtc not found')\n\n try:\n core.fmtc.resample(core.std.BlankClip(), kernel=args.kernel)\n except vapoursynth.Error:\n raise ValueError('fmtc: Invalid kernel specified.')\n else:\n if not hasattr(core, 'descale_getnative') and \"toggaf.asi.xe\" not in core.get_plugins():\n if not hasattr(core, 'descale'):\n raise ValueError('Neither descale_getnative nor descale found.\\n'\n 'One of them is needed for accurate descaling')\n print(\"Warning: only the really really slow descale is available.\\n\"\n \"Download the modified descale for improved performance:\\n\"\n \"https://github.com/Infiziert90/vapoursynth-descale\")\n\n if args.kernel not in ['spline36', 'spline16', 'lanczos', 'bicubic', 'bilinear']:\n raise ValueError(f'descale: {args.kernel} is not a supported kernel. Try -ap for approximation.')\n\n if args.use:\n source_filter = get_attr(core, args.use)\n if not source_filter:\n raise ValueError(f\"{args.use} is not available in the current vapoursynth environment.\")\n print(f\"Using {args.use} as source filter\")\n else:\n source_filter = get_source_filter(args)\n\n src = source_filter(args.input_file)\n if args.frame is None:\n args.frame = src.num_frames // 3\n\n if args.ar == 0:\n args.ar = src.width / src.height\n\n if args.min_h >= src.height:\n raise ValueError(f\"Picture height must be greater than min height {args.min_h}.\")\n elif args.min_h >= args.max_h:\n raise ValueError(f\"Your min height is greater than or equal to your max height.\")\n elif args.max_h > src.height:\n print(f\"Your max height can't be bigger than your image dimensions. 
New max height is {src.height}\")\n args.max_h = src.height\n\n kwargs = args.__dict__.copy()\n del kwargs[\"input_file\"]\n del kwargs[\"use\"]\n del kwargs[\"img\"]\n\n get_native = GetNative(src, **kwargs)\n try:\n loop = asyncio.get_event_loop()\n best_value = loop.run_until_complete(get_native.run())\n except ValueError as err:\n return print(f\"Error in getnative: {err}\")\n\n content = ''.join([\n f\"\\nKernel: {args.kernel} \",\n f\"AR: {args.ar:.2f} \",\n f\"B: {args.b:.2f} C: {args.c:.2f} \" if args.kernel == \"bicubic\" else \"\",\n f\"Taps: {args.taps} \" if args.kernel == \"lanczos\" else \"\",\n f\"\\n{best_value}\",\n f\"\\n[approximation]\" if args.approx else \"\",\n ])\n print(content)\n print('done in {:.2f} s'.format(time.time() - starttime))\n\n\nif __name__ == '__main__':\n print(\"Start getnative.\")\n getnative()\n","sub_path":"getnative.py","file_name":"getnative.py","file_ext":"py","file_size_in_byte":16253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"240253858","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 13 10:34:29 2020\nSample data scanning module: sample\n@author: iFunk\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom strategies import boll\n# MONGODB CONNECT\nfrom pymongo import MongoClient\nclient = MongoClient('mongodb://127.0.0.1:27017')\n#client = MongoClient('mongodb://112.12.60.2:27017')\nmydb=client[\"ptest\"]\n\n#GET ALL DAILY\ndef get_daily_all():\n mycollection=mydb[\"stocks_daily_qfq\"]\n rs_stockcode = mycollection.find()\n list_stockcode = list(rs_stockcode)\n list_stockcode.reverse()\n # convert the query result to a DataFrame\n df_stockcode = pd.DataFrame(list_stockcode)\n return df_stockcode\n\ndef get_sample_daterange(data,startdate,enddate):\n data_stockcode = data.groupby('ts_code')\n for name,group in data_stockcode:\n #GET GROUP\n df_group = pd.DataFrame(group)\n #df_group['trade_date'] = pd.to_datetime(df_group['trade_date'])\n df_group = df_group.sort_values(by=\"trade_date\",ascending=False)\n df_group['trade_date'] = pd.DataFrame(df_group['trade_date'], dtype=np.datetime64)\n df_group = df_group.set_index(['trade_date'], drop=False, append=False, inplace=False, verify_integrity=False)\n if (df_group.empty or len(df_group)<0):\n #print (name,'','EMPTY OR <5')\n continue\n else:\n #if (df_group.loc[startdate] is not None or df_group.loc[enddate] is not None):\n df = df_group[enddate:startdate]\n print (name,df['trade_date'].tolist())\n '''\n keys: column label or list of arrays,\n drop: delete the columns to be used as the new index, boolean, default True,\n append: boolean, whether to append the columns to the existing index, default False,\n inplace: modify the DataFrame in place (without creating a new object), default False,\n verify_integrity: check the new index for duplicates, default False.\n '''\n return data\n\ndata = get_daily_all()\nstart_date = datetime.datetime.strptime('2020-08-01','%Y-%m-%d')\nend_date = datetime.datetime.strptime('2020-09-01','%Y-%m-%d')\ndf = get_sample_daterange(data,start_date,end_date).head(1000)\n","sub_path":"review/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"18214832","text":"import torch, torch.nn as nn\n\n\n\"\"\"=================================================================================================\"\"\"\nALLOWED_MINING_OPS = ['random','semihard', 'distance', 'parametric', 'anticollapse_distance', 'anticollapse_semihard', 'anticollapse_cdistance']\nREQUIRES_BATCHMINER = True\nREQUIRES_OPTIM = True\n\n### MarginLoss with trainable class separation margin beta. 
Runs on Mini-batches as well.\nclass Criterion(torch.nn.Module):\n def __init__(self, opt, batchminer):\n \"\"\"\n Args:\n margin: Triplet Margin.\n nu: Regularisation Parameter for beta values if they are learned.\n beta: Class-Margin values.\n n_classes: Number of different classes during training.\n \"\"\"\n super(Criterion, self).__init__()\n self.n_classes = opt.n_classes\n\n self.margin = opt.loss_margin_margin\n self.nu = opt.loss_margin_nu\n self.beta_constant = opt.loss_margin_beta_constant\n self.beta_val = opt.loss_margin_beta\n\n if opt.loss_margin_beta_constant:\n self.beta = opt.loss_margin_beta\n else:\n self.beta = torch.nn.Parameter(torch.ones(opt.n_classes)*opt.loss_margin_beta)\n\n self.batchminer = batchminer\n\n self.name = 'margin'\n\n self.lr = opt.loss_margin_beta_lr\n\n def forward(self, batch, labels):\n \"\"\"\n Args:\n batch: torch.Tensor: Input of embeddings with size (BS x DIM)\n labels: nparray/list: For each element of the batch assigns a class [0,...,C-1], shape: (BS x 1)\n \"\"\"\n sampled_triplets = self.batchminer(batch, labels)\n\n if len(sampled_triplets):\n d_ap, d_an = [],[]\n for triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0],:], 'Positive':batch[triplet[1],:], 'Negative':batch[triplet[2]]}\n\n pos_dist = ((train_triplet['Anchor']-train_triplet['Positive']).pow(2).sum()+1e-8).pow(1/2)\n neg_dist = ((train_triplet['Anchor']-train_triplet['Negative']).pow(2).sum()+1e-8).pow(1/2)\n\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\n d_ap, d_an = torch.stack(d_ap), torch.stack(d_an)\n\n if self.beta_constant:\n beta = self.beta\n else:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).to(torch.float).to(d_ap.device)\n\n pos_loss = torch.nn.functional.relu(d_ap-beta+self.margin)\n neg_loss = torch.nn.functional.relu(beta-d_an+self.margin)\n\n pair_count = torch.sum((pos_loss>0.)+(neg_loss>0.)).to(torch.float).to(d_ap.device)\n\n if pair_count == 0.:\n loss = torch.sum(pos_loss+neg_loss)\n else:\n loss = torch.sum(pos_loss+neg_loss)/pair_count\n\n if self.nu and not self.beta_constant:\n beta_regularisation_loss = self.nu * torch.sum(beta) # assumption: L1 regularisation of the learned beta margins, scaled by nu\n loss = loss + beta_regularisation_loss.to(torch.float).to(d_ap.device)\n else:\n loss = torch.tensor(0.).to(torch.float).to(batch.device)\n\n return loss\n","sub_path":"criteria/stacked_self_distill.py","file_name":"stacked_self_distill.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149706249","text":"# coding=gbk\nfrom login.templates.platform.common.operate_mysql import select, select_platform, select_readbook\n\n\ndef bookName():\n '''Generate the full name of a paid-listening copyright holder'''\n cp_record = select_platform(\"select * from platform.t_copyright order by id desc limit 1;\")\n cp_id = cp_record[0]['id']\n cp_id = str(cp_id)\n cp_id_lenth= len(cp_id)\n if cp_id_lenth<4:\n Name_Number=str(int(cp_id)+1).zfill(4)\n elif cp_id_lenth==4 and cp_id != '9999':\n Name_Number=str(int(cp_id)+1)\n elif cp_id_lenth>4 and cp_id[-4:] != '9999':\n Name_Number=str(int(cp_id[-4:])+1).zfill(4)\n else:\n Name_Number='0001'\n FullName='有声书籍版权'+Name_Number\n CopyName = '有声版权' + Name_Number\n print('全称为:'+FullName+'|'+'简称为:'+CopyName)\n return [FullName,CopyName]\n\ndef readBookName():\n '''Generate the full name of an e-reading copyright holder'''\n cp_record = select_readbook(\"select * from readbook.rb_partner_ext order by partner_id desc limit 1;\")\n cp_id = cp_record[0]['partner_id']\n cp_id = str(cp_id)\n cp_id_lenth = len(cp_id)\n if cp_id_lenth < 4:\n Name_Number = str(int(cp_id) + 1).zfill(4)\n elif cp_id_lenth == 4 and 
cp_id != '9999':\n Name_Number = str(int(cp_id) + 1)\n elif cp_id_lenth > 4 and cp_id[-4:] != '9999':\n Name_Number = str(int(cp_id[-4:]) + 1).zfill(4)\n else:\n Name_Number = '0001'\n OrgName = '懒人听书' + Name_Number\n FullName = '电子阅读版权' + Name_Number\n CopyName = '阅读版权' + Name_Number\n print('机构名称为:'+OrgName+'|'+'全称为:'+FullName+'|'+'简称为:'+CopyName)\n return [OrgName,FullName,CopyName]\ndef comicName():\n '''Generate the full name of a comic copyright holder'''\n cp_record = select(\"select * from yyting_partdb.c_comic_copyright order by id desc limit 1;\",\"db_yyting_partdb\")\n cp_id = cp_record[0]['id']\n cp_id = str(cp_id)\n print(cp_id)\n cp_id_lenth = len(cp_id)\n if cp_id_lenth < 4:\n Name_Number = str(int(cp_id) + 1).zfill(4)\n elif cp_id_lenth == 4 and cp_id != '9999':\n Name_Number = str(int(cp_id) + 1)\n elif cp_id_lenth > 4 and cp_id[-4:] != '9999':\n Name_Number = str(int(cp_id[-4:]) + 1).zfill(4)\n else:\n Name_Number = '0001'\n FullName = '漫画版权方' + Name_Number\n ShortName = '漫画版权' + Name_Number\n Contacter = '漫画'+Name_Number\n print('全称为:'+FullName+'|'+'简称为:'+ShortName+'|'+'联系人为:'+Contacter)\n return [FullName,ShortName,Contacter]\n\nif __name__=='__main__':\n comicName()","sub_path":"login/templates/platform/common/Create_CopyrightName.py","file_name":"Create_CopyrightName.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"194793101","text":"import json\n\nfrom pylxd import container, exceptions\nfrom pylxd.tests import testing\n\n\nclass TestContainer(testing.PyLXDTestCase):\n \"\"\"Tests for pylxd.container.Container.\"\"\"\n\n def test_all(self):\n \"\"\"A list of all containers is returned.\"\"\"\n containers = container.Container.all(self.client)\n\n self.assertEqual(1, len(containers))\n\n def test_get(self):\n \"\"\"Return a container.\"\"\"\n name = 'an-container'\n\n an_container = container.Container.get(self.client, name)\n\n self.assertEqual(name, an_container.name)\n\n def test_get_not_found(self):\n \"\"\"NotFound is raised when the container doesn't exist.\"\"\"\n def not_found(request, context):\n context.status_code = 404\n return json.dumps({\n 'type': 'error',\n 'error': 'Not found',\n 'error_code': 404})\n self.add_rule({\n 'text': not_found,\n 'method': 'GET',\n 'url': r'^http://pylxd.test/1.0/containers/(?P<name>.*)$', # NOQA\n })\n\n name = 'an-missing-container'\n\n self.assertRaises(\n exceptions.NotFound,\n container.Container.get, self.client, name)\n\n def test_create(self):\n \"\"\"A new container is created.\"\"\"\n config = {'name': 'an-new-container'}\n\n an_new_container = container.Container.create(\n self.client, config, wait=True)\n\n self.assertEqual(config['name'], an_new_container.name)\n\n def test_create_failed(self):\n \"\"\"If the container creation fails, CreateFailed is raised.\"\"\"\n def create_fail(request, context):\n context.status_code = 500\n return json.dumps({\n 'type': 'error',\n 'error': 'An unknown error',\n 'error_code': 500})\n self.add_rule({\n 'text': create_fail,\n 'method': 'POST',\n 'url': r'^http://pylxd.test/1.0/containers$',\n })\n config = {'name': 'an-new-container'}\n\n self.assertRaises(\n exceptions.CreateFailed,\n container.Container.create, self.client, config)\n\n def test_reload(self):\n \"\"\"A reload updates the properties of a container.\"\"\"\n an_container = container.Container(\n name='an-container', _client=self.client)\n\n an_container.reload()\n\n self.assertTrue(an_container.ephemeral)\n\n def test_reload_not_found(self):\n 
\"\"\"NameError is raised on a 404 for updating container.\"\"\"\n def not_found(request, context):\n context.status_code = 404\n return json.dumps({\n 'type': 'error',\n 'error': 'Not found',\n 'error_code': 404})\n self.add_rule({\n 'text': not_found,\n 'method': 'GET',\n 'url': r'^http://pylxd.test/1.0/containers/(?P.*)$', # NOQA\n })\n\n an_container = container.Container(\n name='an-missing-container', _client=self.client)\n\n self.assertRaises(NameError, an_container.reload)\n\n def test_update(self):\n \"\"\"A container is updated.\"\"\"\n an_container = container.Container(\n name='an-container', _client=self.client)\n an_container.architecture = 1\n an_container.config = {}\n an_container.created_at = 1\n an_container.devices = {}\n an_container.ephemeral = 1\n an_container.expanded_config = {}\n an_container.expanded_devices = {}\n an_container.profiles = 1\n an_container.status = 1\n\n an_container.update(wait=True)\n\n self.assertTrue(an_container.ephemeral)\n\n def test_update_partial_objects(self):\n \"\"\"A partially fetched profile can't be pushed.\"\"\"\n an_container = self.client.containers.all()[0]\n\n self.assertRaises(\n exceptions.ObjectIncomplete,\n an_container.update)\n\n def test_rename(self):\n an_container = container.Container(\n name='an-container', _client=self.client)\n\n an_container.rename('an-renamed-container', wait=True)\n\n self.assertEqual('an-renamed-container', an_container.name)\n\n def test_delete(self):\n \"\"\"A container is deleted.\"\"\"\n # XXX: rockstar (21 May 2016) - This just executes\n # a code path. There should be an assertion here, but\n # it's not clear how to assert that, just yet.\n an_container = container.Container(\n name='an-container', _client=self.client)\n\n an_container.delete(wait=True)\n\n\nclass TestContainerState(testing.PyLXDTestCase):\n \"\"\"Tests for pylxd.container.ContainerState.\"\"\"\n\n def test_get(self):\n \"\"\"Return a container.\"\"\"\n name = 'an-container'\n\n an_container = container.Container.get(self.client, name)\n state = an_container.state()\n\n self.assertEqual('Running', state.status)\n self.assertEqual(103, state.status_code)\n","sub_path":"pylxd/tests/test_container.py","file_name":"test_container.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"524786169","text":"from app import app\nfrom flask import Flask,render_template\n@app.route('/')\n@app.route('/index')\ndef index():\n user = {'username': 'Neel'} \n return render_template('test.html', user=user) \n@app.route('/test') \ndef test(): \n user = {'username': 'Neel'} \n return render_template('test.html', user=user) \n@app.route('/test2') \ndef test2(): \n user = {'username': 'Neel'} \n sample_data = [ \n { \n 'author': {'username': 'Neel'}, \n 'body': 'Hello!' \n }, { \n 'author': {'username': 'Neel'}, \n 'body': 'Welcome to Flask!' 
\n } \n ] \n return render_template('test2.html', user=user, sample_data=sample_data) \n\n\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100182144","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n'''\nAs an independent Apple Store App developer,\nyou want to run a limited-time promotion and generate activation codes (or coupons) for your app; how do you generate 200 activation codes (or coupons) with Python?\n'''\n\nimport random, string\n\npoolOfChars = string.ascii_letters + string.digits\nrandom_codes = lambda x, y: ''.join([random.choice(x) for i in range(y)])\n\ni = 0\n\nwhile i < 200:\n print(random_codes(poolOfChars, 15))\n i+=1","sub_path":"show-me-the-code/problem_001/problem_001.py","file_name":"problem_001.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"224317643","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom user_manager.getuser import get_user\n\nfrom finance_manager.forms import CalcProfitForm\n\nfrom inventory_manager.models import Sell, OtherSell, OtherIncome, Expense, MaterialIssue, Salery\nfrom finance_manager.models import Profit, PettyCashExpense\n\n\n@login_required(login_url='/login')\ndef profit(request):\n\tuser = get_user(request)\n\tif not user.is_superuser:\n\t\treturn redirect('/fobiddn/')\n\n\tif request.method == 'POST':\n\t\tstart_date = request.POST['start_date']\n\t\tend_date = request.POST['end_date']\n\t\trecodes = None\n\t\tif start_date == '' or end_date == '':\n\t\t\trecodes = Profit.objects.all().order_by('-id')\n\t\telse:\n\t\t\trecodes = Profit.objects.filter(date__range=[start_date,end_date]).order_by('-id')\n\t\t\t\n\t\treturn render(request,'finance_manager/profit/profit.html', {'recodes':recodes})\n\telse:\n\t\trecodes = Profit.objects.all().order_by('-id')\n\t\treturn render(request,'finance_manager/profit/profit.html', {'recodes':recodes})\n\ndef calculate_profit(start_date,end_date,startup_unfinished,last_unfinished,startup_finished,last_finished,desc,remarks):\n#calculate cost of sales\n\t#issued material cost\n\tissued_materials = MaterialIssue.objects.filter(date__range=[start_date,end_date])\n\tissued_material_cost = 0\n\tfor issue in issued_materials:\n\t\tissued_material_cost += issue.amount\n\tprint('Issued material cost:\\t',issued_material_cost)\n\n\t#startup_unfinished - (param)\n\t#last_unfinished - (param)\n\t#startup_finished - (param)\n\t#last_finished - (param)\n\t\n\tcost_of_sales = issued_material_cost + startup_unfinished + startup_finished - (last_unfinished + last_finished)\n\tprint('Cost of sales:\\t',cost_of_sales)\n#calculate gross profit\n\t#cost of sales - (calculated)\n\t#sales amount\n\tsales = Sell.objects.filter(date__range=[start_date,end_date])\n\tsales_amount = 0\n\tfor sale in sales:\n\t\tsales_amount += sale.amount\n\tprint('Sales:\\t',sales_amount)\n\n\t#other sales amount\n\tother_sales = OtherSell.objects.filter(date__range=[start_date,end_date])\n\tother_sales_amount = 0\n\tfor other_sale in other_sales:\n\t\tother_sales_amount += other_sale.amount\n\tprint('Other Sales:\\t',other_sales_amount)\n\n#calculate profit\n\t#gross profit\n\tgross_profit = (sales_amount + other_sales_amount) - cost_of_sales\n\tprint('Gross Profit:\\t',gross_profit)\n\n\t#other income\n\tother_incomes = 
OtherIncome.objects.filter(date__range=[start_date,end_date])\n\tother_incomes_amount = 0\n\tfor other_income in other_incomes:\n\t\tother_incomes_amount += other_income.amount\n\tprint('Other Income:\\t',other_incomes_amount)\n\n\t#expenses\n\texpenses = Expense.objects.filter(date__range=[start_date,end_date])\n\texpenses_amount = 0\n\tfor expense in expenses:\n\t\texpenses_amount += expense.amount\n\tprint('Expenses:\\t',expenses_amount)\n\n\tsalery_expenses = Salery.objects.filter(date__range=[start_date,end_date])\n\tsalery_expenses_amount = 0\n\tfor salery in salery_expenses:\n\t\tsalery_expenses_amount += salery.amount\n\tprint('Salery Expenses:\\t',salery_expenses_amount)\n\n\t#pettycash expenses\n\tpettycash_expenses = PettyCashExpense.objects.filter(date__range=[start_date,end_date])\n\tpettycash_expenses_amount = 0\n\tfor pettycash_expense in pettycash_expenses:\n\t\tpettycash_expenses_amount += pettycash_expense.amount\n\tprint('PettyCash Expenses:\\t',pettycash_expenses_amount)\n\n\t#calculating profit\n\tprofit = gross_profit + other_incomes_amount - (expenses_amount + pettycash_expenses_amount + salery_expenses_amount)\n\tprint('Profit:\\t',profit)\n\n\tprofit_object = Profit(desc=desc, amount=profit,start_date=start_date,end_date=end_date,remarks=remarks)\n\tprofit_object.save()\n\t\n\n@login_required(login_url='/login')\ndef calc_profit(request):\n\tuser = get_user(request)\n\tif not user.is_superuser:\n\t\treturn redirect('/fobiddn/')\n\t\t\n\tif request.method == 'POST':\n\t\tform = CalcProfitForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tstart_date = form.cleaned_data.get('start_date')\n\t\t\tend_date = form.cleaned_data.get('end_date')\n\t\t\tstartup_unfinished = form.cleaned_data.get('startup_unfinished')\n\t\t\tlast_unfinished = form.cleaned_data.get('last_unfinished')\n\t\t\tstartup_finished = form.cleaned_data.get('startup_finished')\n\t\t\tlast_finished = form.cleaned_data.get('last_finished')\n\t\t\tdesc = form.cleaned_data.get('desc')\n\t\t\tremarks = form.cleaned_data.get('remarks')\n\n\t\t\tcalculate_profit(start_date,end_date,startup_unfinished,last_unfinished,startup_finished,last_finished,desc,remarks)\n\t\treturn redirect('/finance-manager/profit/')\n\telse:\n\t\tform = CalcProfitForm()\n\t\tcontext = {'formname':'Calculate Profit','form':form}\n\t\treturn render(request,'finance_manager/profit/form_template.html', context)\n","sub_path":"FactoryManagementSystem/finance_manager/views/profit.py","file_name":"profit.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52093657","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 7 09:14:25 2019\n\n@author: user\n\"\"\"\n\n# --------------------------- Load libraries ---------------------------------\nfrom casadi import *\nimport rospy\nimport mpc_fatigue.pynocchio_casadi as pin\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport talker_inv_dyn_pilz_3DOF as ta\nimport talker_inv_kin_pilz_3DOF as ti\n\n#---------------------------- Load the urdf ---------------------------------\nurdf = rospy.get_param('/robot_description')\n\n# ---------------------- Solve forward kinematics ----------------------------\n#Postion of link 5 in cartesian space (Solution to direct kinematics)\nfk_string = pin.generate_forward_kin(urdf, 'prbt_link_5')\n#fk_string = pin.generate_forward_kin(urdf, 'prbt_flange')\n\n#Create casadi function\nfk = casadi.Function.deserialize(fk_string)\n\n# ---------------------- Solve 
Jacobian of link_5 ----------------------------\n#Jacobian for link_5 (Solution to direct kinematics)\njac_string = pin.generate_jacobian(urdf, 'prbt_link_5')\n#jac_string = pin.generate_jacobian(urdf, 'prbt_flange')\n#Create casadi function\njac_dict = casadi.Function.deserialize(jac_string)\n\n# ---------------------- Solve Inverse dynamics ----------------------------\nIdyn_string = pin.generate_inv_dyn(urdf)\n#Create casadi function\nIdyn = casadi.Function.deserialize(Idyn_string)\n\n# --------------------------- NLP formulation -----------------------------\n\n#Variables\nnq = 3\nqc = SX.sym('qc', nq) #joint angles\nqcdot = SX.sym('qcdot', nq) # joint velocities\nqcddot = np.zeros(nq) # Joint acceleration\nx_ini = 0.4\ny_ini = 0.0\nz_ini = 0.3\n\n# ---------------------- Solve forward kinematics ----------------------------\n#From the desider initial condition, compute the inverse kinematic to determine\n#the initial condition\npos_link_5 = fk(q = qc)['ee_pos']\npos_des_link_5 = SX([x_ini,y_ini,z_ini])\n\ndes = dot(pos_link_5 - pos_des_link_5, pos_link_5 - pos_des_link_5)\n#Nlp problem\nprob = dict(x = qc, f = des)\n#Create solver interface\nsolver = nlpsol('solver','ipopt', prob)\nsol = solver(x0 = [0.0,0.0,0.0])\n\n# Jacobian value\n#jac_end_effector = jac_dict(q = qc)[\"J\"][0:4]\n# Position end effector\n# pos_end_effector = fk(q=qc)['ee_pos']\n# tau with end effector force\n#tau = Idyn(q=qc_k, qdot= qcd_k, qddot = qcddot)['tau'] - mtimes(jac_end_effector.T,Force)\n\n#Initial conditions\nqc_init = sol['x']# High torque on second joint\nqcdot_init = np.zeros(nq)\n\nti.talker(qc_init)\n# Define time \nT = 2.\nN = 60\nh = T/N\n# Define a certain time grid\ntgrid = [T/N*k for k in range(N)]\n\n# DEGREES OF FREEDOM\ntau0 = 40\nalpha = 2\nlbqdot = -.6\nubqdot = .6\nbound_torque = 15.0\nz_min = 0.2\nz_max = 0.75\n\n# Empty NLP\nw = []\nlbw = []\nubw = []\ng = []\nlbg = []\nubg = []\nJ = 0\nlbt_plot = []\nubt_plot = []\n\n#Initial condition\nqc_k = SX.sym('qc0', nq)\nw.append(qc_k)\nlbw += [qc_init] # .tolist()\nubw += [qc_init] # .tolist() \n\nfor k in range(N):\n \n #Control at each interval\n qcdname = 'qcd'+ str(k)\n qcd_k = SX.sym(qcdname, nq)\n w.append(qcd_k)\n if k == 0:\n lbw += qcdot_init.tolist() \n ubw += qcdot_init.tolist() \n else:\n lbw += np.full((1,nq),lbqdot)[0].tolist() \n ubw += np.full((1,nq),ubqdot)[0].tolist() \n \n #Constraint on end effector position\n# z_name = 'z'+ str(k) + 'end_eff'\n# z_rect = SX.sym(z_name)\n# w.append(z_rect)\n# lbw += [z_min] \n# ubw += [z_max] \n# lbw += [z_ini] \n# ubw += [z_ini] \n \n #Define reference needed to the constraint\n ref = SX(2,1)\n ref[0] = x_ini\n ref[1] = y_ini\n# ref[2] = z_rect\n \n pos_end_effector = fk(q=qc_k)['ee_pos'][0:2]\n g.append(pos_end_effector - ref)\n lbg += np.zeros(nq-1).tolist() \n ubg += np.zeros(nq-1).tolist() \n \n #Force at each interval\n F_name = 'Fx'+ str(k)\n Force = SX.sym(F_name,1) # Force at the end effector\n w.append(Force)\n lbw += [-np.inf] \n ubw += [ np.inf] \n# g.append(Force)\n# lbg += [0] \n# ubg += [ np.inf] \n \n #Constraint over tau\n jac_end_effector = jac_dict(q = qc_k)[\"J\"][0:3,0:3]\n F = SX(3,1)\n F[0] = Force\n F[1] = 0.0\n F[2] = 0.0\n tau = Idyn(q=qc_k, qdot= qcd_k, qddot = qcddot)['tau'] - mtimes(jac_end_effector.T,F)\n g.append(tau)\n esp = alpha * k * h\n torque = tau0 * np.exp(-esp)\n \n if torque > bound_torque:\n ubg += np.full((1,nq), torque)[0].tolist() \n ubt_plot.append(torque)\n lbg += np.full((1,nq), - torque)[0].tolist() \n lbt_plot.append(-torque)\n else:\n ubg 
+= np.full((1,3), bound_torque)[0].tolist() \n ubt_plot+= [ bound_torque]\n lbg += np.full((1,3),- bound_torque)[0].tolist() \n lbt_plot+= [-bound_torque]\n \n \n #Update J\n #J += - mtimes(tau.T,tau) #staturates torque\n #J += mtimes(tau.T,tau) \n J += - mtimes(F.T,F)\n #J += -F[0]\n \n #Now integrate\n q_next = qc_k + qcd_k * h\n \n #New local state\n qname = 'qc' + str(k+1)\n qc_k= SX.sym(qname,nq)\n w.append(qc_k)\n# lbw += np.full((1,nq), - np.inf )[0].tolist() #Cambiare ?\n# ubw += np.full((1,nq), np.inf)[0].tolist() \n lbw += np.array([-2.96,-2.53,-2.35]).tolist() #Cambiare ?\n ubw += np.array([2.96,2.53,2.35]).tolist()\n #Continuity constraint\n g.append(q_next - qc_k)\n lbg += np.zeros(nq).tolist() \n ubg += np.zeros(nq).tolist() \n \nubg = vertcat(*ubg)\nlbg = vertcat(*lbg)\nubw = vertcat(*ubw)\nlbw = vertcat(*lbw)\ng = vertcat(*g)\nw = vertcat(*w)\n\n\nprint(\"g.shape:\",g.shape)\nprint(\"lbg.shape:\",lbg.shape)\nprint(\"ubg.shape:\",ubg.shape)\nprint(\"x.shape:\",w.shape)\nprint(\"lbw.shape:\",lbw.shape)\nprint(\"ubw.shape:\",ubw.shape)\n\n# Create the nlp solver\nnlp = dict(f = J, g = g, x = w)\nSolver = nlpsol('Solver','ipopt',nlp)\nr = Solver(lbx = lbw, ubx = ubw, lbg = lbg, ubg = ubg) \n \nsol = r['x'].full().flatten()\n\nqc_opt= []\nq1_opt = []\nq2_opt = []\nq3_opt = []\nqcdot_opt = []\nq1dot_opt = []\nq2dot_opt = []\nq3dot_opt = []\nFx_opt = []\nz_rect_opt = []\n\nranghe = np.size(sol)/(2*nq+1)\n \nfor k in range(2*ranghe):\n if (k % 2 == 0):\n qc_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq])\n q1_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][0])\n q2_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][1])\n q3_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][2])\n qcdot_opt.append(sol[3*k+(k-k/2)+nq:3*k+(k-k/2)+2*nq])\n q1dot_opt.append(sol[3*k+(k-k/2)+nq:3*k+(k-k/2)+2*nq][0])\n q2dot_opt.append(sol[3*k+(k-k/2)+nq:3*k+(k-k/2)+2*nq][1])\n q3dot_opt.append(sol[3*k+(k-k/2)+nq:3*k+(k-k/2)+2*nq][2])\n Fx_opt.append(sol[3*k+(k-k/2)+2*nq])\n if (k == 2*N-1):\n k+=1\n qc_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq])\n q1_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][0])\n q2_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][1])\n q3_opt.append(sol[3*k+(k-k/2):3*k+(k-k/2)+nq][2])\n \n \n#Select taus:\ntau1_opt = []\ntau2_opt = []\ntau3_opt = []\n\n#Idyn = casadi.Function.deserialize(Idyn_string)\n\nfor k in range(N):\n \n qc = [q1_opt[k],q2_opt[k],q3_opt[k]]\n qcdot = [q1dot_opt[k],q2dot_opt[k],q3dot_opt[k]]\n jac_end_effector = jac_dict(q = qc)[\"J\"][0:3,0:3] \n \n Fend = np.full((nq,1), 0.0)\n Fend[0] = Fx_opt[k]\n\n tau = Idyn(q=qc,qdot= qcdot, qddot = qcddot)['tau'] - mtimes(jac_end_effector.T,Fend)\n tau1_opt.append(tau[0])\n tau2_opt.append(tau[1])\n tau3_opt.append(tau[2])\n\nta.talker(q1_opt,q2_opt,q3_opt)\n\n# ------------------------------ PLOTS --------------------------------------\ntgrid.append(T)\n\n# Plot q1, q2, q3\nplt.figure(1)\nplt.clf()\nplt.plot(tgrid,q1_opt,'-')\nplt.plot(tgrid,q2_opt,'-')\nplt.plot(tgrid,q3_opt,'-')\nplt.legend(['q1_opt','q2_opt','q3_opt'])\nplt.title('q variable in time plot')\nplt.grid()\n\n\n\nplt.figure(2)\nplt.clf()\nplt.step(tgrid,vertcat(DM.nan(1), q1dot_opt),'-')\nplt.step(tgrid,vertcat(DM.nan(1), q2dot_opt),'-')\nplt.step(tgrid,vertcat(DM.nan(1), q3dot_opt),'-')\nplt.legend(['q1dot_opt','q2dot_opt','q3dot_opt'])\nplt.title('qdot variable in time plot')\nplt.grid()\n\n\n# Plot torque\nplt.figure(3)\nplt.clf()\nplt.step(tgrid,vertcat(DM.nan(1), ubt_plot),'--')\nplt.step(tgrid,vertcat(DM.nan(1), lbt_plot),'--')\nplt.step(tgrid,[DM.nan(1)] + 
tau1_opt,'--')\nplt.step(tgrid,[DM.nan(1)] + tau2_opt,'--')\nplt.step(tgrid,[DM.nan(1)] + tau3_opt,'--')\nplt.legend(['ubg_plot','lgb_plot','tau1_opt','tau2_opt','tau3_opt'])\nplt.grid()\nplt.title('torque variable in time plot')\n\n#Plot Fx force\nplt.figure(4)\nplt.clf()\nplt.step(tgrid,vertcat(DM.nan(1), Fx_opt),'-')\nplt.legend('Fx_opt')\nplt.title('Fx in time plot')\nplt.grid()\n\nplt.show()\n\nprint('end')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python/Pilz_3_DOF/force_optimization_pilz_3DOF.py","file_name":"force_optimization_pilz_3DOF.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"421071987","text":"from django.conf.urls import url\n\nfrom . import views\nfrom participants.models import Adjudicator\n\nurlpatterns = [\n url(r'^create/$',\n views.create_adj_allocation,\n name='create_adj_allocation'),\n url(r'^edit/$',\n views.draw_adjudicators_edit,\n name='draw_adjudicators_edit'),\n url(r'^_get/$',\n views.draw_adjudicators_get,\n name='draw_adjudicators_get'),\n url(r'^save/$',\n views.SaveAdjudicatorsView.as_view(),\n name='save_adjudicators'),\n url(r'^_update_importance/$',\n views.update_debate_importance,\n name='update_debate_importance'),\n url(r'^conflicts/$',\n views.adj_conflicts,\n name='adj_conflicts'),\n]\n","sub_path":"adjallocation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"86025585","text":"import os\nimport io\nimport pandas as pd\nimport numpy as np\nimport farmhash\nimport time\nimport sqlite3\nimport json\nimport glob\nimport re\nfrom datetime import datetime\n\n# we only support the ros-e_m_a_c-spice-6-v1.0 directory for rosetta, there is another that exists in the naif db, \n# but it isnt used to populate our spicedb as we dont expect users to use that directory\nmissions_readable = { \"clem1-l-spice-6-v1.0\" : \"clementine\",\n \"co-s_j_e_v-spice-6-v1.0\" : \"cassini_orbiter\",\n \"dawn-m_a-spice-6-v1.0\" : \"dawn\",\n \"di-c-spice-6-v1.0\" : \"deep_impact\",\n \"dif-c_e_x-spice-6-v1.0\" : \"epoxi\",\n \"ds1-a_c-spice-6-v1.0\" : \"deep_space_1\",\n \"grail-l-spice-6-v1.0\" : \"grail\",\n \"hay-a-spice-6-v1.0\" : \"hayabusa\",\n \"jno-j_e_ss-spice-6-v1.0\" : \"juno\",\n \"lro-l-spice-6-v1.0\" : \"lunar_reconnaissance_orbiter\",\n \"mer1-m-spice-6-v1.0\" : \"mer_1\",\n \"mer2-m-spice-6-v1.0\" : \"mer_2\",\n \"mess-e_v_h-spice-6-v1.0\" : \"messenger\",\n \"mex-e_m-spice-6-v1.0\" : \"mars_express\",\n \"mgs-m-spice-6-v1.0\" : \"mars_global_surveyor\",\n \"mro-m-spice-6-v1.0\" : \"mars_reconnaissance_orbiter\",\n \"msl-m-spice-6-v1.0\" : \"mars_science_laboratory\",\n \"near-a-spice-6-v1.0\" : \"near\",\n \"nh-j_p_ss-spice-6-v1.0\" : \"new_horizons\",\n \"ody-m-spice-6-v1.0\" : \"mars_odyssey\",\n \"ros-e_m_a_c-spice-6-v1.0\" : \"rosetta\", \n \"sdu-c-spice-6-v1.0\" : \"stardust\",\n \"vco-v-spice-6-v1.0\" : \"venus_climate_orbiter\",\n \"vex-e_v-spice-6-v1.0\" : \"venus_express\",\n \"vo1_vo2-m-spice-6-v1.0\" : \"viking_orbiter\" }\n\n# Reverse mission translations (readable->true)\nmissions_true = {value: key for key, value in missions_readable.items()}\n\n\n# Parses config file for user settings\ndef configure():\n with open('/swaggerapp/config.txt', 'r') as f:\n ip = []\n user = []\n filepath = []\n for line in f:\n research = line.split(': ')\n ip.append(research[0].strip())\n research = 
line.split(' ')\n user.append(research[1].strip())\n filepath.append(research[2].strip())\n return user, ip, filepath\n\n\n# returns filename of newest kernel version\ndef newest_kernel(path, name):\n split = name.split('.')\n ext = split[-1]\n if re.search('_v[0-9]+', name):\n split = name.split('_v')\n elif re.search('v[0-9]+', name):\n split = name.split('v')\n \n # path/to/file/fname*.ext\n regex = path + '/' + split[0] + '*.' + ext \n # glob returns filenames in alphabetical order, we can assume the last will be the highest version\n newest = glob.glob(regex)[-1]\n return newest.rsplit('/', 1)[1] # we just want the filename, as newest versions always exist in same dir as eachother\n\n\n# indexes all kernel files from the /spicedata directory\n# fileinfo stored in /spicedata/.spicedb.sqlite\n# this method is called at the end of this init file\ndef populate_spicedb():\n\n # trash the old db bc its faster than comparing values and updating\n if os.path.exists('/spicedata/.spicedb.sqlite'):\n os.remove('/spicedata/.spicedb.sqlite')\n \n # database format will be: | Mission | Kernel | File | Path | Hash | Newest |\n conn = sqlite3.connect('/spicedata/.spicedb.sqlite')\n c = conn.cursor()\n c.execute(\"CREATE TABLE SPICE (Mission TEXT, Kernel TEXT, File TEXT, Path TEXT, Hash TEXT, Newest TEXT )\")\n \n # we expect a specific directory structure: /spicedata/{mission}/{weirddir}/data/{kernel}/{file}\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Begin Indexing of SPICE data from /spicedata directory')\n for root, subdir, files in os.walk('/spicedata'):\n for name in files:\n if name[0] == '.': # skip hidden files\n continue\n\n # full split format will be: ['', 'spicedata', 'clem1-l-spice-6-v1.0', 'clsp_1000', 'data', 'ck']\n split = root.split('/') \n if len(split) >= 3:\n if len(split) == 3 and name == 'dsindex.lbl': # we can expect this file at the top level of every mission directory\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Indexing Mission [' + missions_readable[split[2]] + ']')\n\n fhash = farmhash.hash64(str(io.open(root+'/'+name,'rb').read())) # spice data encoding is mixed, so read as binary\n newest = newest_kernel(root, name)\n mis_name = missions_readable[split[2]]\n\n if len(split)>=6 and (split[5] in ['ck', 'ek', 'fk', 'spk', 'sclk', 'lsk', 'ik', 'pck', 'mk'] and not name.endswith('.txt')): # kernel and mk files are always 4 dirs down \n ker_name = split[5]\n else:\n ker_name = 'misc'\n\n c.execute(\"INSERT OR IGNORE INTO SPICE (Mission, Kernel, File, Path, Hash, Newest) VALUES ('{mn}', '{kn}', '{fn}', '{fp}', '{fh}', '{new}')\"\n .format(mn=mis_name, kn=ker_name, fn=name, fp=root, fh=fhash, new=newest))\n \n conn.commit()\n conn.close()\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Finished Indexing of SPICE data, fileinfo stored in /spicedata/.spicedb.sqlite')\n\n\ndef sqlselect_dictarray(sql_rows):\n dicts = []\n for row in sql_rows:\n dicts.append(sqlselect_dict(row))\n return dicts\n\n# converts a SQL SELECT return format into an array of dicts\ndef sqlselect_dict(row):\n return {'mission': row[0],\n 'kernel' : row[1],\n 'file' : row[2],\n 'path' : row[3],\n 'hash' : row[4],\n 'newest' : row[5]}\n\ndef sqlselect_command(command):\n conn = sqlite3.connect('/spicedata/.spicedb.sqlite')\n c = conn.cursor()\n c.execute(command)\n rows = c.fetchall()\n conn.close()\n return rows\n\n# takes a sql select output and creates a pandas dataframe\ndef sqlselect_dataframe(rows):\n df = pd.DataFrame(columns = [\"mission\", \"kernel\", \"file\", \"path\", \"hash\", 
\"newest\"])\n for i in range(len(rows)):\n df.loc[i] = rows[i]\n return df\n\n\ndef make_user_ip_filepath_dict(user, ip, filepath):\n zipped = list(zip(ip, filepath))\n return dict(zip(user, zipped))\n\npopulate_spicedb()\n","sub_path":"python-flask-server/swagger_server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641987317","text":"class Garden(object):\n\n flowers = []\n trees = []\n\n def add_flower(self, color):\n self.flowers.append(Flower(color))\n\n def add_tree(self, color):\n self.trees.append(Tree(color))\n \n def watering(self, amount):\n print(\"Watering width\", amount)\n self.amount = amount / (len(self.flowers) + len(self.trees))\n for flower in self.flowers:\n flower.water_amount += 0.75 * self.amount\n for tree in self.trees:\n tree.water_amount += 0.4 * self.amount\n\n def show_garden(self):\n for flower in self.flowers:\n if flower.water_amount < 5:\n print(\"The \" + flower.color + \" Flower needs water\")\n else:\n print(\"The \" + flower.color + \" Flower doesnt need water\")\n for tree in self.trees:\n if tree.water_amount < 10:\n print(\"The \" + tree.color + \" Tree needs water\")\n else:\n print(\"The \" + tree.color + \" Tree doesnt need water\")\n print(\"\")\n\nclass Flower(object):\n \n water_amount = 0\n\n def __init__(self, color):\n self.color = color\n\nclass Tree(object):\n\n water_amount = 0\n \n def __init__(self, color):\n self.color = color\n\ngarden = Garden()\ngarden.add_flower(\"yellow\")\ngarden.add_flower(\"blue\")\ngarden.add_tree(\"purple\")\ngarden.add_tree(\"orange\")\ngarden.show_garden()\ngarden.watering(40)\ngarden.show_garden()\ngarden.watering(70)\ngarden.show_garden()","sub_path":"week-04/day-02/garden.py","file_name":"garden.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562432748","text":"def create_data_file(data_set,features_file):\n stats = get_labels(features_file)\n f = open(\"new_sentences_add_remove\",\"w\")\n with open(data_set) as data:\n for line in data:\n comb=line.split(\"!@@@!\")[0]\n query = comb.split(\"-\")[2]\n new_line = query+\"!@@@!\"+line.rstrip()+\"!@@@!\"+stats[comb]+\"\\n\"\n f.write(new_line)\n f.close()\n\ndef get_labels(features_file):\n stats={}\n with open(features_file) as features:\n for line in features:\n label = line.split()[0]\n comb = line.split()[-1].rstrip()\n stats[comb] = label\n return stats\n\n\n\ndata_set = \"/home/greg/auto_seo/scripts/senetces_add_remove\"\nfeatures_file = \"/home/greg/auto_seo/SentenceRanking/new_sentence_features\"\n\ncreate_data_file(data_set,features_file)","sub_path":"norm_net/prereq.py","file_name":"prereq.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"512213329","text":"#-------------------------------------------------------------------------------\r\n# Name: scalc\r\n# Purpose: to demonstrate a scientific calculator in python\r\n#\r\n# Author: CA1K\r\n#\r\n# Created: 09/02/2019\r\n#-------------------------------------------------------------------------------\r\n\r\n#feel free to tinker with the variables bw, bp, l1, and l2\r\n\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom math import *\r\nimport os\r\n\r\n#pre-defined variables\r\nl1 = \"Calculator by CA1K.XKOTTO.COM\" #top label in the window\r\nl2 = 
\"Scientific Calculator\" #title\r\nv = 0 #output value\r\nbw = 6 #button width\r\nbp = 3 #button padding\r\nque = [] #number que the calculator will reference\r\nfinished = 0 #signals that the operations are done\r\nspan = 7 #span of the non-buttons\r\n\r\n#pre-defined classes/functions\r\nclass window(Frame):\r\n def __init__(self, master=None):\r\n Frame.__init__(self, master)\r\n self.master = master\r\n self.que = que\r\n self.finished = finished\r\n self.v = v\r\n self.pi = pi\r\n self.e = e\r\n self.init_window()\r\n def init_window(self):\r\n self.master.title(l2)\r\n self.pack(fill=BOTH, expand=1)\r\n l = Label(self, text=l1, bg=\"black\", fg=\"white\")\r\n l.grid(row=0, column=0, columnspan=span, sticky=W+E)\r\n entry = Label(self, text=str(v), bg=\"gray\")\r\n entry.grid(row=1, columnspan=span, sticky=W+E)\r\n cls = Button(self, text=\"Clear\", width=bw, command=lambda:clr(entry,que))\r\n cls.grid(row=2, column=0, columnspan=span, sticky=W+E)\r\n sev = Button(self, text=\"7\", width=bw, command=lambda:chb(7,entry))\r\n sev.grid(row=3, column=0)\r\n xlx = Button(self, text=\"log x2, x\", width=bw, command=lambda:op(entry,7,que))\r\n xlx.grid(row=3, column=6)\r\n xl2 = Button(self, text=\"log 2, x\", width=bw, command=lambda:lb2(entry))\r\n xl2.grid(row=4, column=6)\r\n xl10 = Button(self, text=\"log 10, x\", width=bw, command=lambda:l10(entry))\r\n xl10.grid(row=5, column=6)\r\n mod = Button(self, text=\"%\", width=bw, command=lambda:op(entry,6,que))\r\n mod.grid(row=6, column=6)\r\n powr = Button(self, text=\"^\", width=bw, command=lambda:op(entry,4,que))\r\n powr.grid(row=3, column=4)\r\n sq = Button(self, text=\"x^2\", width=bw, command=lambda:sqr(entry))\r\n sq.grid(row=4, column=4)\r\n cu = Button(self, text=\"x^3\", width=bw, command=lambda:cub(entry))\r\n cu.grid(row=5, column=4)\r\n npow = Button(self, text=\"x^x\", width=bw, command=lambda:npown(entry))\r\n npow.grid(row=6, column=4)\r\n ee = Button(self, text=\"e\", width=bw, command=lambda:chb(e,entry))\r\n ee.grid(row=3, column=5)\r\n pie = Button(self, text=\"π\", width=bw, command=lambda:chb(pi,entry))\r\n pie.grid(row=4, column=5)\r\n ssq = Button(self, text=\"√\", width=bw, command=lambda:ssqf(entry))\r\n ssq.grid(row=5, column=5)\r\n s3q = Button(self, text=\"3√\", width=bw, command=lambda:s3qf(entry))\r\n s3q.grid(row=6, column=5)\r\n xrx = Button(self, text=\"x2√x\", width=bw, command=lambda:op(entry,5,que))\r\n xrx.grid(row=7, column=5)\r\n eig = Button(self, text=\"8\", width=bw, command=lambda:chb(8,entry))\r\n eig.grid(row=3, column=1)\r\n nin = Button(self, text=\"9\", width=bw, command=lambda:chb(9,entry))\r\n nin.grid(row=3, column=2)\r\n div = Button(self, text=\"/\", width=bw, command=lambda:op(entry,3,que))\r\n div.grid(row=3, column=3)\r\n fou = Button(self, text=\"4\", width=bw, command=lambda:chb(4,entry))\r\n fou.grid(row=4, column=0)\r\n fiv = Button(self, text=\"5\", width=bw, command=lambda:chb(5,entry))\r\n fiv.grid(row=4, column=1)\r\n six = Button(self, text=\"6\", width=bw, command=lambda:chb(6,entry))\r\n six.grid(row=4, column=2)\r\n mul = Button(self, text=\"*\", width=bw, command=lambda:op(entry,2,que))\r\n mul.grid(row=4, column=3)\r\n one = Button(self, text=\"1\", width=bw, command=lambda:chb(1,entry))\r\n one.grid(row=5, column=0)\r\n two = Button(self, text=\"2\", width=bw, command=lambda:chb(2,entry))\r\n two.grid(row=5, column=1)\r\n thr = Button(self, text=\"3\", width=bw, command=lambda:chb(3,entry))\r\n thr.grid(row=5, column=2)\r\n mns = Button(self, text=\"-\", width=bw, 
command=lambda:op(entry,1,que))\r\n mns.grid(row=5, column=3)\r\n zer = Button(self, text=\"0\", width=bw, command=lambda:chb(0,entry))\r\n zer.grid(row=6, column=0)\r\n equ = Button(self, text=\"=\", width=bw, command=lambda:fin(entry,que))\r\n equ.grid(row=6, column=1)\r\n pls = Button(self, text=\"+\", width=bw, command=lambda:op(entry,0,que))\r\n pls.grid(row=6, column=2)\r\n dot = Button(self, text=\".\", width=bw, command=lambda:dec(entry))\r\n dot.grid(row=6, column=3)\r\n si = Button(self, text=\"sin(x)\", width=bw, command=lambda:sct(0,entry))\r\n si.grid(row=7, column=0)\r\n co = Button(self, text=\"cos(x)\", width=bw, command=lambda:sct(1,entry))\r\n co.grid(row=7, column=1)\r\n tang = Button(self, text=\"tan(x)\", width=bw, command=lambda:sct(2,entry))\r\n tang.grid(row=7, column=2)\r\n dtr = Button(self, text=\"° -> π\", width=bw, command=lambda:conv(0,entry))\r\n dtr.grid(row=7, column=3)\r\n rtd = Button(self, text=\"π -> °\", width=bw, command=lambda:conv(1,entry))\r\n rtd.grid(row=7, column=4)\r\n rp = Button(self, text=\"1 / x\", width=bw, command=lambda:recp(entry))\r\n rp.grid(row=7, column=6)\r\n asi = Button(self, text=\"asin(x)\", width=bw, command=lambda:sct(3,entry))\r\n asi.grid(row=8, column=0)\r\n aco = Button(self, text=\"acos(x)\", width=bw, command=lambda:sct(4,entry))\r\n aco.grid(row=8, column=1)\r\n atang = Button(self, text=\"atan(x)\", width=bw, command=lambda:sct(5,entry))\r\n atang.grid(row=8, column=2)\r\n delt = Button(self, text=\"del\", width=bw, command=lambda:de(entry))\r\n delt.grid(row=8, column=3)\r\n ttg = Button(self, text=\"Graphing Calculator\", width=int(bw*3.5), command=lambda:tg(self))\r\n ttg.grid(row=8, column=4, columnspan=3, sticky=E)\r\n self.pack()\r\n def reg(a,b,c): #uses a to determine the type of calculation of b and c\r\n if(a==0):\r\n print(str(b) + \" + \" + str(c))\r\n return b + c\r\n if(a==1):\r\n print(str(b) + \" - \" + str(c))\r\n return b - c\r\n if(a==2):\r\n print(str(b) + \" x \" + str(c))\r\n return b * c\r\n if(a==3):\r\n print(str(b) + \" / \" + str(c))\r\n return b / c\r\n if(a==4):\r\n print(str(b) + \" to the power of \" + str(c))\r\n return b ** c\r\n if(a==5):\r\n print(str(c) + \" root \" + str(b))\r\n return b ** (1 / c)\r\n if(a==6):\r\n print(str(b) + \" % \" + str(c))\r\n return b % c\r\n if(a==7):\r\n print(str(b) + \" log \" + str(c))\r\n return log(b,c)\r\n def chb(n,z): #change the output box number\r\n r = z.cget(\"text\")\r\n if(self.finished == 0): #if the number in the box isn't the answer...\r\n if(r == \"0\" or n == pi or n == e):\r\n z.config(text=str(n))\r\n else:\r\n z.config(text=r+str(n))\r\n else:\r\n z.config(text=str(n))\r\n self.finished = 0\r\n def clr(z,z2): #erase the value in the output and array values\r\n z.config(text=\"0\")\r\n z2.clear() #supposed to be an array\r\n v = 0 #resetting the answer variable\r\n def op(z,n,q): #puts the operation and first number in que\r\n if(len(q)==0):\r\n try:\r\n q.append(int(z.cget(\"text\")))\r\n except ValueError: #if the number is a float...\r\n q.append(float(z.cget(\"text\")))\r\n q.append(n)\r\n z.config(text=\"0\")\r\n else:\r\n q.clear()\r\n try:\r\n q.append(int(z.cget(\"text\")))\r\n except ValueError:\r\n q.append(float(z.cget(\"text\")))\r\n q.append(n)\r\n z.config(text=\"0\")\r\n def ret(zz): #convenient function to retrieve the number\r\n try:\r\n return int(zz.cget(\"text\"))\r\n except ValueError: #if the number in the output is a float...\r\n return float(zz.cget(\"text\"))\r\n def fin(z,q):\r\n z2 = ret(z)\r\n 
q.append(z2)\r\n v = reg(q[1],q[0],q[2]) #send the array to the reg method\r\n z.config(text=str(v))\r\n q.clear()\r\n finished = 1\r\n v = 0\r\n def dec(z): #adds a decimal to the number in the output window\r\n z2 = z.cget(\"text\")\r\n if \".\" in z2:\r\n print(\"error: 2 decimals\")\r\n else:\r\n z.config(text=z2+\".\") #add a decimal to the end\r\n def npown(z): #bring the number to the power of itself\r\n z2 = ret(z)\r\n z.config(text=str(z2 ** z2))\r\n print(str(z2) + \" to the power of \" + str(z2))\r\n v = 0\r\n finished = 1\r\n def sqr(z): #square the number\r\n z2 = ret(z)\r\n z.config(text=str(z2 ** 2))\r\n print(str(z2) + \" squared\")\r\n v = 0\r\n finished = 1\r\n def cub(z): #cube the number\r\n z2 = ret(z)\r\n z.config(text=str(z2 ** 3))\r\n print(str(z2) + \" cubed\")\r\n v = 0\r\n finished = 1\r\n def ssqf(z): #square root the number in the box\r\n z2 = ret(z)\r\n z.config(text=str(sqrt(z2)))\r\n print(\"square root of \" + str(z2))\r\n v = 0\r\n finished = 1\r\n def s3qf(z):\r\n z2 = ret(z)\r\n z3 = z2 ** (1 / 3) #z2 to the power of 0.3, which is a cube root\r\n z.config(text=str(z3))\r\n print(\"cube root of \" + str(z2))\r\n v = 0\r\n finished = 1\r\n def sct(m,z): #sine, cosine, tangent...\r\n z2 = ret(z)\r\n if(z2 % pi == 0 or z2 / pi == 0.5 or z2 / pi == 1.5):\r\n print(\"[deg -> rad] conversion not needed\")\r\n else:\r\n print(\"converting \" + str(z2) + \" degrees to \" + str(radians(z2)) + \" radians\")\r\n z2 = radians(z2)\r\n if(m == 0):\r\n z.config(text=str(sin(z2)))\r\n print(\"sine of \" + str(z2))\r\n if(m == 1):\r\n z.config(text=str(cos(z2)))\r\n print(\"cosine of \" + str(z2))\r\n if(m == 2):\r\n z.config(text=str(tan(z2)))\r\n print(\"tangent of \" + str(z2))\r\n if(m == 3):\r\n z.config(text=str(asin(z2)))\r\n print(\"arc sine of \" + str(z2))\r\n if(m == 4):\r\n z.config(text=str(acos(z2)))\r\n print(\"arc cosine of \" + str(z2))\r\n if(m == 5):\r\n z.config(text=str(atan(z2)))\r\n print(\"arc tangent of \" + str(z2))\r\n v = 0\r\n finished = 1\r\n def conv(m,z): #convert between degrees and radians\r\n z2 = ret(z)\r\n if(m == 0): #degrees to radians\r\n print(\"converting \" + str(z2) + \" degrees to \" + str(radians(z2)) + \" radians\")\r\n z.config(text=str(radians(z2)))\r\n if(m == 1): #radians to degrees\r\n print(\"converting \" + str(z2) + \" radians to \" + str(degrees(z2)) + \" degrees\")\r\n z.config(text=str(degrees(z2)))\r\n v = 0\r\n finished = 1\r\n def lb2(z): #log base 2\r\n z2 = ret(z)\r\n z3 = log(z2,2)\r\n z.config(text=str(z3))\r\n print(str(z2) + \" log 2\")\r\n v = 0\r\n finished = 1\r\n def l10(z): #log base 10\r\n z2 = ret(z)\r\n z3 = log10(z2)\r\n z.config(text=str(z3))\r\n print(str(z2) + \" log 10\")\r\n v = 0\r\n finished = 1\r\n def recp(z): #return the reciprocal (opposite) of the value\r\n z2 = ret(z)\r\n z3 = 1 / z2\r\n z.config(text=str(z3))\r\n print(\"reciprocal of \" + str(z2))\r\n v = 0\r\n finished = 1\r\n def de(z): #deletes a digit from the number in the box\r\n z2 = z.cget(\"text\")\r\n z2 = z2[:-1]\r\n if(z2 == \"\"):\r\n z2 = \"0\"\r\n z.config(text=z2)\r\n def tg(m): #toggles calculator to graphing calculator\r\n m.master.destroy()\r\n os.popen(\"gcalc.pyw\")\r\n\r\n#procedures\r\nroot = tk.Tk()\r\nroot.resizable(0,0)\r\nwin = window(root)\r\nroot.mainloop()\r\n\r\n#if __name__ == '__main__':\r\n# main()\r\n\r\n","sub_path":"scalc.pyw","file_name":"scalc.pyw","file_ext":"pyw","file_size_in_byte":12991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"624525019","text":"import copy\nfrom collections import defaultdict\nfrom itertools import product\n\nfrom pddlstream.algorithms.downward import get_problem, task_from_domain_problem, apply_action, fact_from_fd, \\\n get_goal_instance, plan_preimage, get_literals, instantiate_task, get_cost_scale, \\\n sas_from_instantiated, scale_cost, fd_from_fact, parse_action, literal_holds\nfrom pddlstream.algorithms.reorder import get_partial_orders\nfrom pddlstream.algorithms.scheduling.negative import get_negative_predicates, convert_negative, recover_negative_axioms\nfrom pddlstream.algorithms.scheduling.postprocess import postprocess_stream_plan\nfrom pddlstream.algorithms.scheduling.recover_axioms import get_derived_predicates, extraction_helper\nfrom pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan\nfrom pddlstream.algorithms.scheduling.simultaneous import extract_function_results, \\\n add_stream_actions, partition_plan, add_unsatisfiable_to_goal\nfrom pddlstream.algorithms.scheduling.utils import partition_results, \\\n get_results_from_head, apply_streams, partition_external_plan\nfrom pddlstream.algorithms.search import solve_from_task\nfrom pddlstream.language.constants import get_args, Not, EQ, get_prefix\nfrom pddlstream.language.conversion import obj_from_pddl_plan, substitute_expression, pddl_from_object\nfrom pddlstream.language.object import UniqueOptValue, OptimisticObject\nfrom pddlstream.language.external import Result\nfrom pddlstream.language.effort import compute_plan_effort\nfrom pddlstream.language.optimizer import is_optimizer_result, UNSATISFIABLE\nfrom pddlstream.utils import Verbose, INF, get_mapping, neighbors_from_orders, apply_mapping\n\ndef compute_function_plan(opt_evaluations, action_plan, unit_costs):\n function_plan = set()\n if unit_costs:\n return function_plan\n results_from_head = get_results_from_head(opt_evaluations)\n for action_instance in action_plan:\n action = action_instance.action\n if action is None:\n continue\n args = [action_instance.var_mapping[p.name] for p in action.parameters]\n function_result = extract_function_results(results_from_head, action, args)\n if function_result is not None:\n function_plan.add(function_result)\n return function_plan\n\ndef convert_fluent_streams(stream_plan, real_states, action_plan, step_from_fact, node_from_atom):\n import pddl\n assert len(real_states) == len(action_plan) + 1\n steps_from_stream = {}\n for result in reversed(stream_plan):\n steps_from_stream[result] = set()\n for fact in result.get_certified():\n if (fact in step_from_fact) and (node_from_atom[fact].result == result):\n steps_from_stream[result].update(step_from_fact[fact])\n for fact in result.instance.get_domain():\n step_from_fact[fact] = step_from_fact.get(fact, set()) | steps_from_stream[result]\n # TODO: apply this recursively\n\n # TODO: ensure that derived facts aren't in fluents?\n # TODO: handle case where costs depend on the outputs\n _, outgoing_edges = neighbors_from_orders(get_partial_orders(\n stream_plan, init_facts=map(fact_from_fd, filter(lambda f: isinstance(f, pddl.Atom), real_states[0]))))\n static_plan = []\n fluent_plan = []\n for result in stream_plan:\n external = result.external\n if (result.opt_index != 0) or (not external.is_fluent()):\n static_plan.append(result)\n continue\n if outgoing_edges[result]:\n # No way of taking into account the binding of fluent inputs when preventing cycles\n raise NotImplementedError('Fluent stream is required for another 
stream: {}'.format(result))\n #if (len(steps_from_stream[result]) != 1) and result.output_objects:\n # raise NotImplementedError('Fluent stream required in multiple states: {}'.format(result))\n for state_index in steps_from_stream[result]:\n new_output_objects = [ # OptimisticObject.from_opt(out.value, object())\n OptimisticObject.from_opt(out.value, UniqueOptValue(result.instance, object(), i))\n for i, out in enumerate(result.output_objects)]\n if new_output_objects and (state_index < len(action_plan)):\n # TODO: check that the objects aren't used in any effects\n instance = copy.copy(action_plan[state_index])\n action_plan[state_index] = instance\n output_mapping = get_mapping(map(pddl_from_object, result.output_objects),\n map(pddl_from_object, new_output_objects))\n instance.var_mapping = {p: output_mapping.get(v, v)\n for p, v in instance.var_mapping.items()}\n fluent_facts = list(map(fact_from_fd, filter(\n lambda f: isinstance(f, pddl.Atom) and (f.predicate in external.fluents), real_states[state_index])))\n new_instance = external.get_instance(result.instance.input_objects, fluent_facts=fluent_facts)\n new_result = new_instance.get_result(new_output_objects, opt_index=result.opt_index)\n fluent_plan.append(new_result)\n return static_plan + fluent_plan\n\n##################################################\n\ndef recover_stream_plan(evaluations, opt_evaluations, goal_expression, domain, node_from_atom,\n action_plan, axiom_plans, negative, unit_costs):\n # Universally quantified conditions are converted into negative axioms\n # Existentially quantified conditions are made additional preconditions\n # Universally quantified effects are instantiated by doing the cartesian produce of types (slow)\n # Added effects cancel out removed effects\n # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)\n real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs))\n opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))\n negative_from_name = get_negative_predicates(negative)\n\n real_states, combined_plan = recover_negative_axioms(real_task, opt_task, axiom_plans, action_plan, negative_from_name)\n function_plan = compute_function_plan(opt_evaluations, action_plan, unit_costs)\n\n full_preimage = plan_preimage(combined_plan, [])\n stream_preimage = set(full_preimage) - real_states[0]\n negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, stream_preimage))\n positive_preimage = stream_preimage - negative_preimage\n function_plan.update(convert_negative(negative_preimage, negative_from_name, full_preimage, real_states))\n\n step_from_fact = {fact_from_fd(l): full_preimage[l] for l in positive_preimage if not l.negated}\n target_facts = [fact for fact in step_from_fact.keys() if get_prefix(fact) != EQ]\n #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)\n # visualize_constraints(map(fact_from_fd, target_facts))\n stream_plan = []\n extract_stream_plan(node_from_atom, target_facts, stream_plan)\n stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan, target_facts)\n stream_plan = convert_fluent_streams(stream_plan, real_states, action_plan, step_from_fact, node_from_atom)\n\n return stream_plan + list(function_plan)\n\n##################################################\n\ndef get_instance_facts(instance, node_from_atom):\n # TODO: ignores conditional effect conditions\n facts = []\n for 
precondition in get_literals(instance.action.precondition):\n if precondition.negated:\n continue\n args = apply_mapping(precondition.args, instance.var_mapping)\n literal = precondition.__class__(precondition.predicate, args)\n fact = fact_from_fd(literal)\n if fact in node_from_atom:\n facts.append(fact)\n return facts\n\ndef add_optimizer_effects(instantiated, instance, stream_plan):\n # TODO: instantiate axioms with negative on effects for blocking\n # TODO: fluent streams using conditional effects. Special fluent predicate for inputs to constraint\n # This strategy will only work for relaxed to ensure that the current state is applied\n # TODO: bug! The FD instantiator prunes the result.external.stream_fact\n for result in stream_plan:\n if not is_optimizer_result(result):\n continue\n # TODO: need to make multiple versions if several ways of achieving the action\n atom = fd_from_fact(substitute_expression(result.external.stream_fact, result.get_mapping()))\n instantiated.atoms.add(atom)\n effect = (tuple(), atom)\n instance.add_effects.append(effect)\n # domain = {fact for result in stream_plan if result.external.info.simultaneous\n # for fact in result.instance.get_domain()}\n # TODO: can streams depending on these to be used if the dependent preconditions are added to the action\n\ndef add_stream_efforts(node_from_atom, instantiated, effort_weight, **kwargs):\n # TODO: make effort just a multiplier (or relative) to avoid worrying about the scale\n #efforts = [] # TODO: regularize & normalize across the problem?\n for instance in instantiated.actions:\n # TODO: prune stream actions here?\n # TODO: round each effort individually to penalize multiple streams\n facts = get_instance_facts(instance, node_from_atom)\n #effort = COMBINE_OP([0] + [node_from_atom[fact].effort for fact in facts])\n stream_plan = []\n extract_stream_plan(node_from_atom, facts, stream_plan)\n if effort_weight is not None:\n effort = compute_plan_effort(stream_plan, **kwargs)\n instance.cost += scale_cost(effort_weight*effort)\n #efforts.append(effort)\n add_optimizer_effects(instantiated, instance, stream_plan)\n #print(min(efforts), efforts)\n\n##################################################\n\ndef add_optimizer_axioms(results, instantiated):\n # Ends up being a little slower than version in optimizer.py when not blocking shared\n # TODO: add this to simultaneous\n import pddl\n results_from_instance = defaultdict(list)\n for result in results:\n results_from_instance[result.instance].append(result)\n optimizer_results = list(filter(is_optimizer_result, results))\n optimizers = {result.external.optimizer for result in optimizer_results}\n for optimizer in optimizers:\n optimizer_facts = {substitute_expression(result.external.stream_fact, result.get_mapping())\n for result in optimizer_results if result.external.optimizer is optimizer}\n facts_from_arg = defaultdict(list)\n for fact in optimizer_facts:\n for arg in get_args(fact):\n facts_from_arg[arg].append(fact)\n\n for stream in optimizer.streams:\n if not stream.instance.disabled:\n continue\n constraints = stream.instance.get_constraints()\n output_variables = []\n for out in stream.output_objects:\n assert isinstance(out.param, UniqueOptValue)\n output_variables.append([r.output_objects[out.param.output_index]\n for r in results_from_instance[out.param.instance]])\n for combo in product(*output_variables):\n mapping = get_mapping(stream.output_objects, combo)\n name = '({})'.join(UNSATISFIABLE)\n blocked = set(substitute_expression(constraints, 
mapping))\n additional = {fact for arg in combo for fact in facts_from_arg[arg]} - blocked\n # TODO: like a partial disable, if something has no outputs, then adding things isn't going to help\n if stream.instance.enumerated and not stream.instance.successes:\n # Assumes the optimizer is submodular\n condition = list(map(fd_from_fact, blocked))\n else:\n condition = list(map(fd_from_fact, blocked | set(map(Not, additional))))\n effect = fd_from_fact((UNSATISFIABLE,))\n instantiated.axioms.append(pddl.PropositionalAxiom(name, condition, effect))\n instantiated.atoms.add(effect)\n\n##################################################\n\ndef rename_instantiated_actions(instantiated):\n actions = instantiated.actions[:]\n renamed_actions = []\n action_from_name = {}\n for i, action in enumerate(actions):\n renamed_actions.append(copy.copy(action))\n renamed_name = 'a{}'.format(i)\n renamed_actions[-1].name = '({})'.format(renamed_name)\n action_from_name[renamed_name] = action # Change reachable_action_params?\n instantiated.actions[:] = renamed_actions\n return action_from_name\n\ndef recover_axioms_plans(instantiated, action_instances):\n task = instantiated.task\n derived_predicates = get_derived_predicates(task.axioms)\n state = set(task.init)\n axiom_plans = []\n for action_instance in action_instances + [get_goal_instance(task.goal)]:\n # TODO: apply all axiom_instances unaffected by negative conditions\n preimage = list(plan_preimage([action_instance], []))\n axiom_instances = filter(lambda ax: all(l.predicate in derived_predicates or literal_holds(state, l)\n for l in ax.condition), instantiated.axioms)\n # Only instantiate if preimage has goal\n axiom_plan = extraction_helper(state, axiom_instances, preimage)\n assert axiom_plan is not None\n axiom_plans.append(axiom_plan)\n apply_action(state, action_instance)\n return axiom_plans\n\ndef pddl_from_instance(instance):\n action = instance.action\n args = [instance.var_mapping[p.name]\n for p in action.parameters[:action.num_external_parameters]]\n return action.name, args\n\n##################################################\n\ndef get_plan_cost(action_plan, cost_from_action, unit_costs):\n if action_plan is None:\n return INF\n if unit_costs:\n # TODO: no longer need to pass around unit_costs\n return len(action_plan)\n #return sum([0.] + [instance.cost for instance in action_plan])\n scaled_cost = sum([0.] 
+ [cost_from_action[instance] for instance in action_plan])\n return scaled_cost / get_cost_scale()\n\ndef using_optimizers(stream_results):\n return any(map(is_optimizer_result, stream_results))\n\ndef relaxed_stream_plan(evaluations, goal_expression, domain, all_results, negative,\n unit_efforts, effort_weight, max_effort,\n simultaneous=False, reachieve=True, unit_costs=False, debug=False, **kwargs):\n # TODO: alternatively could translate with stream actions on real opt_state and just discard them\n # TODO: only consider axioms that have stream conditions?\n applied_results, deferred_results = partition_results(\n evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))\n stream_domain, result_from_name = add_stream_actions(domain, deferred_results)\n opt_evaluations = apply_streams(evaluations, applied_results) # if n.effort < INF\n\n if reachieve:\n achieved_results = {r for r in evaluations.values() if isinstance(r, Result)}\n init_evaluations = {e for e, r in evaluations.items() if r not in achieved_results}\n applied_results = achieved_results | set(applied_results)\n evaluations = init_evaluations # For clarity\n # TODO: could iteratively increase max_effort\n node_from_atom = get_achieving_streams(evaluations, applied_results,\n unit_efforts=unit_efforts, max_effort=max_effort)\n if using_optimizers(all_results):\n goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)\n problem = get_problem(opt_evaluations, goal_expression, stream_domain, unit_costs) # begin_metric\n\n with Verbose(debug):\n instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))\n if instantiated is None:\n return None, INF\n cost_from_action = {action: action.cost for action in instantiated.actions}\n if (effort_weight is not None) or using_optimizers(applied_results):\n add_stream_efforts(node_from_atom, instantiated, effort_weight, unit_efforts=unit_efforts)\n add_optimizer_axioms(all_results, instantiated)\n action_from_name = rename_instantiated_actions(instantiated)\n with Verbose(debug):\n sas_task = sas_from_instantiated(instantiated)\n sas_task.metric = True\n\n # TODO: apply renaming to hierarchy as well\n # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential\n action_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)\n if action_plan is None:\n return None, INF\n action_instances = [action_from_name[name] for name, _ in action_plan]\n cost = get_plan_cost(action_instances, cost_from_action, unit_costs)\n axiom_plans = recover_axioms_plans(instantiated, action_instances)\n\n applied_plan, function_plan = partition_external_plan(recover_stream_plan(\n evaluations, opt_evaluations, goal_expression, stream_domain, node_from_atom,\n action_instances, axiom_plans, negative, unit_costs))\n #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)\n action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))\n\n deferred_plan, action_plan = partition_plan(action_plan, result_from_name)\n stream_plan = applied_plan + deferred_plan + function_plan\n combined_plan = stream_plan + action_plan\n return combined_plan, cost\n","sub_path":"pddlstream/algorithms/scheduling/relaxed.py","file_name":"relaxed.py","file_ext":"py","file_size_in_byte":17850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"642721407","text":"from rest_framework.routers import 
DefaultRouter\nfrom app import views\nfrom django.urls import path, include\n\nrouter = DefaultRouter()\nrouter.register('cars', views.CarViewSet)\nrouter.register('brands', views.BrandViewSet)\nrouter.register('models', views.CarModelsViewSet)\n\nurlpatterns = [\n    path('', include(router.urls)),\n]\n\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"423459940","text":"# Import modules\nimport requests,json\n# Create a session\nsession = requests.session()\n# Request headers\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n}\ndef cookies_read():\n    cookies_txt = open('cookies.txt','r',encoding='utf-8')\n    cookies_dict = json.loads(cookies_txt.read())\n    cookies = requests.utils.cookiejar_from_dict(cookies_dict)\n    return cookies\n\ndef sign_in():\n    url = 'https://wordpress-edu-3autumn.localprod.oc.forchange.cn/wp-login.php'\n    data = {'log': 'spiderman',\n            'pwd': 'crawler334566',\n            'wp-submit': '登录',\n            'redirect_to': 'https://wordpress-edu-3autumn.localprod.oc.forchange.cn/wp-admin/',\n            'testcookie': '1'\n    }\n    session.post(url,headers=headers,data=data)\n    cookies_dict = requests.utils.dict_from_cookiejar(session.cookies)\n    cookies_str = json.dumps(cookies_dict)\n    cookies_txt = open('cookies.txt', 'w', encoding='utf-8')\n    cookies_txt.write(cookies_str)\n    cookies_txt.close()\n\ndef send_message():\n    url_1 = 'https://wordpress-edu-3autumn.localprod.oc.forchange.cn/wp-comments-post.php'\n    data_1 = {\n        'comment': input('Enter the comment you want to post: '),\n        'submit': '发表评论',\n        'comment_post_ID': '13',\n        'comment_parent': '0'\n    }\n    return requests.post(url_1,headers=headers,data=data_1)\n\ntry:\n    session.cookies = cookies_read()\nexcept FileNotFoundError:\n    sign_in()\n    session.cookies = cookies_read()\nnum = send_message()\nif num.status_code == 200:\n    print('Success!')\nelse:\n    sign_in()\n    session.cookies = cookies_read()\n    num = send_message()","sub_path":"风变编程/python爬虫-山腰班/fb_08/fb_05_01.py","file_name":"fb_05_01.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"44061853","text":"\"\"\"\nSequential Monte Carlo-ABC sampler\n\"\"\"\nimport numpy as np\nimport theano\nimport pymc3 as pm\nfrom scipy.stats.mstats import mquantiles\nfrom tqdm import tqdm\n\nfrom .arraystep import metrop_select\nfrom .metropolis import MultivariateNormalProposal\nfrom ..theanof import floatX\nfrom ..model import modelcontext, treelist, FreeRV\nfrom ..backends.ndarray import NDArray\nfrom ..backends.base import MultiTrace\n\n\n__all__ = ['SMC_ABC', 'sample_smc_abc']\n\nproposal_dists = {'MultivariateNormal': MultivariateNormalProposal}\n\n\nclass SMC_ABC():\n    \"\"\"\n    Sequential Monte Carlo step\n\n    Parameters\n    ----------\n    n_steps : int\n        The number of steps of a Markov Chain. If `tune == True` `n_steps` will be used for\n        the first stage, and the number of steps of the other stages will be determined\n        automatically based on the acceptance rate and `p_acc_rate`.\n    scaling : float\n        Factor applied to the proposal distribution i.e. the step size of the Markov Chain. Only\n        works if `tune == False` otherwise is determined automatically\n    p_acc_rate : float\n        Probability of not accepting a Markov Chain proposal. Used to compute `n_steps` when\n        `tune == True`. It should be between 0 and 1. 
\n sum_stat: list\n List of summary statistics to compute.\n min_epsilon : float\n Minimum epsilons threshold that the sampler can reach.\n iqr_scale : int\n Scale factor for the inter-quantile range used in epsilon computation.\n distance_metric : string\n Distance metric to be computed between summary statistics.\n epsilons : list\n Pre-specified list of epsilon thresholds.\n proposal_name :\n Type of proposal distribution. Currently the only valid option is `MultivariateNormal`.\n\n References\n ----------\n .. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),\n Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.\n Geophysical Journal International, 2013, 194(3), pp.1701-1726,\n `link `__\n\n .. [Ching2007] Ching, J. and Chen, Y. (2007).\n Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class\n Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),\n 816-832. `link `__\n \"\"\"\n def __init__(self, n_steps=5, scaling=1., p_acc_rate=0.01, tune=True, sum_stat=['mean'],\n min_epsilon=0.5, iqr_scale=1, distance_metric='absolute difference', epsilons=None, \n proposal_name='MultivariateNormal', routine='iqr'):\n\n self.n_steps = n_steps\n self.scaling = scaling\n self.p_acc_rate = p_acc_rate\n self.tune = tune\n self.proposal = proposal_dists[proposal_name]\n self.sum_stat = sum_stat\n self.min_epsilon = min_epsilon\n self.iqr_scale = iqr_scale\n self.distance_metric = distance_metric\n self.epsilons = epsilons\n self.routine = routine\n\ndef sample_smc_abc(draws=5000, step=None, progressbar=False, model=None, random_seed=-1):\n \"\"\"\n Sequential Monte Carlo sampling\n\n Parameters\n ----------\n draws : int\n The number of samples to draw from the posterior (i.e. last stage). And also the number of\n independent Markov Chains. 
Defaults to 5000.\n step : :class:`SMC`\n SMC initialization object\n progressbar : bool\n Flag for displaying a progress bar\n model : pymc3 Model\n optional if in `with` context\n random_seed : int\n random seed\n \"\"\"\n model = modelcontext(model)\n\n if random_seed != -1:\n np.random.seed(random_seed)\n\n stage = 0\n #variables = pm.model.treelist([])\n #for k, v in model.named_vars.items():\n # if not isinstance(v, pm.model.ObservedRV):\n # if not pm.util.is_transformed_name(k):\n # print(v)\n # variables.append(v)\n variables = model.vars\n discrete = np.concatenate([[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in variables])\n any_discrete = discrete.any()\n all_discrete = discrete.all()\n prior_logp = theano.function(variables, model.varlogpt)\n simulator = model.observed_RVs[0]\n function = simulator.distribution.function\n observed_sum_stat = get_sum_stats(simulator.observations, sum_stat=step.sum_stat)\n epsilon = np.inf\n distance_list = []\n pm._log.info('Using {} as distance metric'.format(step.distance_metric))\n pm._log.info('Using {} as summary statistic'.format(step.sum_stat))\n\n if step.epsilons is None:\n epsilon_list = []\n\n else:\n step.epsilons.append(step.min_epsilon)\n epsilon_list = step.epsilons\n\n while epsilon > step.min_epsilon:\n if stage == 0:\n pm._log.info('Sample initial stage: ...')\n posterior = _initial_population(draws, model, variables)\n weights = np.ones(draws) / draws\n else:\n weights = calc_weights(un_weights)\n \n # resample based on plausibility weights (selection)\n resampling_indexes = np.random.choice(np.arange(draws), size=draws, p=weights)\n posterior = posterior[resampling_indexes]\n # compute proposal distribution based on weights\n try:\n covariance = _calc_covariance(posterior, weights)\n except ValueError:\n pm._log.info('Could not compute covariance matrix')\n break\n proposal = step.proposal(covariance)\n # get distance function\n distance_function = get_distance(step.distance_metric)\n \n # compute scaling and number of Markov chains steps (optional), based on previous\n # acceptance rate\n #if step.tune and stage > 0:\n # if acc_rate == 0:\n # acc_rate = 1. 
/ step.n_steps\n # step.scaling = _tune(acc_rate)\n # step.n_steps = 1 + (np.ceil(np.log(step.p_acc_rate) / np.log(1 - acc_rate)).astype(int))\n\n # Apply Rejection kernel (mutation)\n proposed = 0.\n accepted = 0.\n new_posterior_list = []\n proposal_list = []\n\n #if step.epsilons is None:\n if stage == 0:\n simulated_sample = [function(*sample) for sample in posterior][::10]\n epsilon_list.append(calc_epsilon(simulated_sample[0], step.iqr_scale, step, step.epsilons, stage, step.routine))\n else:\n epsilon_list.append(calc_epsilon(distance_list, step.iqr_scale, step, step.epsilons, stage, step.routine))\n distance_list = []\n \n epsilon = epsilon_list[stage]\n pm._log.info('Sampling stage {} with Epsilon {:f}'.format(stage, epsilon))\n\n for draw in tqdm(range(draws), disable=not progressbar):\n q_old = posterior[draw]\n deltas = np.squeeze(proposal(step.n_steps) * step.scaling)\n for n_step in range(0, step.n_steps):\n delta = deltas[n_step]\n if any_discrete:\n if all_discrete:\n delta = np.round(delta, 0).astype('int64')\n q_old = q_old.astype('int64')\n q_new = (q_old + delta).astype('int64')\n else:\n delta[discrete] = np.round(delta[discrete], 0)\n q_new = (q_old + delta)\n else:\n q_new = q_old + delta\n \n if np.isfinite(prior_logp(*q_new)):\n\n simulated_data = function(*q_new)\n \n simulated_stat = get_sum_stats(simulated_data, sum_stat=step.sum_stat)\n distance = distance_function(simulated_stat, observed_sum_stat)\n\n if distance < epsilon:\n accepted += 1.\n new_posterior_list.append(q_new)\n proposal_list.append(proposal.logp(covariance * step.scaling, q_new))\n distance_list.append(distance)\n break\n \n proposed += 1.\n\n if new_posterior_list:\n new_posterior = np.array(new_posterior_list)\n resampling_indexes = np.random.choice(np.arange(len(new_posterior)), size=draws)\n\n posterior = new_posterior[resampling_indexes]\n proposal_array = np.array(proposal_list)\n proposal_array = proposal_array[resampling_indexes]\n priors = np.array([prior_logp(*sample) for sample in posterior])\n un_weights = priors - proposal_array\n acc_rate = accepted / proposed\n stage += 1\n else:\n pm._log.info('Acceptance rate is 0')\n break\n\n trace = _posterior_to_trace(posterior, model)\n\n return trace\n\n# FIXME!!!!\ndef _initial_population(samples, model, variables):\n \"\"\"\n Create an initial population from the prior\n \"\"\"\n population = np.zeros((samples, len(variables)))\n init_rnd = {}\n start = model.test_point\n for idx, v in enumerate(variables):\n if pm.util.is_transformed_name(v.name):\n trans = v.distribution.transform_used.forward_val\n population[:,idx] = trans(v.distribution.dist.random(size=samples, point=start))\n population[:,idx] = v.distribution.dist.random(size=samples, point=start)\n else:\n population[:,idx] = v.random(size=samples, point=start)\n\n return population\n\n\n#def _initial_population(draws, model, variables):\n# \"\"\"\n# Create an initial population from the prior\n# \"\"\"\n# population = []\n# var_info = {}\n# start = model.test_point\n# init_rnd = pm.sample_prior_predictive(draws, model=model)\n# for v in variables:\n# var_info[v.name] = (start[v.name].shape, start[v.name].size)\n#\n# for i in range(draws):\n# point = pm.Point({v.name: init_rnd[v.name][i] for v in variables}, model=model)\n# population.append(model.dict_to_array(point))\n#\n# return np.array(population), var_info\n\n\n\ndef calc_epsilon(population, iqr_scale, step, epsilons, stage, routine):\n \"\"\"Calculate next epsilon threshold based on the current population.\n\n Returns\n 
-------\n epsilon : float\n \"\"\"\n if step.epsilons is None:\n if step.routine == 'iqr':\n range_iq = mquantiles(population, prob=[0.25, 0.75])\n epsilon = np.abs(range_iq[1] - range_iq[0]) * iqr_scale\n if step.routine == 'median':\n epsilon = np.abs(np.median(population)) * iqr_scale\n else:\n epsilon = step.epsilons[stage]\n\n return epsilon \n\ndef calc_weights(un_weights):\n \"\"\"Compute importance weights\"\"\"\n weights_un = np.exp(un_weights - un_weights.max())\n weights = weights_un / np.sum(weights_un)\n return weights\n\ndef _calc_covariance(posterior_array, weights):\n \"\"\"\n Calculate trace covariance matrix based on importance weights.\n \"\"\"\n cov = np.cov(np.squeeze(posterior_array), aweights=weights.ravel(), bias=False, rowvar=0)\n if np.isnan(cov).any() or np.isinf(cov).any():\n raise ValueError('Sample covariances not valid! Likely \"chains\" is too small!')\n return np.atleast_2d(cov)\n\ndef _tune(acc_rate):\n \"\"\"\n Tune adaptively based on the acceptance rate.\n\n Parameters\n ----------\n acc_rate: float\n Acceptance rate of the Metropolis sampling\n\n Returns\n -------\n scaling: float\n \"\"\"\n # a and b after Muto & Beck 2008 .\n a = 1. / 9\n b = 8. / 9\n return (a + b * acc_rate) ** 2\n\ndef _posterior_to_trace(posterior, model):\n \"\"\"\n Save results into a PyMC3 trace\n \"\"\"\n length_pos = len(posterior)\n varnames = [v.name for v in model.vars]\n with model:\n strace = NDArray(model)\n strace.setup(length_pos, 0)\n for i in range(length_pos):\n strace.record({k:v for k, v in zip(varnames, posterior[i])})\n return MultiTrace([strace])\n\ndef get_sum_stats(data, sum_stat=SMC_ABC().sum_stat):\n \"\"\"\n Parameters:\n -----------\n data : array\n Observed or simulated data\n sum_stat : list\n List of summary statistics to be computed. Accepted strings are mean, std, var. 
\n        Python functions can be passed in this argument.\n\n    Returns:\n    --------\n    sum_stat_vector : array\n        Array containing the summary statistics.\n    \"\"\"\n    if data.ndim == 1:\n        data = data[:,np.newaxis]\n    sum_stat_vector = np.zeros((len(sum_stat), data.shape[1]))\n\n    for i, stat in enumerate(sum_stat):\n        for j in range(sum_stat_vector.shape[1]):\n            if stat == 'mean':\n                sum_stat_vector[i, j] = data[:,j].mean()\n            elif stat == 'std':\n                sum_stat_vector[i, j] = data[:,j].std()\n            elif stat == 'var':\n                sum_stat_vector[i, j] = data[:,j].var()\n            else:\n                sum_stat_vector[i, j] = stat(data[:,j])\n\n    return np.atleast_1d(np.squeeze(sum_stat_vector))\n\ndef absolute_difference(a, b):\n    return np.sum(np.abs(a - b))\n\ndef sum_of_squared_distance(a, b):\n    return np.sum((a - b)**2)\n\ndef mean_absolute_error(a, b):\n    return np.sum(np.abs(a - b))/len(a)\n\ndef mean_squared_error(a, b):\n    return np.sum((a - b)**2)/len(a)\n\ndef euclidean_distance(a, b):\n    return np.sqrt(np.sum((a - b)**2))\n\ndef get_distance(func_name):\n    d = {'absolute difference': absolute_difference,\n         'sum of squared_distance' : sum_of_squared_distance,\n         'mean absolute_error' : mean_absolute_error,\n         'mean squared_error' : mean_squared_error,\n         'euclidean' : euclidean_distance}\n    for key, value in d.items():\n        if func_name == key:\n            return value","sub_path":"pymc3/step_methods/smc_ABC.py","file_name":"smc_ABC.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"151407319","text":"class TextEdit2:\n\n\tdef edit(self,path):\n\t\t# For IS2012\n\t\tfile1=open(path,'r',encoding='utf-8')\n\t\tresult=[]\n\t\tfor line in file1:\n\t\t\tresult.append(line)\n\t\tdata=result[5764:]\n\t\tdata_atr=result[:5764]\n\t\tarff=[]\n\t\tatr=[]\n\t\tfor line2 in data:\n\t\t\tarff.append(line2.lstrip(\"'unknown,'\"))\n\t\tfor line3 in data_atr:\n\t\t\tif line3==result[2]:\n\t\t\t\tprint(\"Success!\")\n\t\t\t\tcontinue\n\t\t\tatr.append(line3)\n\t\tatr.append(arff)\n\t\twith open(path, mode='w') as w:\n\t\t \tfor wr in atr:\n\t\t \t\tw.writelines(wr)\n\t\tw.close()\n\t\tfile1.close()\n\t\t# For IS2009\n\t\t# file2 = open(path3,'r',encoding='utf-8')\n\t\t# result2 = []\n\t\t# atr2 = []\n\n\t\t# for line3 in file2:\n\t\t# \tresult2.append(line3)\n\n\t\t# for line4 in result2:\n\t\t# \tarff.append(line4.lstrip(\"'unknown,'\"))\n\n\t\t# atr2.append(arff)\n\t\t# with open(\"output2.arff\", mode='w') as w:\n\t\t# \tfor wr in atr2:\n\t\t# \t    w.writelines(wr)\n\t\t# w.close()\n\t\t# file2.close()\n","sub_path":"outside-scripts/TextEdit2.py","file_name":"TextEdit2.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"235110083","text":"import os\nimport sys\nimport glob\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport functools\nimport keras\n\nfrom dual_im_gen import dual_im_gen\nfrom DI_model import dual_im\n\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom keras import metrics\nfrom sklearn.metrics import confusion_matrix, f1_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.utils import plot_model\n\n\n\nbatch_size = 32\nNB_EPOCHS = 10\n\ntest_address = 
\"/project/BEN_DL/split_images/benthoz_retrain/testing\"\nval_address = \"/project/BEN_DL/split_images/retrain_50/testing\"\ntrain_address = \"/project/BEN_DL/split_images/benthoz_retrain/training\"\nout_path = \"/project/BEN_DL/output/DI/\"\n\nnb_classes = len(glob.glob(train_address + \"/*\"))\n\ndef transfer_learn_DI(model):\n\n for layer in model.layers:\n if layer.name.endswith('source_arm') or layer.name.endswith('crop_arm'):\n layer.trainable = False\n else:\n layer.trainable = True\n\n model.compile(\n optimizer='rmsprop', #Adam(lr=0.0001),#(other fine tuning)\n loss='categorical_crossentropy',\n metrics=[\n metrics.categorical_accuracy,\n metrics.top_k_categorical_accuracy,\n #top3_acc\n ]\n )\n\ndef predictor(model, test_generator, steps):\n y_pred = np.array([]).reshape(0,24)\n y_true = np.array([]).reshape(0,24)\n\n for i in range(steps):\n features, labels = next(test_generator)\n batch_pred = model.predict(features)\n\n y_pred = np.append(y_pred, batch_pred, axis=0)\n y_true = np.append(y_true, labels, axis=0)\n\n y_true = np.argmax(y_true, axis=1)\n y_pred = np.argmax(y_pred, axis=1)\n return y_pred, y_true\n\ndef get_nb_files(directory):\n \"\"\"Get number of files by searching directory recursively\"\"\"\n cnt = len(glob.glob(os.path.join(directory,'*/*.jpg')))\n return cnt\n\nif __name__==\"__main__\":\n a = argparse.ArgumentParser()\n a.add_argument(\"--source_layer\", default='mixed2')\n a.add_argument(\"--n_kernels\", default=128, type=int)\n a.add_argument(\"--n_hidden\", default=512, type=int)\n args = a.parse_args()\n trial_num = max([int(d) for d in os.listdir(out_path) if os.path.isdir(out_path + d) and d.isdigit()] +[0]) + 1\n save_dir = out_path + str(trial_num) + \"/\"\n\n nb_train_samples = get_nb_files(train_address)\n nb_val_samples = get_nb_files(val_address)\n nb_test_samples = get_nb_files(test_address)\n\n train_steps = int(nb_train_samples/batch_size)\n val_steps = int(nb_val_samples/batch_size)\n test_steps = int(nb_test_samples/batch_size)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n model = dual_im(nb_classes, args.source_layer, args.n_kernels, args.n_hidden)\n\n transfer_learn_DI(model)\n\n dim_gen_test = dual_im_gen(test_address, batch_size)\n dim_gen_val = dual_im_gen(val_address, batch_size)\n dim_gen_train = dual_im_gen(train_address, batch_size)\n\n history_tl = model.fit_generator(\n dim_gen_train,\n epochs=NB_EPOCHS,\n steps_per_epoch=train_steps,\n validation_data=dim_gen_val,\n validation_steps=val_steps,\n class_weight='auto',\n verbose =1\n )\n\n model.save(save_dir + \"model.h5\")\n\n scores = model.evaluate_generator(dim_gen_test, steps = test_steps)\n y_pred, y_true = predictor(model, dim_gen_test, steps = test_steps)\n\n conf = confusion_matrix(y_true = y_true, y_pred = y_pred)\n f1scores = f1_score(y_true = y_true, y_pred = y_pred, average = None)\n\n np.savez(save_dir + \"output_vars.npz\",\n scores = scores,\n hist = history_tl.history,\n y_pred = y_pred,\n y_true = y_true,\n conf = conf,\n f1scores = f1scores\n )\n","sub_path":"DI/train_DI.py","file_name":"train_DI.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"426859447","text":"from typing import DefaultDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport GPy\nfrom numpy.lib.function_base import select\nfrom gpr_group_model import GPRegression_Group\nfrom simulation import * \nimport pickle\nimport os\nimport argparse\nimport sys\nimport pandas 
as pd \nimport xarray as xr\nimport pretty_errors\n\nplt.style.use('double_col.mplstyle')\n\n#-----------------------------------------------------------------------------------\n# parameters\n\nparser = argparse.ArgumentParser(description='Run Simulation for GPOO project.')\nparser.add_argument('opt_num', type = int, help = 'choose what f to use, choices: 1,2,3')\nparser.add_argument('--n', type = int, help = 'budget (should be positive integer)')\nparser.add_argument('--r', type = int, help = 'number of repeat (should be positive integer)')\nparser.add_argument('--alg', nargs='*', help = 'please list all algorithms to run. Choices: StoOO, GPOO, GPTree, Random, SK')\n# args = parser.parse_args()\nargs,_ = parser.parse_known_args()\n\nopt_num = str(args.opt_num) \nsave_folder = 'GPOO_results' + opt_num + '/'\n\nif not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n# np.random.seed(2021)\nif args.n is not None:\n n = args.n \nelse:\n n = 80 # budget\nif args.r is not None:\n n_repeat = args.r\nelse:\n n_repeat = 100 # number of repeat\nif args.alg is not None:\n run_alg = args.alg\n print(run_alg)\nelse:\n run_alg = ['StoOO', 'GPOO', 'GPTree', 'Random']\n\nhmax = 10\narms_range = [0.0, 1.0] # root cell\nreward_type = 'center' # 'center' or 'ave'\nsigma = 0.1 # noise for observation (normal std)\nk = 2 # k-ary tree\ns = 10 # split for aggregated feedback\nd = 1 # feature dim\n\n\nlengthscale = 0.05 # kernel para\nkernel_var = 0.1 # kernel para\ngp_noise_var = kernel_var * 0.05 # 1e-10 # gp para\n\n# my_kernel = GPy.kern.StdPeriodic(\n# input_dim=d, \n# # variance=kernel_var, \n# lengthscale=0.2, \n# variance = 0.5,\n# period=1\n# # ,n_freq=10,lower=0.0, upper=0.3,active_dims=0, name=None\n# )\n\nmy_kernel = GPy.kern.RBF(input_dim=d, \n variance=kernel_var, \n lengthscale=lengthscale)\nopt_flag = False # whether optimise parameters in gpr\nplot_regret = True\nplot_tree = True\n\n# ----------------------------------------------------------------------------------\n# plot regret from saved file\n\nif plot_regret:\n regret_dict = {}\n regret_ave_dict = {}\n regret_center_dict = {}\n\n # for alg in ['GPOO', 'StoOO', 'Random', 'GPTree'']:\n for alg in ['GPOO', 'StoOO', 'GPTree']:\n saved_file = save_folder + alg + '_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n\n if os.path.isfile(saved_file):\n with open(saved_file, 'rb') as handle:\n data_dict = pickle.load(handle)\n regret_dict[alg + ' S=1'] = data_dict['center']\n regret_center_dict[alg] = data_dict['center']\n if alg != 'GPTree':\n regret_dict[alg + ' S=10'] = data_dict['ave']\n regret_ave_dict[alg] = data_dict['ave']\n else:\n print('Warning: ', str(saved_file) + ' not exist. 
Please check.')\n\n # fig, axes = plt.subplots(1, 1, figsize = (4,8))\n plot_name = 'Regret_' + str(n) + '_' + str(n_repeat)\n plot_regret_one(regret_dict, plot_name, budget = n, n_repeat=n_repeat, save_folder=save_folder)\n\n # plot_name = 'Regret_' + str(n) + '_' + str(n_repeat) + '_center'\n # plot_regret_one(regret_center_dict, plot_name, budget = n, n_repeat=n_repeat, save_folder=save_folder, plot_title='S = 1')\n\n # plot_name = 'Regret_' + str(n) + '_' + str(n_repeat) + '_ave'\n # plot_regret_one(regret_ave_dict, plot_name, budget = n, n_repeat=n_repeat, save_folder=save_folder, plot_title='S = 10')\n\n# -------------------------------------------------------------------------------\n# function and delta\n\n# def f(x):\n# return (np.sin(13.0 * x) * np.sin(27.0*x) + 1)/2.0\n\ndef construct_model_f(opt_num, my_kernel):\n \"\"\"Generate unknown f to be optimised by \n posterior of a the known GP\n \"\"\"\n\n # X = np.random.uniform(0, 1., (sample_size, 1))\n # Y = np.sin(X) + np.random.randn(sample_size, 1)*0.05\n\n # option 1:\n if opt_num == '1':\n sample_size = 5\n X = np.array([0.05, 0.2, 0.4, 0.65, 0.9]).reshape(sample_size,1)\n Y = np.array([0.85, 0.1, 0.87, 0.05, 0.98]).reshape(sample_size,1)\n\n my_kernel = GPy.kern.RBF(input_dim=d, \n variance=kernel_var, \n lengthscale=0.1)\n\n # option 4\n if opt_num == '2':\n # create a function that has different frequency on the left and right? I.e. wiggly on the left, very low wiggle on the right. E.g. only one wave for the high amplitude, but 10 waves for the low amplitude.\n\n r = np.random.RandomState(2021)\n X = []\n Y = []\n split_list = np.linspace(arms_range[0], 0.9, num = 10)\n for i in range(len(split_list)-1):\n center = (split_list[i] + split_list[i+1])/2.0\n # y = r.uniform(0.05, 0.1)\n y = 0.1\n X.append(center)\n Y.append(y) # each center is assigned to a value 0~0.2\n\n # another = r.uniform(split_list[i], split_list[i+1])\n another = center + 2.0 * (split_list[i+1] - split_list[i])/3\n X.append(another)\n # Y.append(r.uniform(0.2, 0.25))\n Y.append(0.2)\n\n X.append(0.95)\n Y.append(0.9)\n X = np.array(X).reshape(-1, 1)\n Y = np.array(Y).reshape(-1,1)\n\n # option 6\n if opt_num == '3':\n # create a function that has different frequency on the left and right? I.e. wiggly on the left, very low wiggle on the right. E.g. 
only one wave for the high amplitude, but 10 waves for the low amplitude.\n \n lengthscale = 0.01 # 0.05 # kernel para\n my_kernel = GPy.kern.RBF(input_dim=d, \n variance=kernel_var, \n lengthscale=lengthscale)\n\n r = np.random.RandomState(2021)\n sample_size = 9\n X = []\n Y = []\n split_list = np.linspace(arms_range[0], 0.9, num = 10)\n for i in range(len(split_list)-1):\n center = (split_list[i] + split_list[i+1])/2.0\n # y = r.uniform(0.05, 0.1)\n y = 0.1\n X.append(center)\n Y.append(y) # each center is assigned to a value 0~0.2\n\n # another = r.uniform(split_list[i], split_list[i+1])\n another = center + 2.0 * (split_list[i+1] - split_list[i])/3\n X.append(another)\n # Y.append(r.uniform(0.2, 0.25))\n Y.append(0.2)\n\n X.append(0.94)\n Y.append(0.1)\n X.append(0.945)\n Y.append(0.2)\n X.append(0.95)\n Y.append(0.9)\n X = np.array(X).reshape(-1, 1)\n Y = np.array(Y).reshape(-1,1)\n\n model = GPy.models.GPRegression(X,Y,my_kernel, noise_var=gp_noise_var)\n\n return model, X,Y\n\nf_model,X_samples, Y_samples = construct_model_f(opt_num, my_kernel)\n\n# return partial(model.posterior_samples_f, size=size)\ndef f(x):\n return f_model.predict(x)[0]\n\ndef get_opt_x(f, arms_range):\n \"\"\"Empirical opt x.\n \"\"\"\n size = 1000\n \n x = np.linspace(arms_range[0], arms_range[1], size).reshape(-1,1)\n f_list = f(x)\n \n # REVIEW: we assume there is an unique opt point\n return x[np.argmax(f_list)].reshape(-1,1)\n\n# test f\ntestX = np.linspace(arms_range[0], arms_range[1], 1000).reshape(-1, 1)\nposteriorTestY = f(testX)\nopt_x = get_opt_x(f, arms_range)\n\nfig, axes = plt.subplots(1, 1, figsize = (6,6))\n# for i in range(1000):\naxes.plot(testX, posteriorTestY, c = 'black')\naxes.scatter(opt_x, f(opt_x), c = 'red')\naxes.set_xlabel('Arms')\naxes.set_ylabel('Reward Function')\naxes.set_ylim(-0.05,1)\n# axes.scatter(X_samples,Y_samples)\nplt.savefig(save_folder + 'posterior_f' + str(opt_num) + '.pdf', bbox_inches='tight')\n\n \ndef delta1(h):\n # return 14.0 * 2**(-h)\n return 14 * 2**(-h)\n\ndef delta2(h):\n return 222.0 * 2**(-2.0 * h)\n\n#------------------------------------------------------------------------------------\n# run algorithm\n\nopt_x = get_opt_x(f, arms_range)\nprint('opt_x: ', opt_x)\nprint('opt f: ', f(opt_x))\n\nif 'StoOO' in run_alg:\n eta = 0.1\n rep_regret_list1 = []\n rep_regret_list2 = []\n\n for i in range(n_repeat):\n regret_list1 = []\n regret_list2 = []\n print('repeat: ', i)\n for b in range(n):\n # print('budget: ', b)\n sto1 = StoOO(f=f, delta=delta1, root_cell=arms_range, n=b, k=k, d=d, s=1, reward_type = 'center', sigma = 0.1, opt_x = opt_x, eta = eta)\n\n regret_sto1 = sto1.rec()\n regret_list1.append(regret_sto1)\n\n sto2 = StoOO(f=f, delta=delta1, root_cell=arms_range, n=b, k=k, d=d, s=s, reward_type = 'ave', sigma = 0.1, opt_x = opt_x, eta = eta)\n # doo2 = DOO(arms_range, f, delta2, k, n, reward_type)\n\n regret_sto2 = sto2.rec()\n regret_list2.append(regret_sto2)\n # print('**************************************')\n\n rep_regret_list1.append(regret_list1)\n rep_regret_list2.append(regret_list2)\n\n if plot_tree:\n plot_two(arms_range, f, sto1, sto2, 'StoOO', save_folder=save_folder)\n\n data_dict = {}\n data_dict['center'] = rep_regret_list1\n data_dict['ave'] = rep_regret_list2\n \n save_name = save_folder + 'StoOO_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n with open(save_name, 'wb') as handle:\n pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) \n\nif 'GPOO' in run_alg:\n rep_regret_list1 = []\n rep_regret_list2 = []\n\n for 
i in range(n_repeat):\n print('repeat: ', i)\n gpoo1 = GPOO(\n f=f, delta=delta1, root_cell=arms_range, n=n, k=k, d=1, s=1, reward_type = 'center', sigma = 0.1, opt_x = opt_x, hmax = hmax,\n lengthscale = lengthscale, kernel_var = kernel_var, gp_noise_var = gp_noise_var, opt_flag = opt_flag\n )\n regret_gpoo1 = gpoo1.rec()\n rep_regret_list1.append(regret_gpoo1)\n\n # print('center:')\n # print('regret: ', regret_gpoo1)\n # print(gpoo1.rec_node.features)\n # print(gpoo1.rec_node.depth)\n # print('*****************************')\n\n # print([node.features for node in doo1.evaluated_nodes])\n # print(doo1.evaluated_fs)\n # print()\n\n gpoo2 = GPOO(\n f=f, delta=delta1, root_cell=arms_range, n=n, k=k, d=1, s=10, reward_type = 'ave', sigma = 0.1, opt_x = get_opt_x(f, arms_range), hmax = hmax,\n lengthscale = lengthscale, kernel_var = kernel_var, gp_noise_var = gp_noise_var, opt_flag = opt_flag\n )\n regret_gpoo2 = gpoo2.rec()\n rep_regret_list2.append(regret_gpoo2)\n\n if plot_tree:\n plot_two(arms_range, gpoo1.f, gpoo1, gpoo2, 'GPOO',save_folder=save_folder)\n\n data_dict = {}\n data_dict['center'] = rep_regret_list1\n data_dict['ave'] = rep_regret_list2\n \n save_name = save_folder + 'GPOO_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n with open(save_name, 'wb') as handle:\n pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nif 'GPTree' in run_alg:\n rep_regret_list1 = []\n rep_regret_list2 = []\n\n for i in range(n_repeat):\n print('repeat: ', i)\n\n gptree1 = GPTree(\n f=f, delta=delta1, root_cell=arms_range, n=n, k=k, d=1, s=1, reward_type = 'center', sigma = 0.1, opt_x = opt_x,\n lengthscale = lengthscale, kernel_var = kernel_var, gp_noise_var = gp_noise_var, opt_flag = opt_flag\n )\n regret_gptree1 = gptree1.rec()\n rep_regret_list1.append(regret_gptree1)\n\n # print('center:')\n # print('regret: ', regret_gptree1)\n # print(gptree1.rec_node.features)\n # print(gptree1.rec_node.depth)\n # print('*****************************')\n\n # print([node.features for node in doo1.evaluated_nodes])\n # print(doo1.evaluated_fs)\n # print()\n\n # NOTE: not running ave case for GPTree\n # gptree2 = GPTree(\n # f=f, delta=delta1, root_cell=arms_range, n=n, k=k, d=1, s=10, reward_type = 'ave', sigma = 0.1, opt_x = opt_x,\n # lengthscale = lengthscale, kernel_var = kernel_var, gp_noise_var = gp_noise_var, opt_flag = opt_flag\n # )\n # regret_gptree2 = gptree2.rec()\n # rep_regret_list2.append(regret_gptree2)\n\n if plot_tree:\n plot_two(arms_range, gptree1.f, gptree1, gptree1, 'GPTree Only Center', save_folder=save_folder)\n\n import pickle \n data_dict = {}\n data_dict['center'] = rep_regret_list1\n # data_dict['ave'] = rep_regret_list2\n \n save_name = save_folder+ 'GPTree_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n with open(save_name, 'wb') as handle:\n pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nif 'Random' in run_alg:\n \n rep_regret_list1 = []\n rep_regret_list2 = []\n for i in range(n_repeat):\n print('repeat: ', i)\n regret_list1 = []\n regret_list2 = []\n for b in np.linspace(1, n, n):\n b = int(b)\n ran1 = Random(\n f=f, delta=delta1, root_cell=arms_range, n=b, k=b, d=1, s=1, reward_type = 'center', sigma = 0.1, opt_x = opt_x\n )\n ran2 = Random(\n f=f, delta=delta1, root_cell=arms_range, n=b, k=b, d=1, s=10, reward_type = 'ave', sigma = 0.1, opt_x = opt_x\n )\n regret_list1.append(ran1.rec())\n regret_list2.append(ran2.rec())\n rep_regret_list1.append(regret_list1)\n rep_regret_list2.append(regret_list2)\n\n if plot_tree:\n 
plot_two(arms_range, ran1.f, ran1, ran2, 'Random', save_folder=save_folder)\n\n    import pickle \n    data_dict = {}\n    data_dict['center'] = rep_regret_list1\n    data_dict['ave'] = rep_regret_list2\n    \n    save_name = save_folder + 'Random_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n    with open(save_name, 'wb') as handle:\n        pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nif 'SK' in run_alg:\n    # s vs. k vs. regret\n    saved_file = save_folder + 'SK_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n    s_list = [1, 10, 20, 30, 40, 50]\n    k_list = [2,3,4,5]\n    if os.path.isfile(saved_file):\n        with open(saved_file, 'rb') as handle:\n            regret_dict = pickle.load(handle)\n        df = pd.DataFrame.from_dict(regret_dict, orient = 'index', columns = k_list)\n        df.index.name = 'S'\n        df.columns.name = 'K'\n        \n        xr_data = xr.DataArray(np.log(df))\n        xr_data.plot()\n        plt.title('N='+str(n))\n        # plt.clim(-2.25,-0.05)\n        plt.xticks(k_list)\n        plt.yticks(s_list)\n        plt.savefig(save_folder + 'SK_regret_' + str(n) + '.pdf')\n    else:\n        # regret_dict = {}\n        regret_dict = DefaultDict(list) # only record mean \n        regret_whole_dict = {}\n        \n        for s in s_list:\n            for k in k_list:\n                print('s, k: ' + str(s) + ' ' + str(k))\n                if s == 1:\n                    reward_type = 'center'\n                else:\n                    reward_type = 'ave'\n                regret_list = []\n                for i in range(n_repeat):\n                    print('repeat: ', i)\n                    gpoo = GPOO(\n                        f=f, delta=delta1, root_cell=arms_range, n=n, k=k, d=d, s=s, reward_type = reward_type, sigma = sigma, opt_x = get_opt_x(f, arms_range), hmax = hmax,\n                        lengthscale = lengthscale, kernel_var = kernel_var, gp_noise_var = gp_noise_var, opt_flag = opt_flag\n                    )\n                    regret_gpoo = gpoo.rec()[-1]\n                    regret_list.append(regret_gpoo)\n                key = str(s) + ',' + str(k)\n                regret_whole_dict[key] = regret_list \n                regret_dict[s].append(np.mean(regret_list))\n        \n        \n        with open(saved_file, 'wb') as handle:\n            pickle.dump(regret_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n        saved_whole_file = save_folder + 'SK_whole_regret_' + str(n) + '_' + str(n_repeat) + '.pickle'\n        with open(saved_whole_file, 'wb') as handle:\n            pickle.dump(regret_whole_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    ","sub_path":"supplementary_code/run_sim.py","file_name":"run_sim.py","file_ext":"py","file_size_in_byte":16287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"98460475","text":"def kas_veel_mahub(vabade_kohtade_arv, soovijate_arv):\n    ''' (int, int) -> Boolean\n    Returns a Boolean: whether the group that wants to board still fits in the boat.\n    >>> kas_veel_mahub(10, 9)\n    True\n    >>> kas_veel_mahub(10, 10)\n    True\n    >>> kas_veel_mahub(10, 11)\n    False\n    '''\n    return (vabade_kohtade_arv >= soovijate_arv)\n\nfailinimi = input('Enter the file name: ')\nfailisisu = []\nf = open(failinimi, encoding=\"UTF-8\")\nfor rida in f.read().split('\\n'):\n    failisisu.append(int(rida))\nf.close()\n\npaadi_mahutavus = int(input('How many passengers fit in the boat: '))\n\nsoidukite_arv = 0\nfor i in failisisu:\n    if kas_veel_mahub(paadi_mahutavus, i):\n        paadi_mahutavus = paadi_mahutavus - i\n        soidukite_arv += 1\n\nprint('People from ' + str(soidukite_arv) + ' vehicles got into the boat')\nprint('Seats left free: ' + 
str(paadi_mahutavus))\n","sub_path":"arvestustoo05012018.py","file_name":"arvestustoo05012018.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558660062","text":"#!/usr/bin/env python3\r\n#\r\n\r\nfrom sanic import Blueprint\r\nfrom sanic.response import text\r\n\r\ntypes = Blueprint('types', url_prefix='/types')\r\n\r\n\r\n# post\r\n# curl -X POST -d '{\"name\":\"alice\",\"age\":21}' http://192.168.250.201:8090/api/content/types/post\r\n@types.route('/post', methods=['POST', ])\r\nasync def handler_post(request):\r\n return text('POST request - {}'.format(request.json))\r\n\r\n\r\n# get\r\n# curl http://192.168.250.201:8090/api/content/types/get?name=alice&age=21\r\n@types.route('/get', methods=['GET', ])\r\nasync def handler_get(request):\r\n return text('GET request - {}'.format(request.args))","sub_path":"basic_/sanic_/readthedocs/api/content/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138658543","text":"import cellopt_parser as parser\nimport os\nimport time\nimport csv\nfrom collections import OrderedDict\nimport numpy as np\n\nINPUT = os.environ[\"CELL_OPT_ROOT_DIR\"] + \"/workspace/pending\"\nOUTPUT = os.environ[\"CELL_OPT_ROOT_DIR\"] + \"/workspace/output\"\nRUNSPACE = os.environ[\"CELL_OPT_ROOT_DIR\"] + \"/workspace/runspace\"\nOPTIN = os.environ[\"CELL_OPT_ROOT_DIR\"] + \"/opt_in\"\n\n\nclass Param:\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n\nclass caseParams:\n def __init__(self, caseID):\n self.caseID = caseID\n self.params = []\n\n def add_param(self, param):\n self.params.append(param)\n\n\nclass plot2D:\n def __init__(self):\n self.xVals = None\n self.yVals = None \n self.zVals = None\n \n def create_npArrays(self, num_pts_per_par):\n self.xVals = np.zeros(shape=num_pts_per_par)\n self.yVals = np.zeros(shape=num_pts_per_par)\n self.zVals = np.zeros(shape=(num_pts_per_par,num_pts_per_par))\n\n def update_npArrays(self, xIdx,yIdx,xVal,yVal,zVal):\n if (self.xVals[xIdx]!=0) and (self.xVals[xIdx]!=xVal) :\n print (\"ERROR xVal mismatch, Quitting\")\n print (\"ERROR xVal mismatch, Quitting xIdx=%d,xVals[xIdx]=%e,xVal=%e\" % (xIdx,xVals[xIdx],xVal))\n exit()\n if (self.yVals[yIdx]!=0) and (self.yVals[yIdx]!=yVal) :\n print (\"ERROR yVal mismatch, Quitting yIdx=%d,yVals[yIdx]=%e,yVal=%e\" % (yIdx,yVals[yIdx],yVal))\n exit()\n self.xVals[xIdx] = xVal\n self.yVals[yIdx] = yVal \n self.zVals[xIdx,yIdx] = zVal\n \n def dump(self) :\n print(\"p2D dump\")\n print (\"self.xVals=\\n%s\" % self.xVals )\n print (\"self.yVals=\\n%s\" % self.yVals )\n print (\"self.zVals=\\n%s\" % self.zVals )\n \n \nclass caseInfo:\n def __init__(self):\n self.casePars = None\n # Currently the skew gap of the run (which we wish to minimize)\n self.result = 10**9\n self.start_time = time.time()\n self.launch_command = ''\n\n\nclass coptDB:\n def __init__(self, dbFileName, master_cell_name):\n self.casesDict = OrderedDict()\n self.master_cell = master_cell_name\n self.dbFileName = dbFileName\n self.createDbFile()\n self.p2D = plot2D()\n\n def insertCase(self, caseID, launchCmd, start):\n caseInf = caseInfo()\n caseInf.start_time = time.time() - start\n case_path = RUNSPACE + '/' + caseID + '/' + caseID + '.sp'\n casePars = parser.get_sp_params(case_path)\n caseInf.params = casePars\n caseInf.launch_command = launchCmd\n self.casesDict[caseID] = 
caseInf\n\n def updateCaseResults(self, caseID):\n result_path = RUNSPACE + \"/\" + caseID + \"/\" + caseID + \"_datasheet.txt\"\n # while not os.path.exists(result_path)\n # time.sleep(0.1)\n self.casesDict[caseID].result = parser.get_gap(result_path)\n self.updateDbFile(caseID)\n\n def createDbFile(self):\n dbFile = open(OPTIN + '/' + self.dbFileName + '.csv', 'w+')\n master_pars = parser.get_sp_params(\n OPTIN + '/' + self.master_cell + '.sp')\n title_line = \"caseID,\"\n for key in master_pars:\n title_line = title_line + key + ','\n title_line = title_line + 'result\\n'\n dbFile.write(title_line)\n dbFile.close()\n\n def updateDbFile(self, caseID):\n print(\"DEBUG: updateDBfile called for caseID \" + caseID)\n dbFile = open(OPTIN + '/' + self.dbFileName + '.csv', 'a')\n case_entry = caseID + ','\n num_params = 0\n for key in self.casesDict[caseID].params:\n case_entry = case_entry + \\\n str(self.casesDict[caseID].params[key]) + ','\n if (num_params==0) :\n xVal = self.casesDict[caseID].params[key] \n if (num_params==1) :\n yVal = self.casesDict[caseID].params[key] \n num_params += 1\n case_entry = case_entry + str(self.casesDict[caseID].result) + '\\n'\n dbFile.write(case_entry)\n dbFile.close()\n if (num_params==2) :\n xIdx = int(caseID.split(\"_\")[2])\n yIdx = int(caseID.split(\"_\")[3])\n zVal = self.casesDict[caseID].result\n self.p2D.update_npArrays(xIdx,yIdx,xVal,yVal,zVal)\n \n def read_csv(self, src_file):\n with open(src_file, 'r') as csv_db:\n reader = csv.reader(csv_db)\n first_line = True\n keys = []\n for row in reader:\n if first_line:\n keys = row\n first_line = False\n else:\n casePars = OrderedDict()\n for index in range(1, len(row) - 1):\n casePars[keys[index]] = row[index]\n caseInf = caseInfo()\n caseInf.casePars = casePars\n caseInf.result = row[-1]\n self.casesDict[row[0]] = caseInf\n\n csv_db.close()\n\n\n# Create python grid mode plot file\n# # Applicable only for 2 parameters , assuming 2 parameters\n#\n# def createGridPyPlotFile(self,pts_per_par): \n# gpFile = open(OPTIN + '/' + self.dbFileName + '_gridPlot.py', 'w+')\n# gpFile.write(\"import numpy as np\")\n# gpFile.write(\"from scipy.interpolate import interp2D\")\n# gpFile.write(\"from mpl_toolkits import mplot3d\")\n# gpFile.write(\"import matplotlib.pyplot as plt\")\n# gpFile.close()\n#\n# def updateGridPyPlotFile(self, caseID):\n# print(\"DEBUG: updateDBfile called for caseID \" + caseID)\n# gpFile = open(OPTIN + '/' + self.dbFileName + '.csv', 'a')\n# case_entry = caseID + ','\n# for key in self.casesDict[caseID].params:\n# case_entry = case_entry + \\\n# str(self.casesDict[caseID].params[key]) + ','\n# case_entry = case_entry + str(self.casesDict[caseID].result) + '\\n'\n# gpFile.write(case_entry)\n# gpFile.close()\n","sub_path":"Cellopt/utils/cellopt_db_manager.py","file_name":"cellopt_db_manager.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129641174","text":"nums = int(input())\nres = []\nfor j in range(nums):\n n = int(input())\n ans = []\n for k in range(n, -1, -1):\n if n & k == k:\n ans.append(str(k))\n res.append(\" \".join(ans))\nfor n in res:\n print(n)","sub_path":"Code/CodeRecords/2669/60692/273177.py","file_name":"273177.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520750614","text":"import cv2\nimport numpy as np\nimport time\nimport os\n\nIMG_TXT_PATH = 
\"../../data/list/Tu_indoor/train.txt\"\nROOT_DIR_PATH = \"../../data/Tu_indoor\"\nimg_count = 0\nclass_colors = np.array([\n [0, 0, 0],\n [192, 0, 0],\n [128, 64, 128],\n [0, 0, 128],\n [0, 64, 64],\n [128, 128, 192],\n [128, 0, 64],\n [128, 128, 128],\n ])\nclass_count = np.zeros(len(class_colors))\n\nwith open(IMG_TXT_PATH) as f:\n for img_paths in f.read().splitlines():\n img_path_list = img_paths.split(' ')\n train_L_path = img_path_list[1]\n print(train_L_path)\n img = cv2.imread(os.path.join(ROOT_DIR_PATH, train_L_path), cv2.IMREAD_COLOR)\n for i, color in enumerate(class_colors):\n target_mask = cv2.inRange(img, color, color)\n class_count[i] += cv2.countNonZero(target_mask)\n img_count += 1\n\nnp.set_printoptions(precision = 6, suppress = True)\nclass_mean = class_count / img_count\nclass_weights = class_mean / np.sum(class_mean)\nprint(\"class_weights is\")\nprint(class_weights)\n\n","sub_path":"tools/mytools/get_class_weights.py","file_name":"get_class_weights.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499319045","text":"import os \r\n\r\ndef create_project_dir(directory):\r\n\tif not os.path.exists(directory):\r\n\t\tprint('creating directory: ' + directory)\r\n\t\tos.makedirs(directory)\r\n\r\ndef create_data_files(project_name, base_url, directory):\r\n\tqueue = directory + '/queue.txt'\r\n\tcrawled = directory + '/crawled.txt'\r\n\tdatabased = directory + '/databased.txt'\r\n\tdomains_covered = directory + '/domains_covered.txt'\r\n\tsql = directory +'/'+ project_name+'.sql'\r\n\t\r\n\tprint('creating queue file')\r\n\twrite_file(queue, base_url)\r\n\t\r\n\tprint('creating crawled file')\r\n\twrite_file(crawled, '')\r\n\t\r\n\tprint('creating sql file')\r\n\twrite_file(sql, '')\r\n\t\r\n\tprint('creating databased file')\r\n\twrite_file(databased, '')\r\n\r\n\tprint('creating domains file')\r\n\twrite_file(domains_covered, '')\r\n\r\ndef write_file(path,data):\r\n\tf = open(path, 'w')\r\n\tf.write(data)\r\n\tf.close()\r\n\r\ndef append_to_file(path, data):\r\n\twith open(path, 'a') as file :\r\n\t\tfile.write(data + '\\n')\r\n\r\ndef delete_file_contents(path):\r\n\twith open(path, 'w'):\r\n\t\tpass\r\n\r\ndef file_to_set(file_name):\r\n\tresults = set()\r\n\twith open(file_name, 'rt') as f:\r\n\t\tfor line in f:\r\n\t\t\tresults.add(line.replace('\\n', ''))\r\n\treturn results\r\n\r\ndef set_to_file(links, file):\r\n\tdelete_file_contents(file)\r\n\tfor link in sorted(links):\r\n\t\tappend_to_file(file, link)\r\n\r\ndef file_to_list(file_name):\r\n\tresults = []\r\n\twith open(file_name, 'r') as f:\r\n\t\tfor line in f:\r\n\t\t\tresults.append(line.replace('\\n', ''))\r\n\treturn results\r\n\r\ndef get_path(filename):\r\n\tif '/' not in filename:\r\n\t\treturn ''\r\n\r\n\tsplitpath = filename.split('/')\r\n\tpath = ''\r\n\r\n\tfor i in range(0, len(splitpath)-1):\r\n\t\tpath += splitpath[i]\r\n\r\n\treturn path","sub_path":"general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344743053","text":"\nfrom common.data_handler import DataHandler\nfrom common.protocol_handler import ProtocolHandler\nfrom common import forward_data\nfrom common import connector\nfrom common import tools\nimport logging\nlogger = logging.getLogger('my_logger')\n\nclass OutDataHandler(DataHandler):\n\n\n def 
create_connection(self,forward_id,inner_ip,inner_port,inner_connector):\n _protocol_handler = ProtocolHandler()\n forw_data = forward_data.ForwardData(forward_data.DATA_TYPE.NEW_CONNECTION, forward_id,\n inner_ip, inner_port, '')\n send_package = _protocol_handler.build_data(forw_data)\n if inner_connector and inner_connector.con_state == connector.CON_STATE.CON_CONNECTED:\n send_bytes = inner_connector.send(send_package)\n if send_bytes <= 0:\n logger.error(\"CreateConnectionData send failed,forward_id:%d inner_ip:%s inner_port:%d\" % (forward_id, inner_ip, inner_port))\n\n def trans_data(self,forward_id,inner_ip,inner_port,data,inner_connector):\n\n ori = 0\n total_len = len(data)\n while ori < total_len:\n if total_len - ori <= 10000:\n send_data = data[ori:total_len]\n else:\n send_data = data[ori:ori + 10000]\n\n _protocol_handler = ProtocolHandler()\n forw_data = forward_data.ForwardData(forward_data.DATA_TYPE.TRANS_DATA, forward_id,\n inner_ip, inner_port, send_data)\n send_package = _protocol_handler.build_data(forw_data)\n if inner_connector and inner_connector.con_state == connector.CON_STATE.CON_CONNECTED:\n send_bytes = inner_connector.send(send_package)\n if send_bytes <= 0:\n logger.error(\"TransData to inner send failed,forward_id:%d inner_ip:%s inner_port:%d\" % (forward_id, inner_ip, inner_port))\n raise Exception(\"Send data failed\")\n #print 'inner_connector send package'\n #tools.print_hex_buf(send_package)\n ori += 10000\n\n\n def close_connection(self,forward_id,inner_ip,inner_port,inner_connector):\n _protocol_handler = ProtocolHandler()\n forw_data = forward_data.ForwardData(forward_data.DATA_TYPE.CLOSE_CONNECTION, forward_id,\n inner_ip, inner_port, '')\n send_package = _protocol_handler.build_data(forw_data)\n if inner_connector and inner_connector.con_state == connector.CON_STATE.CON_CONNECTED:\n send_bytes = inner_connector.send(send_package)\n if send_bytes <= 0:\n logger.error(\"CloseConnectionData send failed,forward_id:%d inner_ip:%s inner_port:%d\" % (forward_id, inner_ip, inner_port))\n\nclass InnerDataHandler(DataHandler):\n def handle_data(self, ring_buffer,worker_manager):\n while True:\n parsed_data = super(InnerDataHandler, self).parse_data(ring_buffer)\n if parsed_data == None:\n return\n\n if parsed_data.data_type == forward_data.DATA_TYPE.CONNECTION_SUCCESS:\n outer_worker = worker_manager.get_worker_by_id(parsed_data.id)\n if outer_worker != None:\n outer_worker.connecting_reply(True)\n elif parsed_data.data_type == forward_data.DATA_TYPE.TRANS_DATA:\n outer_worker = worker_manager.get_worker_by_id(parsed_data.id)\n if outer_worker != None and outer_worker.is_working():\n outer_worker.trans_data(parsed_data.data)\n elif parsed_data.data_type == forward_data.DATA_TYPE.CLOSE_CONNECTION:\n outer_worker = worker_manager.get_worker_by_id(parsed_data.id)\n if outer_worker != None:\n outer_worker.close()\n elif parsed_data.data_type == forward_data.DATA_TYPE.HEART_BEAT:\n logger.debug('Recv heartbeat')","sub_path":"tcp_forward/forward_server/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92229152","text":"\n# find function will find that n paths in the report, will display them and also highlight the critical path of circuit.\n\nfrom sys import argv\n\ndef find(txt,tmp,path):\n formatter = \" {} \\n\"\n setup = open(\"setup.txt\",\"w+\")\n hold = open(\"hold.txt\",\"w+\")\n for line in txt.readlines():\n if 
'Path' in line or 'setup' in line or 'hold a' in line:\n text = line.split()\n for i in range(len(text)): \n if text[i] == 'delay' :\n tmp.write(text[i+1])\n if text[i] == 'setup' or text[i] == 'hold':\n tmp.write(formatter.format(text[i])) \n \n tmp.seek(0,0)\n for line in tmp.readlines():\n if 'setup' in line: \n setup.write(formatter.format(line.split()[0]))\n if 'hold' in line: \n hold.write(formatter.format(line.split()[0]))\n \n setup.seek(0,0)\n hold.seek(0,0)\n data_s = setup.readlines()\n data_h = hold.readlines()\n data_s.sort(key = float,reverse = True)\n data_h.sort(key = float,reverse = True)\n converted_data_s = []\n converted_data_h = []\n \n for element in data_s:\n converted_data_s.append(element.strip())\n for element in data_h:\n converted_data_h.append(element.strip()) \n \n print (\"\\n\\n{:<15} {:<15} {:<10}\\n\".format('Path Number','For Setup','For Hold'))\n \n if path == 0 or path > len(data_s):\n for i in range(len(data_s)):\n print (\"{:<15} {:<15} {:<10}\".format(i+1,converted_data_s[i],converted_data_h[i]))\n else: \n for i in range(path): \n print (\"{:<15} {:<15} {:<10}\".format(i+1,converted_data_s[i],converted_data_h[i])) \n \n txt.seek(0,0)\n tmp.seek(0,0)\n temp = tmp.readline().split()[0]\n for line in txt.readlines():\n if temp in line:\n print (\"\\n Critical Path: \",line)\n break \n \n setup.close()\n hold.close() \n \nprint (\"Enter the filename:\")\nfilename = input()\nprint (\"Enter the number of path\")\npath = int(input())\ntxt = open(filename)\ntmp = open(\"tmp.txt\",\"w+\")\n\nfind(txt,tmp,path)\n\ntxt.close()\ntmp.close()\n\n","sub_path":"Tools/python/challenge_3.py","file_name":"challenge_3.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341491446","text":"import math\nimport pylab\nimport pickle\nimport csv\nR = 36.4\nn = 100\nx= 0\nlist = []\ndic = {}\nwhile x < n:\n x += 0.01\n y = 452*(math.cos(x/11)+math.cos(x/8))+R+175\n if not dic.get(y):\n dic[y] = round(x * 100, 1)\n#for key in dic.keys():\n #print (key, dic[key])\n#print(dic)\nfor i in dic.keys():\n list.append(i)\n#print(list)\n\nwith open('ttt.csv', 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n for i in dic.items():\n csv_writer.writerow([i])\n\n\n\n#import math\n## Импортируем один из пакетов Matplotlib\n#import pylab\n## Импортируем пакет со вспомогательными функциями\n#from matplotlib import mlab\n#\n#\n## Рисуем график функции y = sin(x)\n#def func(x):\n# \"\"\"\n# sin (x)\n# \"\"\"\n# return 452*(math.cos(x/11)+math.cos(x/8))+R+175\n#\n#\n## Указываем X наименьее и наибольшее\n#xmin = 1\n#xmax = 10000.0\n#\n## Шаг между точками\n#dx = 1\n#\n## Создадим список координат по оси\n## X на отрезке [-xmin; xmax], включая концы\n#xlist = mlab.frange(xmin, xmax, dx)\n#\n## Вычислим значение функции в заданных точках\n#ylist = [func(x) for x in xlist]\n#\n## Нарисуем одномерный график\n#pylab.plot(xlist, ylist)\n#\n## Покажем окно с нарисованным графиком\n#pylab.show()#","sub_path":"kal.py","file_name":"kal.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381201540","text":"from flask import Flask\r\nfrom flask import request,jsonify\r\nimport json\r\nfrom getOpenid import getOpenid\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 
'mysql://root@localhost/xiaochengxu'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\r\ndb = SQLAlchemy(app)\r\n\r\n#定义用户表模型\r\nclass User(db.Model):\r\n __tablename__='user'\r\n id = db.Column(db.Integer, primary_key=True)\r\n openid = db.Column(db.String(80), unique=True)\r\n nickName= db.Column(db.String(120), unique=False)\r\n avatarUrl=db.Column(db.String(255),nullable=False)\r\n gender=db.Column(db.Integer,nullable=False)\r\n\r\n def __init__(self,openid,nickName,avatarUrl,gender):\r\n self.openid=openid\r\n self.nickName=nickName\r\n self.avatarUrl=avatarUrl\r\n self.gender=gender\r\n\r\n#定义用户评论模型\r\nclass Commenttable(db.Model):\r\n __tablename__='commenttable'\r\n id=db.Column(db.Integer,primary_key=True)\r\n panoid=db.Column(db.Integer,nullable=False)\r\n openid = db.Column(db.String(80),nullable=False)\r\n comment=db.Column(db.String(255),nullable=False)\r\n time=db.Column(db.DATETIME,nullable=False)\r\n\r\n def __init__(self,panoid,openid,comment,time):\r\n self.panoid=panoid\r\n self.openid=openid\r\n self.comment=comment\r\n self.time=time\r\n\r\n#定义全景存储模型\r\nclass Panos(db.Model):\r\n __tablename__='panos'\r\n panoid=db.Column(db.Integer,primary_key=True)\r\n title=db.Column(db.String(255),nullable=False)\r\n panolink=db.Column(db.String(255),nullable=False)\r\n author=db.Column(db.String(80),nullable=False)\r\n uploadtime=db.Column(db.DATETIME,nullable=False)\r\n\r\n def __init__(self,panoid,title,panolink,author,uploadtime):\r\n self.panoid=panoid\r\n self.title=title\r\n self.panolink=panolink\r\n self.author=author\r\n self.uploadtime=uploadtime\r\n\r\n\r\n#返回用户唯一标识openid\r\n@app.route('/getcode')\r\ndef getcode():\r\n code=request.args.get('code')\r\n res=getOpenid(code)\r\n return res['openid']\r\n\r\n#获取用户评论信息\r\n@app.route('/comment')\r\ndef userComment():\r\n comment=request.args.get('comment')\r\n openid=request.args.get('openid')\r\n print(comment,openid)\r\n return 'ok'\r\n\r\n\r\n\r\n@app.route('/userInfo')\r\ndef userInfo():\r\n res = request.args.get('userInfo')\r\n openid=request.args.get('openid')\r\n booluser = User.query.filter_by(openid=openid).first()\r\n print(\"准备插入数据库\")\r\n #如果用户表不存在该用户,则添加至数据表user当中\r\n if not booluser:\r\n res = json.loads(res)\r\n nickName=res['nickName']\r\n avatarUrl=res['avatarUrl']\r\n gender=res['gender']\r\n user=User(openid,nickName,avatarUrl,gender)\r\n db.session.add(user)\r\n db.session.commit()\r\n print(\"插入数据库成功\")\r\n print(res)\r\n return \"false\"\r\n\r\n@app.route('/panoInfo')\r\ndef panoInfo():\r\n panos = Panos.query.all()\r\n panoInfos = []\r\n for pano in panos:\r\n panoInfo = {\r\n \"panoid\": pano.panoid,\r\n \"title\": pano.title,\r\n \"panolink\": pano.panolink,\r\n \"author\": pano.author,\r\n \"uploadtime\": pano.uploadtime\r\n }\r\n panoInfos.append(panoInfo)\r\n return jsonify(panoInfos)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176809884","text":"import os\nimport os.path as osp\nimport sys\n\nimport torch\nimport torch.utils.data as data\n\nimport numpy as np\nfrom PIL import Image, ImageOps\nimport glob\nimport random\nimport cv2\nimport torchvision\nfrom torchvision import transforms\nfrom torchvision.transforms import Compose, ToTensor, Normalize, ConvertImageDtype\nfrom glob import glob\n# By Ziteng Cui, cui@mi.t.u-tokyo.ac.jp\nrandom.seed(1143)\n\n\n# input: low light image path\n# 
return: train image ids, test image ids\n\ndef populate_train_list(images_path, mode='train'):\n\n train_list = [os.path.basename(f) for f in glob(os.path.join(images_path, '*.jpg'))]\n train_list.sort()\n\n if mode == 'train':\n random.shuffle(train_list)\n\n return train_list\n\n\nclass adobe5k_loader(data.Dataset):\n\n def __init__(self, images_path, mode='train', normalize=False):\n self.train_list = populate_train_list(images_path, mode)\n # self.h, self.w = int(img_size[0]), int(img_size[1])\n self.mode = mode # train or test\n self.data_list = self.train_list\n self.low_path = images_path\n self.high_path = images_path.replace('Inputs_jpg', 'Experts_C')\n self.normalize = normalize\n self.resize = True\n #self.image_size = 1200\n #self.image_size_w = 900\n self.image_size = 600\n self.image_size_w = 450\n #self.test_resize = True\n print(\"Total examples:\", len(self.data_list))\n #print(\"Total testing examples:\", len(self.test_list))\n # self.transform_train = transforms.Compose()\n\n def FLIP_aug(self, low, high):\n if random.random() > 0.5:\n low = cv2.flip(low, 0)\n high = cv2.flip(high, 0)\n\n if random.random() > 0.5:\n low = cv2.flip(low, 1)\n high = cv2.flip(high, 1)\n\n return low, high\n\n\n def get_params(self, low):\n self.h, self.w = low.shape[0], low.shape[1] # 900, 1200\n #print(self.h, self.w)\n #self.crop_height = random.randint(self.h / 2, self.h) # random.randint(self.MinCropHeight, self.MaxCropHeight)\n #self.crop_width = random.randint(self.w / 2, self.w) # random.randint(self.MinCropWidth,self.MaxCropWidth)\n self.crop_height = self.h / 2 #random.randint(self.MinCropHeight, self.MaxCropHeight)\n self.crop_width = self.w / 2 #random.randint(self.MinCropWidth,self.MaxCropWidth)\n\n i = random.randint(0, self.h - self.crop_height)\n j = random.randint(0, self.w - self.crop_width)\n return i, j\n\n def Random_Crop(self, low, high):\n self.i, self.j = self.get_params(low)\n self.i, self.j = int(self.i), int(self.j)\n #if random.random() > 0.5:\n low = low[self.i: self.i + int(self.crop_height), self.j: self.j + int(self.crop_width)]\n high = high[self.i: self.i + int(self.crop_height), self.j: self.j + int(self.crop_width)]\n return low, high\n\n def __getitem__(self, index):\n img_id = self.data_list[index]\n \n #data_lowlight = Image.open(osp.join(self.low_path, img_id))\n data_lowlight = cv2.imread(osp.join(self.low_path, img_id), cv2.IMREAD_UNCHANGED)\n data_highlight = cv2.imread(osp.join(self.high_path, img_id), cv2.IMREAD_UNCHANGED)\n\n if data_lowlight.shape[0] >= data_lowlight.shape[1]:\n data_lowlight = cv2.transpose(data_lowlight)\n data_highlight = cv2.transpose(data_highlight)\n\n if self.resize:\n data_lowlight = cv2.resize(data_lowlight, (self.image_size, self.image_size_w))\n data_highlight = cv2.resize(data_highlight, (self.image_size, self.image_size_w))\n #print(data_lowlight.shape)\n if self.mode == 'train': #data augmentation\n data_lowlight, data_highlight = self.FLIP_aug(data_lowlight, data_highlight)\n #data_lowlight, data_highlight = self.Random_Crop(data_lowlight, data_highlight)\n #print(data_lowlight.shape)\n data_lowlight = (np.asarray(data_lowlight[..., ::-1]) / 255.0)\n data_highlight = (np.asarray(data_highlight[..., ::-1]) / 255.0)\n\n data_lowlight = torch.from_numpy(data_lowlight).float() # float32\n data_highlight = torch.from_numpy(data_highlight).float() # float32\n\n return data_lowlight.permute(2, 0, 1), data_highlight.permute(2, 0, 1)\n\n def __len__(self):\n return len(self.data_list)\n\n\nif __name__ == \"__main__\":\n 
os.environ['CUDA_VISIBLE_DEVICES'] = '3'\n train_path = '/home/czt/DataSets/five5k_dataset/Inputs_jpg'\n test_path = '/home/czt/DataSets/five5k_dataset/UPE_testset/Inputs_jpg'\n test_dataset = adobe5k_loader(train_path, mode='train')\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=1,\n pin_memory=True)\n for iteration, imgs in enumerate(test_loader):\n print(iteration)\n print(imgs[0].shape)\n print(imgs[1].shape)\n low_img = imgs[0]\n high_img = imgs[1]\n # visualization(low_img, 'show/low', iteration)\n # visualization(high_img, 'show/high', iteration)","sub_path":"IAT_enhance/data_loaders/mit5k.py","file_name":"mit5k.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326938737","text":"from switchboard import *\nimport json\nimport serverDB\nfrom bson import json_util\n\nmessageList = {}\nmessageNum = 0\nsocketList = dict()\nappSocketList = dict()\n\n\ndef sentBoardInfoReq(nodeid):\n Msg = {'message_type': SB_BOARD_INFO_REQ}\n Msg['node'] = nodeid\n Msg['flags'] = 0\n return Msg\n\n\ndef sentStateChangeReq(nodeid, sbtype, self):\n Msg = {'message_type': SB_STATE_CHANGE_REQ}\n Msg['node'] = nodeid\n if(sbtype == SB_TYPE_4X4):\n switch1 = self.get_argument('switch1', default=None)\n switch2 = self.get_argument('switch2', default=None)\n switch3 = self.get_argument('switch3', default=None)\n switch4 = self.get_argument('switch4', default=None)\n Msg['sbType'] = SB_TYPE_4X4\n Msg['switch1'] = SW_TURN_ON if (switch1 == 'on') else SW_TURN_OFF\n Msg['switch2'] = SW_TURN_ON if (switch2 == 'on') else SW_TURN_OFF\n Msg['switch3'] = SW_TURN_ON if (switch3 == 'on') else SW_TURN_OFF\n Msg['switch4'] = SW_TURN_ON if (switch4 == 'on') else SW_TURN_OFF\n Msg['switch5'] = SW_DONT_CARE\n Msg['switch6'] = SW_DONT_CARE\n Msg['switch7'] = SW_DONT_CARE\n Msg['switch8'] = SW_DONT_CARE\n return Msg\n\n\ndef sentStateChangeReqForApp(nodeid, sbtype, message):\n Msg = {'message_type': SB_STATE_CHANGE_REQ}\n Msg['node'] = nodeid\n if(sbtype == SB_TYPE_4X4):\n switch1 = message['switch1']\n switch2 = message['switch2']\n switch3 = message['switch3']\n switch4 = message['switch4']\n Msg['sbType'] = SB_TYPE_4X4\n Msg['switch1'] = SW_TURN_ON if (switch1 == 'on') else SW_TURN_OFF\n Msg['switch2'] = SW_TURN_ON if (switch2 == 'on') else SW_TURN_OFF\n Msg['switch3'] = SW_TURN_ON if (switch3 == 'on') else SW_TURN_OFF\n Msg['switch4'] = SW_TURN_ON if (switch4 == 'on') else SW_TURN_OFF\n Msg['switch5'] = SW_DONT_CARE\n Msg['switch6'] = SW_DONT_CARE\n Msg['switch7'] = SW_DONT_CARE\n Msg['switch8'] = SW_DONT_CARE\n return Msg\n\n\ndef informWebClient(message):\n #TODO : Handle App socket connections\n mid = message['mid']\n global socketList\n print(\"mid is %d:\" %mid)\n print(messageList)\n if mid in messageList:\n nodeList = serverDB.findHub(int(messageList[mid]['addr']))\n nodeList['serverPush'] = 'stateChange'\n nodeList['appSocketID'] = '0'\n if '_id' in nodeList:\n del nodeList['_id']\n msg = json.dumps(nodeList, default=json_util.default)\n messageList[mid]['value'] = 1\n hubAddr = int(messageList[mid]['addr'])\n if hubAddr in socketList:\n print(socketList[hubAddr])\n print(\"sending to browsers:%d\" % len(socketList[hubAddr]))\n if len(socketList[hubAddr]) != 0:\n socketList[hubAddr][0].session.broadcast(socketList[hubAddr], msg)\n del messageList[mid]\n else:\n print(\"Not found in socket list\")\n print(hubAddr)\n else:\n print(\"NO ACTIVE REQUEST FOUND FOR 
RESPONSE\")\n\n\ndef processMsgFromClient(connection, clientMessage):\n clientMessage = json.loads(clientMessage)\n if clientMessage['message_type'] == SB_BOARD_INFO_RSP:\n print (\"Info Response received\")\n serverDB.updateNode(connection, clientMessage)\n informWebClient(clientMessage)\n elif clientMessage['message_type'] == SB_STATE_CHANGE_RSP:\n print (\"State change Response received\")\n connection.write_message(msg)\n elif clientMessage['message_type'] == SB_DEVICE_READY_NTF:\n print (\"Hub is up and running\")\n print(\"Message received %s\\n\" %clientMessage)\n serverDB.addHub(connection, clientMessage)\n elif clientMessage['message_type'] == SB_DEVICE_INFO_NTF:\n print(\"hub addr:%s\" % format(clientMessage['hubAddr'], '#010x'))\n serverDB.addHubStates(clientMessage, connection)\n","sub_path":"serverMethods.py","file_name":"serverMethods.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500652094","text":"#!/usr/bin/env python\nimport socket\n\nip = \"0.0.0.0\"\nportIn = 11000\nsocketServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nstatus = 'ON'\n\ntry:\n socketServer.bind((ip, portIn))\n socketServer.listen(10)\n while True:\n print(\"Listennig aplication...\")\n conn, addr = socketServer.accept()\n print(\"Connected\", addr[0])\n\n comand = conn.recv(1024)\n\n if not comand:\n break\n\n else :\n print('#######################################')\n print('Temperatura - ' + comand[3:].decode())\n print('#######################################')\n conn.close()\n\n\n\nexcept Exception as erro:\n print(erro)\n socketServer.close()","sub_path":"Servidor - Socket/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440565207","text":"from django.shortcuts import render\n\nfrom django.http import HttpResponse\n\nfrom .models import Question\nfrom .models import TestResults\n\nfrom django.template import loader\n\nimport csv\n\nsop = [\n {\n \"m\":2.27,\n \"s\":2.06\n },\n {\n \"m\":7.73,\n \"s\":2.88\n },\n {\n \"m\":9.23,\n \"s\":4.59\n },\n {\n \"m\":10.36,\n \"s\":3.41\n },\n {\n \"m\":12.47,\n \"s\":4.23\n },\n {\n \"m\":8.04,\n \"s\":3.29\n },\n {\n \"m\":7.17,\n \"s\":4.05\n },\n]\n\ndef man(request):\n latest_question_list = Question.objects.filter(question_variant=\"man\").order_by('id')\n context = {'latest_question_list': latest_question_list}\n return render(request, 'polls/man.html', context)\n\ndef woman(request):\n latest_question_list = Question.objects.filter(question_variant=\"woman\").order_by('id')\n context = {'latest_question_list': latest_question_list}\n return render(request, 'polls/woman.html', context)\n\ndef stat(request):\n results = TestResults.objects.all().order_by('id')\n context = {'stat': results}\n return render(request, 'polls/stat.html', context)\n\ndef fill_man(request):\n Question.objects.filter(question_variant=\"man\").delete()\n file_obj = open(\"man.txt\")\n id = 0\n for row in file_obj:\n id += 1\n Question.objects.create(question_id=id, question_text=row, question_variant=\"man\")\n pass\n\ndef fill_woman(request):\n Question.objects.filter(question_variant=\"woman\").delete()\n file_obj = open(\"woman.txt\")\n id = 0\n for row in file_obj:\n id += 1\n Question.objects.create(question_id=id, question_text=row, question_variant=\"woman\")\n pass\n\ndef fill(request):\n fill_woman(request)\n 
fill_man(request)\n pass\n\ndef test_man(request):\n y = request.GET.getlist(\"question[]\")\n\n scales = [\n {\n \"id\":1,\n \"no\":[2,4,6,21,23,33,38,47,54,79,83,87],\n \"yes\":[13,30,32]\n },\n {\n \"id\":2,\n \"no\":[1,10,11,55,61,86,93],\n \"yes\":[11,22,34,41,44,50,53,59,80,88,91]\n },\n {\n \"id\":3,\n \"no\":[95],\n \"yes\":[14,18,22,26,27,31,34,35,43,46,59,60,62,63,64,67,74,81,91]\n },\n {\n \"id\":4,\n \"no\":[24,76],\n \"yes\":[3,6,9,12,16,27,28,37,39,51,52,58,68,73,90,91,92,96,98]\n },\n {\n \"id\":5,\n \"no\":[15,40,75,82],\n \"yes\":[3,5,16,17,25,37,42,45,48,49,51,65,66,70,71,72,77,89,94,97]\n },\n {\n \"id\":6,\n \"no\":[29],\n \"yes\":[7,19,20,36,49,56,57,69,70,71,78,84,89,94]\n },\n {\n \"id\":7,\n \"no\":[55,61,86],\n \"yes\":[18,26,31,34,35,42,43,44,48,52,62,63,64,67,74,91,94]\n },\n ]\n results = []\n idx = 0\n for scale in scales:\n sum = 0\n all = len(scale[\"yes\"] + scale[\"no\"])\n for i in scale[\"yes\"]:\n if str(i) in y:\n sum += 1\n for i in scale[\"no\"]:\n if str(i) not in y:\n sum += 1 \n t = round(10*(sum - sop[idx][\"m\"])/sop[idx][\"s\"] + 50)\n results.append(t)\n idx += 1\n TestResults.objects.create(\n scale1 = results[0],\n scale2 = results[1],\n scale3 = results[2],\n scale4 = results[3],\n scale5 = results[4],\n scale6 = results[5],\n scale7 = results[6],\n gender = \"мужчина\"\n )\n\n return render(request, 'polls/result.html', {\"results\": results})\n pass\n\ndef test_woman(request):\n y = request.GET.getlist(\"question[]\")\n scales = [\n {\n \"id\":1,\n \"no\":[2,4,8,21,33,38,54,79,83,87],\n \"yes\":[13,30,32,]\n },\n {\n \"id\":2,\n \"no\":[10,11,86,93],\n \"yes\":[1,11,22,34,41,44,50,53,55,59,61,80,91]\n },\n {\n \"id\":3,\n \"no\":[95],\n \"yes\":[14,18,22,26,27,31,34,35,43,59,60,62,63,64,67,74,81,91]\n },\n {\n \"id\":4,\n \"no\":[24],\n \"yes\":[3,6,9,12,27,28,39,51,52,58,68,73,75,76,90,91,92,96,98,99]\n },\n {\n \"id\":5,\n \"no\":[15,40],\n \"yes\":[3,5,16,17,25,42,45,48,49,51,65,66,71,77,82,85,89,94,101,102,103,104]\n },\n {\n \"id\":6,\n \"no\":[29],\n \"yes\":[7,19,20,36,49,56,57,69,70,71,78,84,89,94]\n },\n {\n \"id\":7,\n \"no\":[93],\n \"yes\":[1,3,7,11,25,28,31,35,43,48,53,58,61,63,64,65,66,79,98,99,102]\n },\n ]\n results = []\n idx = 0\n for scale in scales:\n sum = 0\n all = len(scale[\"yes\"] + scale[\"no\"])\n for i in scale[\"yes\"]:\n if str(i) in y:\n sum += 1\n for i in scale[\"no\"]:\n if str(i) not in y:\n sum += 1 \n t = round(10*(sum - sop[idx][\"m\"])/sop[idx][\"s\"] + 50)\n results.append(t)\n idx += 1\n TestResults.objects.create(\n scale1 = results[0],\n scale2 = results[1],\n scale3 = results[2],\n scale4 = results[3],\n scale5 = results[4],\n scale6 = results[5],\n scale7 = results[6],\n gender = \"женщина\"\n )\n\n return render(request, 'polls/result.html', {\"results\": results})\n pass","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532446236","text":"def main():\n \"\"\"git\n Added docstring\n :return:\n \"\"\"\n print(\"hello world11\")\n a = input(\"cum te cheama?\")\n print(\"salut \" + a)\n b = input(\"read a number\")\n b = int(b)\n square = b*b\n print(\"patratul este \", square)\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193399959","text":"#Class BinarySearch \nclass BinarySearch(list):\n\n def 
__init__(self, a, b):\n self.a = a\n self.b = b\n\n for i in range(self.a):\n list.append(self, self.b)\n self.b += b\n\n self.length = self.a\n\n def search(self, value):\n find = False\n end = (self.length - 1)\n start = 0\n count = 0\n try:\n index = self.index(value)\n find = True\n except ValueError:\n index = -1\n find\n while start <= end and value != self[end] and find:\n mid_item = (start + end) // 2\n mid_value = self[mid_item]\n if value > mid_value:\n start = mid_item + 1\n count += 1\n elif value < mid_value:\n end = mid_item - 1\n count += 1\n else:\n count += 1\n break\n return {'count': count, 'index': index}\n\n","sub_path":"Binary_search.py","file_name":"Binary_search.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559759195","text":"# -*- coding:utf-8 -*-\n\nRESOURCES_BASE_PATH = './resources/record'\n\n# ==========================================\n\n# 屏蔽群 例:[12345678, 87654321]\nblockGroupNumber = []\n# 服务器配置\nhost = 'http://127.0.0.1'\nport = 8888\n\nmax_info_length = 341\n\n# ==========================================\n\nimport util.db.sql as op\nimport time\nfrom iotbot import GroupMsg, FriendMsg\n\n\ntry:\n import ujson as json\nexcept:\n import json\n\n\ndef receive_group_msg(ctx: GroupMsg):\n msg_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ctx.MsgTime))\n print(f'{ctx.FromNickName}在{msg_time}的时候,在群{ctx.FromGroupId}发了一个类型是{ctx.MsgType}的消息,内容为:')\n print(f'{ctx.Content}')\n op.insert_group_msg(ctx)\n\n\ndef receive_friend_msg(ctx: FriendMsg):\n print(f'{ctx.FromUin}向{ctx.ToUin}发了一个类型是{ctx.MsgType}的消息,内容为:')\n print(f'{ctx.Content}')\n op.insert_friend_msg(ctx)\n","sub_path":"plugins/bot_record.py","file_name":"bot_record.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372551095","text":"import csv\nfrom ctypes import *\nfrom datetime import datetime as dt\nfrom struct import *\nclass inputdata:\n def __init__(self): #the datatype of input\n self.datetime = 0 #datetime Y/M/D H:M:S\n self.item = 0 #the number of item\n self.name=0\n self.out=0 #if it is a arrive item out=0,if it is a shipped item out=1\nclass caserack:\n def __init__(self): #the datatype of input\n self.space = [] #datetime Y/M/D H:M:S\n self.name=[]\n self.num=[]\n self.item_name=0\nArriveDate=[]#keep the ArriveDate data\nArriveItem=[]#keep the ArriveItem data\nArriveName=[]#keep the ArriveTime data\nShippedDate=[]#keep the ShippedDate data\nShippedItem=[]#keep the ShippedItem data\nShippedName=[]#keep the ShippedTime data\nf=open('input2.csv','r',encoding='big5')\nfor row in csv.DictReader(f):\n ArriveDate.append(row['ArriveDate'])#read ArriveDate\n ArriveItem.append(row['ArriveItem'])#read ArriveItem\n ArriveName.append(row['Arrivelname'])#read ArriveTime\n ShippedDate.append(row['ShippedDate'])#read ShippedDate\n ShippedItem.append(row['ShippedItem'])#read ShippedItem\n ShippedName.append(row['Shippedname'])#read ShippedTime\nf.close()\nprint(len(ArriveDate))\nprint(len(ArriveItem))\nprint(len(ArriveName))\nprint(len(ShippedDate))\nprint(len(ShippedItem))\nprint(len(ShippedName))\nADate=[] #The argument that we keep the value of converting str to Date type(ArriveDate was str when we read from the file)\nAllData=[] #Keep all of the data\nSDate=[] #The argument that we keep the value of converting str to Date type(ShippedDate was str when we read from the file)\nfor x in 
range(0,len(ArriveDate),1): #this for loop is doing about converting the str type into Datetime Type(ArriveDate)\n if ArriveDate[x].strip(): #check if there is Data int the arrary\n ADate.append(dt.strptime(ArriveDate[x], \"%Y-%m-%d %H:%M:%S\"))\n AllData.append(inputdata()) #append alldata\n AllData[x].datetime=ADate[x] #put arrivedate into alldata\n AllData[x].item=int(ArriveItem[x]) #put arrive item into alldata\n AllData[x].name=ArriveName[x]\n AllData[x].out=0 #know if the item is come or out\ncount=0\nprint(len(AllData))\nfor x in range(len(AllData),len(ShippedDate)+len(AllData),1): #this for loop is doing about converting the str type into Datetime Type(ShippedDate)\n if ShippedDate[count].strip():\n SDate.append(dt.strptime(ShippedDate[count], \"%Y-%m-%d %H:%M:%S\"))\n AllData.append(inputdata())\n AllData[x].datetime=SDate[count]\n AllData[x].item=int(ShippedItem[count])\n AllData[x].name=ShippedName[count]\n AllData[x].out=1\n count+=1 \nprint(len(AllData))\nfor x in range(0,len(AllData),1):\n print(AllData[x].datetime)\n print(AllData[x].out)\nhold=inputdata() #the argument we need in sort cause we need to hold and change\nfor a in range(0,len(AllData),1): #this loop is doing sorting the data's type\n for x in range(a,len(AllData),1):\n if AllData[x].datetime= 10000):\n break\n while True:\n\n time.sleep(5)\n if len(mdict) > 0:\n print(len(mdict))\n else:\n #check if only one consumer processed a given message; total of 10k\n print(0)\n ccnt = 0\n for v in tdict.values():\n ccnt += v\n print(ccnt)\n\nclass Consumer(threading.Thread):\n daemon = True\n\n def run(self):\n th_name = threading.currentThread().getName()\n\n client = KafkaClient(hosts=kafka_hosts)\n topic = client.topics[topic_name]\n\n consumer = topic.get_balanced_consumer(consumer_group='group1', auto_commit_enable=True, zookeeper_connect=kafka_zookeeper)\n\n while True:\n try:\n message = consumer.consume(block=True)\n\n txt = message.value.decode(\"utf-8\")\n\n #keep track of not only that message was processed, but also by how many consumers (should be only one to one)\n lock2.acquire()\n if mdict.has_key(txt):\n mdict.pop(txt)\n\n cnt = tdict.get(txt)\n\n if cnt is None:\n tdict[txt]=1\n else:\n tdict[txt]=(cnt+1)\n\n lock2.release()\n\n print (\"Consumer %s; Offset %s; messsage %s\" % (th_name, message.offset, txt))\n\n except Exception as e:\n print(e)\n logging.error(traceback.format_exc())\n consumer = topic.get_balanced_consumer(consumer_group='group1', zookeeper_connect=kafka_zookeeper)\ndef main():\n threads = [\n Producer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer(),\n Consumer()\n ]\n\n for t in threads:\n t.start()\n\n time.sleep(1000000)\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',\n level=logging.INFO\n )\n main()\n","sub_path":"tests/pykafka/test_issue_527.py","file_name":"test_issue_527.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526672866","text":"import socket\nimport threading \n\nall_connections = []\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #for have not error about port when we want after relise that port\ns.bind(('0.0.0.0', 8000))\ns.listen()\n\n\ndef client_recv(conn, my_id):\n\tmy_name = None\n\twhile True:\n\t\tdata = 
conn.recv(1024)\n\t\tif len(data) == 0:\n\t\t\tbreak\n\t\tprint('{0}: {1}'.format(my_id, [data]))\n\t\tif my_name == None:\n\t\t\tmy_name = data\n\t\t\tcontinue\t\n\t\tfor c in all_connections:\n\t\t\tif c == conn:\n\t\t\t\tcontinue\n\t\t\tc.sendall(my_name + b':' + data)\n\tprint('{0} closed'.format(my_id))\n\t\t# conn.sendall(input().decode())\n\tconn.close()\n\n\n\nnum_of_clients = 0\nwhile True:\n\tconn, addr = s.accept()\n\tnum_of_clients +=1\n\tall_connections.append(conn)\n\tprint('new: ', addr, num_of_clients)\n\tt = threading.Thread(None, client_recv, None, (conn, num_of_clients))\n\tt.start()\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"283112524","text":"\"\"\"\nGiven an integer N, return the total number self divisible numbers that\nare strictly less than N (starting from one).\nNote: A self divisible number if a number that is divisible by all of\nits digits.\n\nEx: Given the following value of N...\n\nN = 17, return 12 because 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15 are all\nself divisible numbers.\n\"\"\"\nfrom typing import Generator\n\n\ndef self_divisible_numbers(limit: int) -> Generator[int, None, None]:\n \"\"\"Returns self divisible number upto limit\"\"\"\n for num in range(1, limit):\n digits = (int(digit) for digit in str(num))\n if all(digit != 0 and num % digit == 0 for digit in digits):\n yield num\n\n\ndef main() -> None:\n \"\"\"Main function\"\"\"\n print(len(list(self_divisible_numbers(17))))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"p111_divisible_digits.py","file_name":"p111_divisible_digits.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526233484","text":"import pytest\nfrom collections import Counter\nimport hand as h\n\ndef test_init():\n hand = h.Hand('')\n assert len(hand.deck) == 60\n\ndef test_process():\n hand = h.Hand(\"tests/testdeck.txt\")\n assert len(hand.deck) == 14\n assert hand.deck[0] == \"testcard1\"\n assert hand.deck[4] == \"testcard3\"\n assert hand.deck[10] == \"testcard5\"\n assert hand.decklist[\"testcard2\"] == 2\n assert hand.decklist[\"testcard4\"] == 4\n hand = h.Hand(\"tests/mini_testdeck.txt\")\n assert len(hand.deck) == 6\n assert hand.deck[0] == \"card1\"\n assert hand.decklist[\"card2\"] == 2\n\ndef test_new_hand():\n hand = h.Hand(\"tests/testdeck.txt\")\n hand.new_hand()\n assert hand.size == 7\n assert 2 <= len(hand.cards) <= 5 #the types of cards selected\n hand.new_hand(14)\n assert hand.size == 14\n assert len(hand.cards) == 5 #All cards selected\n assert \"testcard1\" in hand.cards\n assert hand.card_counts[\"testcard3\"] == 3\n\ndef test_generate_subset_hands():\n hand = h.Hand(\"tests/testdeck.txt\")\n assert hand.size == 0\n assert hand.num_subsets == 0\n hand.new_hand(1)\n hand.generate_subset_hands(1)\n assert hand.size == 0\n assert hand.num_subsets == 1\n\n hand.new_hand(7)\n hand.generate_subset_hands(1)\n assert hand.size == 6\n assert hand.num_subsets == 7\n\n hand.new_hand(7)\n hand.generate_subset_hands(2)\n assert hand.size == 5\n assert hand.num_subsets == 21\n\ndef test_set_hand():\n hand = h.Hand(\"tests/testdeck.txt\")\n newHand = ['testcard2','testcard3']\n hand.set_hand(newHand)\n assert hand.card_counts['testcard3'] == 1\n assert hand.card_counts['testcard2'] == 1\n newHand.append('testcard2')\n hand.set_hand(newHand)\n assert 
hand.card_counts['testcard3'] == 1\n assert hand.card_counts['testcard2'] == 2\n\ndef test_select_random_remaining():\n hand = h.Hand(\"tests/mini_testdeck.txt\")\n newHand = ['card2', 'card3', 'card3', 'card3', 'card1']\n hand.draw_card(0)\n hand.draw_card(1)\n hand.draw_card(3)\n hand.draw_card(4)\n hand.draw_card(5)\n assert hand.select_random_remaining() == 2\n\n hand.new_hand(0)\n hand.draw_card(1)\n hand.draw_card(2)\n hand.draw_card(3)\n hand.draw_card(4)\n chosenNums = []\n chosenNums.append(hand.select_random_remaining())\n hand.draw_card(chosenNums[0])\n chosenNums.append(hand.select_random_remaining())\n assert Counter(chosenNums) == Counter([0,5])\n\n hand.draw_card(chosenNums[1])\n with pytest.raises(Exception):\n #Test selecting from an empty deck\n hand.select_random_remaining()\n\ndef test_draw_card():\n hand = h.Hand(\"tests/testdeck.txt\")\n hand.draw_card()\n assert hand.size == 1\n hand.new_hand(7)\n hand.draw_card()\n hand.draw_card()\n assert hand.size == 9\n hand.new_hand(11)\n hand.draw_card()\n hand.draw_card()\n hand.draw_card()\n assert hand.size == 14 #We should now have the full deck\n assert hand.card_counts[\"testcard4\"] == 4\n assert hand.card_counts[\"testcard5\"] == 4\n \n #Test specific draw\n hand.new_hand(0)\n hand.draw_card(0)\n assert hand.size == 1\n assert hand.card_counts[\"testcard1\"] == 1\n with pytest.raises(Exception):\n #Test selecting a drawn card\n hand.draw_card(0)","sub_path":"tests/test_hand.py","file_name":"test_hand.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"483193632","text":"'''\nCreated on 2013-10-28\n\n@author: fana\n'''\n#user config\nimport site\nprint (site.getusersitepackages())\n\n#print\nprint (\"Hello World\")\ntax = 12.5 / 100\nprice = 100.50\nprint (price * tax)\n\n#list\na, b = 0, 1\nwhile b < 1000:\n print( b,end=' ')\n a, b = b, a + b\n\n#if statement\nx = int(input(\"Please enter an integer: \"))\nif x < 0:\n x = 0\n print('Negative changed to zero')\nelif x == 0:\n print('Zero')\nelif x == 1:\n print('Single')\nelse:\n print('More')\n \n# loop\nwords = ['cat', 'window', 'defenestrate']\nfor w in words:\n print(w, len(w))\n \n \na = ['Mary', 'had', 'aaa', 'little', 'lamb']\nfor i in range(2, len(a), 2):\n print(i, a[i])\n \nprint(len(range (2,2)))\n\n# break \nfor n in range(2, 10):\n for x in range(2, n):\n if n % x == 0:\n print(n, 'equals', x, '*', n//x)\n break\n else:\n # loop fell through without finding a factor\n print(n, 'is a prime number')\n\n# continue\nfor num in range(2, 10):\n if num % 2 == 0:\n print(\"Found an even number\", num)\n continue\n print(\"Found a number\", num)\n \n\n","sub_path":"pythonsample/src/com/fan/sample/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"71342728","text":"import serial\nfrom pythonosc.udp_client import SimpleUDPClient\n\n\nclass ArduinoOscInput:\n def __init__(\n self,\n ip=\"127.0.0.1\",\n osc_address=\"/arduino/sensor1\",\n osc_port=51593,\n serial_device=\"/dev/tty.usbmodem14201\",\n baud_rate=19200,\n datalog_file=None,\n ):\n self.osc_client_ip = ip\n self.osc_address = osc_address\n self.osc_port = osc_port\n self.serial_device = serial_device\n self.baud_rate = baud_rate\n\n def start(self):\n print(\n f\"OSC UDP client sending to {self.osc_client_ip} on port {self.osc_port}, address: {self.osc_address}\"\n )\n 
print(\n f\"Connecting to serial at {self.serial_device} with baud {self.baud_rate}\"\n )\n self.osc_client = SimpleUDPClient(self.osc_client_ip, self.osc_port)\n self.ser = serial.Serial(\n self.serial_device, baudrate=self.baud_rate, timeout=None\n )\n self.ser.flushInput()\n\n while True:\n try:\n ser_bytes = self.ser.readline()\n # print(ser_bytes)\n decoded_bytes = float(ser_bytes[0 : len(ser_bytes) - 2].decode(\"utf-8\"))\n # Send a float through OSC\n self.osc_client.send_message(self.osc_address, decoded_bytes)\n print(decoded_bytes)\n # with open(\"test_data.csv\",\"a\") as f:\n # writer = csv.writer(f,delimiter=\",\")\n # writer.writerow([time.time(),decoded_bytes])\n except Exception as e:\n print(f\"Keyboard interrupt!: {e}\" )\n break\n\n\nif __name__ == \"__main__\":\n app = ArduinoOscInput()\n app.start()\n","sub_path":"python-osc-client/python-osc-client.py","file_name":"python-osc-client.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296961730","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom . import views\n\napp_name = 'api'\n\nrouter = DefaultRouter()\nrouter.register(r'tasks', views.TaskViewSet, basename='task')\nrouter.register(r'tasks', views.TaskGenericViewSet)\nrouter.register(r'tasks', views.TaskModelViewSet)\n\n\nurlpatterns = [\n # version 1\n path('tasks/', views.task_list),\n path('tasks/', views.task_detail),\n \n # version 2 using api_view()\n path('v2/tasks/', views.task_list_v2),\n path('v2/tasks/', views.task_detail_v2),\n \n # version 3 using APIView()\n path('v3/tasks/', views.TaskList.as_view()),\n path('v3/tasks/', views.TaskDetail.as_view()),\n \n # version 4 using mixins classes\n path('v4/tasks/', views.TaskListMixin.as_view()),\n path('v4/tasks/', views.TaskDetailMixin.as_view()),\n \n # version 5 using ViewSet\n path('v5/', include(router.urls)),\n \n # version 6 using GenericViewSet\n path('v6/', include(router.urls)),\n \n # version 7 using GenericViewSet\n path('v7/', include(router.urls)),\n \n \n \n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"374139968","text":"import metric\nimport matplotlib.pyplot as plt\n\ndef read_file(filename):\n results = []\n with open(filename) as finn:\n for line in finn:\n split = line.split(\",\")\n results.append((int(split[0]), int(split[1]), float(split[2])))\n return results\n\n\nif __name__ == \"__main__\":\n c_a = metric.CUMM_ACCURACY\n a = metric.ACCURACY\n\n results = read_file(\"results/DBN_BPIC15_1_day.csv\")\n c_a_results = c_a.calculate(results)\n a_result = a.calculate(results)\n\n plt.plot(range(len(c_a_results)), c_a_results, label=\"update-retain (day)\")\n plt.show()\n print(\"Accuracy:\", a_result)","sub_path":"Prediction Testbed/Process_Results.py","file_name":"Process_Results.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"532844838","text":"# Copyright 2021 Zilliz. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport yaml\nimport requests\nimport os\nimport logging\nfrom typing import List, Dict, Set, Any\n\n\nclass BaseRepr:\n \"\"\"\n Base representation from which all other representation objects inherit.\n\n Primarily implements automatic serialization into YAML/YAML-like string formats,\n along with defining other universally used properties.\n\n Args:\n name (`str`):\n Name of the internal object described by this representation.\n \"\"\"\n def __init__(self, name: str):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def is_valid(info: Dict[str, Any], essentials: Set[str]) -> bool:\n \"\"\"\n Check if the src is a valid YAML file to describe a component in Towhee.\n\n Args:\n info (`Dict[str, Any]`):\n The dict loaded from the source file.\n essentials (`Set[str]`):\n The essential keys that a valid YAML file should contain.\n\n Returns:\n (`bool`)\n Return `True` if the src file is a valid YAML file to describe a\n component in Towhee, else `False`.\n \"\"\"\n info_keys = set(info.keys())\n if not isinstance(info, dict) or not essentials.issubset(info_keys):\n logging.error('Info [%s] is not valid, lack attr [%s]', str(info), essentials - info_keys)\n return False\n return True\n\n @staticmethod\n def load_str(string: str) -> List[dict]:\n \"\"\"\n Load the representation(s) information from a YAML file (pre-loaded as string).\n\n Args:\n string (`str`):\n The string pre-loaded from a YAML.\n\n Returns:\n (`List[dict]`)\n The list loaded from the YAML file that contains the representation\n information.\n \"\"\"\n return yaml.safe_load(string)\n\n @staticmethod\n def load_file(file: str) -> List[dict]:\n \"\"\"\n Load the representation(s) information from a local YAML file.\n\n Args:\n file (`str`):\n The file path.\n\n Returns:\n (`List[dict]`)\n The list loaded from the YAML file that contains the representation\n information.\n \"\"\"\n with open(file, 'r', encoding='utf-8') as f:\n return BaseRepr.load_str(f)\n\n @staticmethod\n def load_url(url: str) -> List[dict]:\n \"\"\"\n Load the representation information from a remote YAML file.\n\n Args:\n url (`str`):\n The url points to the remote YAML file.\n\n Returns:\n (`List[dict]`)\n The list loaded from the YAML file that contains the representation\n information.\n \"\"\"\n src = requests.get(url, timeout=5).text\n return BaseRepr.load_str(src)\n\n @staticmethod\n def load_src(file_or_src: str) -> List[dict]:\n \"\"\"\n Load the information for the representation. 
We support file from local\n file/HTTP/HDFS.\n\n Args:\n file_or_src (`str`):\n The source YAML file or the URL points to the source file or a str\n loaded from source file.\n\n returns:\n (`List[dict]`)\n The YAML file loaded as list.\n \"\"\"\n # If `file_or_src` is a loacl file.\n if os.path.isfile(file_or_src):\n return BaseRepr.load_file(file_or_src)\n # If `file_or_src` from HTTP.\n elif file_or_src.lower().startswith('http'):\n return BaseRepr.load_url(file_or_src)\n # If `file_or_src` is neither a file nor url.\n return BaseRepr.load_str(file_or_src)\n","sub_path":"towhee/dag/base_repr.py","file_name":"base_repr.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"360962980","text":"\nimport sys\nimport re\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.feature_extraction.text import *\nimport numpy as np\n\ndata = []\nlabel = []\nfor line in sys.stdin:\n tmp = line.split('\\t')\n \n if len(tmp) == 4:\n data.append(re.split(\"[, \\.\\n]+\", tmp[2]+tmp[3]))\n \n else:\n data.append(re.split(\"[, \\.\\n]+\", tmp[-1]))\n \n label.append(tmp[1])\n \n#print (data.shape)\ntmp_str = \"\"\ntmp = []\n\nfor entry in data:\n \n tmp_str = \"\"\n for token in entry:\n tmp_str = tmp_str + \" \" + token\n tmp.append(tmp_str)\n \n#print(tmp)\n\ncountVec = CountVectorizer(analyzer=u'word')\nDocMatrix= countVec.fit_transform(tmp)\n\nMNB = MultinomialNB()\nMNB.fit(DocMatrix,label)\ntrain_pred_MNB = MNB.predict(DocMatrix)\nprint(classification_report(label, train_pred_MNB))\n ","sub_path":"HW2/mnb_classifier.py","file_name":"mnb_classifier.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"232722306","text":"from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7\n\n# Importing the Kratos Library\nimport KratosMultiphysics as KM\n\n# Importing the base class\nfrom . 
import co_simulation_solver_wrapper\n\n# CoSimulation imports\nimport KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools\n\nclass CoSimulationCoupledSolver(co_simulation_solver_wrapper.CoSimulationSolverWrapper):\n def __init__(self, settings, solver_name):\n super(CoSimulationCoupledSolver, self).__init__(settings, solver_name)\n\n self.solver_wrappers = self.__CreateSolverWrappers()\n self.coupling_sequence = self.__GetSolverCoSimulationDetails()\n\n ### Creating the predictors\n self.predictors_list = cs_tools.CreatePredictors(\n self.settings[\"predictors\"],\n self.solver_wrappers,\n self.echo_level)\n\n ### Creating the coupling operations\n self.coupling_operations_dict = cs_tools.CreateCouplingOperations(\n self.settings[\"coupling_operations\"],\n self.solver_wrappers,\n self.echo_level)\n\n ### Creating the data transfer operators\n self.data_transfer_operators_dict = cs_tools.CreateDataTransferOperators(\n self.settings[\"data_transfer_operators\"],\n self.echo_level)\n\n def Initialize(self):\n for solver in self.solver_wrappers.values():\n solver.Initialize()\n\n for solver in self.solver_wrappers.values():\n solver.InitializeIO(self.solver_wrappers, self.echo_level)\n # we use the Echo_level of the coupling solver, since IO is needed by the coupling\n # and not by the (physics-) solver\n\n super(CoSimulationCoupledSolver, self).Initialize()\n\n for predictor in self.predictors_list:\n predictor.Initialize()\n\n for coupling_operation in self.coupling_operations_dict.values():\n coupling_operation.Initialize()\n\n def Finalize(self):\n for solver in self.solver_wrappers.values():\n solver.Finalize()\n\n for predictor in self.predictors_list:\n predictor.Finalize()\n\n for coupling_operation in self.coupling_operations_dict.values():\n coupling_operation.Finalize()\n\n def AdvanceInTime(self, current_time):\n self.time = 0.0\n for solver in self.solver_wrappers.values():\n self.time = max(self.time, solver.AdvanceInTime(current_time))\n\n return self.time\n\n def Predict(self):\n for predictor in self.predictors_list:\n predictor.Predict()\n\n for solver in self.solver_wrappers.values():\n solver.Predict()\n\n def InitializeSolutionStep(self):\n for solver in self.solver_wrappers.values():\n solver.InitializeSolutionStep()\n\n for predictor in self.predictors_list:\n predictor.InitializeSolutionStep()\n\n for coupling_operation in self.coupling_operations_dict.values():\n coupling_operation.InitializeSolutionStep()\n\n def FinalizeSolutionStep(self):\n for solver in self.solver_wrappers.values():\n solver.FinalizeSolutionStep()\n\n for predictor in self.predictors_list:\n predictor.FinalizeSolutionStep()\n\n for coupling_operation in self.coupling_operations_dict.values():\n coupling_operation.FinalizeSolutionStep()\n\n def OutputSolutionStep(self):\n for solver in self.solver_wrappers.values():\n solver.OutputSolutionStep()\n\n def SolveSolutionStep(self):\n err_msg = 'Calling \"SolveSolutionStep\" of the \"CoSimulationCoupledSolver\"!\\n'\n err_msg += 'This function has to be implemented in the derived class!'\n raise Exception(err_msg)\n\n def _SynchronizeInputData(self, solver_name):\n to_solver = self.solver_wrappers[solver_name]\n input_data_list = self.coupling_sequence[solver_name][\"input_data_list\"]\n\n for i in range(input_data_list.size()):\n i_input_data = input_data_list[i]\n\n # interval_util = KM.IntervalUtility(i_input_data)\n # current_time = self.model_part.ProcessInfo[KratosMultiphysics.TIME]\n # if not 
interval_util.IsInInterval(current_time):\n            #     continue\n\n            # from solver\n            from_solver = self.solver_wrappers[i_input_data[\"from_solver\"].GetString()]\n            from_solver_data = from_solver.GetInterfaceData(i_input_data[\"from_solver_data\"].GetString())\n\n            # to solver\n            to_solver_data = to_solver.GetInterfaceData(i_input_data[\"data\"].GetString())\n\n            # Importing data from external solvers\n            to_solver.ImportCouplingInterfaceData(to_solver_data)\n\n            # perform the data transfer\n            self.__ExecuteCouplingOperations(i_input_data[\"before_data_transfer_operations\"])\n\n            data_transfer_operator_name = i_input_data[\"data_transfer_operator\"].GetString()\n            # TODO check the order of solvers!\n            self.__GetDataTransferOperator(data_transfer_operator_name).TransferData(from_solver_data, to_solver_data, i_input_data[\"data_transfer_operator_options\"])\n\n            self.__ExecuteCouplingOperations(i_input_data[\"after_data_transfer_operations\"])\n\n    def _SynchronizeOutputData(self, solver_name):\n        from_solver = self.solver_wrappers[solver_name]\n        output_data_list = self.coupling_sequence[solver_name][\"output_data_list\"]\n\n        for i in range(output_data_list.size()):\n            i_output_data = output_data_list[i]\n\n            # from solver\n            from_solver_data = from_solver.GetInterfaceData(i_output_data[\"data\"].GetString())\n\n            # to solver\n            to_solver = self.solver_wrappers[i_output_data[\"to_solver\"].GetString()]\n            to_solver_data = to_solver.GetInterfaceData(i_output_data[\"to_solver_data\"].GetString())\n\n            # perform the data transfer\n            self.__ExecuteCouplingOperations(i_output_data[\"before_data_transfer_operations\"])\n\n            data_transfer_operator_name = i_output_data[\"data_transfer_operator\"].GetString()\n            # TODO check the order of solvers!\n            self.__GetDataTransferOperator(data_transfer_operator_name).TransferData(from_solver_data, to_solver_data, i_output_data[\"data_transfer_operator_options\"])\n\n            self.__ExecuteCouplingOperations(i_output_data[\"after_data_transfer_operations\"])\n\n            # Importing data from external solvers\n            from_solver.ExportCouplingInterfaceData(from_solver_data)\n\n\n    def __GetDataTransferOperator(self, data_transfer_operator_name):\n        try:\n            return self.data_transfer_operators_dict[data_transfer_operator_name]\n        except KeyError:\n            raise NameError('The data-transfer-operator \"{}\" does not exist!'.format(data_transfer_operator_name))\n\n\n    def __ExecuteCouplingOperations(self, settings):\n        for i in range(settings.size()):\n            coupling_operation_name = settings[i].GetString()\n            self.coupling_operations_dict[coupling_operation_name].Execute()\n\n    def PrintInfo(self):\n        super(CoSimulationCoupledSolver, self).PrintInfo()\n\n        cs_print_info(self._Name(), \"Has the following components:\")\n        for solver in self.solver_wrappers.values():\n            solver.PrintInfo()\n\n        for predictor in self.predictors_list:\n            predictor.PrintInfo()\n\n        for coupling_operation in self.coupling_operations_dict.values():\n            coupling_operation.PrintInfo()\n\n    def Check(self):\n        super(CoSimulationCoupledSolver, self).Check()\n        for solver in self.solver_wrappers.values():\n            solver.Check()\n\n        for predictor in self.predictors_list:\n            predictor.Check()\n\n        for coupling_operation in self.coupling_operations_dict.values():\n            coupling_operation.Check()\n\n    def __CreateSolverWrappers(self):\n        ### ATTENTION, big flaw, also the participants can be coupled solvers !!!\n        import KratosMultiphysics.CoSimulationApplication.factories.solver_wrapper_factory as solvers_wrapper_factory\n        from collections import OrderedDict\n        # first create all solvers\n        solvers = {}\n        for solver_name, solver_settings in self.settings[\"solvers\"].items():\n            solvers[solver_name] = solvers_wrapper_factory.CreateSolverWrapper(solver_settings, solver_name)\n\n        # then order them according to the coupling-loop\n        # NOTE solvers that are not used in the coupling-sequence will not participate\n        solvers_map = OrderedDict()\n        for i_solver_settings in range(self.settings[\"coupling_sequence\"].size()):\n            solver_settings = self.settings[\"coupling_sequence\"][i_solver_settings]\n            solver_name = solver_settings[\"name\"].GetString()\n            solvers_map[solver_name] = solvers[solver_name]\n\n        return solvers_map\n\n    def __GetSolverCoSimulationDetails(self):\n        def ValidateAndAssignDefaultsDataList(data_list, defaults):\n            for i_data_list in range(data_list.size()):\n                data_list[i_data_list].ValidateAndAssignDefaults(defaults)\n\n        solver_cosim_details = {}\n        for i_solver_settings in range(self.settings[\"coupling_sequence\"].size()):\n            solver_settings = self.settings[\"coupling_sequence\"][i_solver_settings]\n            solver_name = solver_settings[\"name\"].GetString()\n            solver_cosim_details[solver_name] = solver_settings\n\n            ValidateAndAssignDefaultsDataList(solver_settings[\"input_data_list\"], GetInputDataDefaults())\n            ValidateAndAssignDefaultsDataList(solver_settings[\"output_data_list\"], GetOutputDataDefaults())\n\n        return solver_cosim_details\n\n    @classmethod\n    def _GetDefaultSettings(cls):\n        this_defaults = KM.Parameters(\"\"\"{\n            \"coupling_sequence\"        : [],\n            \"solvers\"                  : {},\n            \"predictors\"               : [],\n            \"coupling_operations\"      : {},\n            \"data_transfer_operators\"  : {}\n        }\"\"\")\n        this_defaults.AddMissingParameters(super(CoSimulationCoupledSolver, cls)._GetDefaultSettings())\n\n        return this_defaults\n\ndef GetInputDataDefaults():\n    return KM.Parameters(\"\"\"{\n        \"data\"                             : \"UNSPECIFIED\",\n        \"from_solver\"                      : \"UNSPECIFIED\",\n        \"from_solver_data\"                 : \"UNSPECIFIED\",\n        \"data_transfer_operator\"           : \"UNSPECIFIED\",\n        \"data_transfer_operator_options\"   : [],\n        \"before_data_transfer_operations\"  : [],\n        \"after_data_transfer_operations\"   : [],\n        \"interval\"                         : []\n    }\"\"\")\n\ndef GetOutputDataDefaults():\n    return KM.Parameters(\"\"\"{\n        \"data\"                             : \"UNSPECIFIED\",\n        \"to_solver\"                        : \"UNSPECIFIED\",\n        \"to_solver_data\"                   : \"UNSPECIFIED\",\n        \"data_transfer_operator\"           : \"UNSPECIFIED\",\n        \"data_transfer_operator_options\"   : [],\n        \"before_data_transfer_operations\"  : [],\n        \"after_data_transfer_operations\"   : [],\n        \"interval\"                         : []\n    }\"\"\")\n","sub_path":"applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_coupled_solver.py","file_name":"co_simulation_coupled_solver.py","file_ext":"py","file_size_in_byte":11237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"640296265","text":"# Задание-1:\n# Напишите функцию, округляющую полученное произвольное десятичное число\n# до кол-ва знаков (кол-во знаков передается вторым аргументом).\n# Округление должно происходить по математическим правилам (0.6 --> 1, 0.4 --> 0).\n# Для решения задачи не используйте встроенные функции и функции из модуля math.\n\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Process some integer for number cyrcle print Text.')\nparser.add_argument('Input_float', type=float , help='Float')\nparser.add_argument('Input_int_round', type=int, help='Number of character')\nround_a = int(parser.parse_args().Input_float * (10**parser.parse_args().Input_int_round) + 0.41)\nEnd = round_a / 10**parser.parse_args().Input_int_round\nprint(End)\n\n\n\n\n","sub_path":"EASY_tsk1.py","file_name":"EASY_tsk1.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"404637234","text":"from __future__ import absolute_import, division, print_function\nfrom .store import Entry\nimport sys, warnings\n\nif sys.version_info[0] < 3:\n    str = unicode\n\nEXTENSIONS = ['.yaml', '.yml']\n\n\ndef make_entry(key, dict):\n    notes = ' | '.join(str(dict[key]) for key in ['L', 'N'] if key in dict)\n    return Entry(\n        key=key,\n        user=str(dict.get('U', '')),\n        password=str(dict.get('P', '')),\n        notes=notes)\n\n\ndef parse_entries(src):\n    warnings.warn(\n        \"YAML support is deprecated and will be removed in the next version\",\n        DeprecationWarning)\n    import yaml\n    try:\n        from yaml import CLoader as Loader\n    except ImportError:\n        from yaml import Loader as Loader\n    root_node = yaml.load(src, Loader=Loader)\n    return list(_collect_entries(root_node, ''))\n\n\ndef _collect_entries(current_node, current_key):\n    # list of accounts?\n    if isinstance(current_node, list):\n        for child_node in current_node:\n            assert isinstance(child_node, dict), \"expected list of accounts\"\n            yield make_entry(current_key, child_node)\n        return\n\n    # single acccount?\n    if isinstance(current_node, dict) and 'P' in current_node:\n        yield make_entry(current_key, current_node)\n        return\n\n    # single password?\n    if not isinstance(current_node, dict):\n        yield Entry(key=current_key,\n                    user='',\n                    password=str(current_node),\n                    notes='')\n        return\n\n    # otherwise: subtree!\n    for key, child_node in current_node.items():\n        # ignore entries in parentheses\n        if key.startswith('('):\n            continue\n\n        # recurse\n        for entry in _collect_entries(child_node, current_key + '.' + key if\n                                      current_key else key):\n            yield entry\n","sub_path":"pw/_yaml.py","file_name":"_yaml.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"10559241","text":"# /usr/bin/env python\n# -*- coding : utf-8 -*-\n\n__author__ = 'Sizhan Liu'\n__version__ = \"1.0\"\n\n'''\nKeysight N1000A Chasis interface\n'''\n\npath1 = r'/home/whhw/Desktop/AutoTest/inf_optics/interface'\n\nimport sys\nsys.path.append(path1)\n\nimport time\nimport re\nimport pylab as pl\nfrom TCPinterface import TCP\n\n\nip = '192.168.48.57'\nport = 5025\n\n############### --Connectiong -- ###############\nclass N1000A(TCP):\n    def __init__(self, host, port):\n        '''Setup a remote connection for FTB'''\n        self._host = host\n        self._port = port\n        super().__init__(self._host, self._port)\n        time.sleep(1)\n        \n        command = \"*CLS\"\n        self.write(command)\n        time.sleep(1)\n\n# check if you expect result\n        A = self.deviceID.decode().replace(\"\\n\",\"\").lower()\n        if 'keysight technologies' in A:\n            print(\"--> Welcome to Infinera SHG team! :) --\")\n        else:\n            print('--> Wrong connection!')\n            self.close()\n\n    def close(self):\n        '''Close remote connection for FTB-8'''\n        self.TCP_close()\n        print(self.__class__.__name__ + ' had been disconnected!')\n\n########## --data parse for luna only -- ###############\n    def data_pasre(self, command):\n        return command.decode().replace(\"\\n\",'')\n\n########## --super class alternative method --- #######\n#    def read(self):\n#        '''\n#        The Luna response time may larger than TCP time out time.\n#        Luna will response a binary list with EOS \"\\x00\". \n#        '''\n#        data = TCP.read_raw(self)\n#        while True:\n#            try:\n#                if len(data) <= 1 or data[-1]!=0:\n#                    data += TCP.read_raw(self)\n#                else:\n#                    break\n#                #return data \n#            except:\n#                pass\n#        return data \n    \n    def write(self,cmd):\n        '''Re-write 'write' command '''\n        cmd = str(cmd) + '\\n'\n        TCP.write(self,cmd)\n    \n    \n    def query(self,cmd):\n        '''query result from Luna'''\n        self.write(cmd)\n        data = self.read_raw()\n        return data\n\n#    def inspection(self,command =\"SYST:ERR?\" ,exception = '0'):\n#        while True:\n#            try:\n#                Q = self.query(command)\n#                #print (type(Q))\n#                #print (Q)\n#                if self.data_pasre(Q) == exception:\n#                    break\n#                else:\n#                    print(\"--> Verifing, or use Ctrl+C to end this process.\")\n#            except:\n#                pass\n\n\n#################---Traffic analyser ---###############\n    @property\n    def deviceID(self):\n        return self.query(\"*IDN?\")\n    def CLS(self):\n        self.write(\"*CLS\\n\")\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n    \n    ","sub_path":"labdevice/Keysight_N1000A.py","file_name":"Keysight_N1000A.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"153433816","text":"import random\nimport time\nfrom contextlib import contextmanager\nimport ctypes\nfrom array import array\n\n\n@contextmanager\ndef timer(prefix=None):\n    t = time.monotonic()\n    yield\n    t = time.monotonic() - t\n    if prefix is not None:\n        print('{:15}'.format(prefix), end='')\n    if t < 0.001:\n        print('{:.1f}us'.format(t * 1000000))\n    elif t < 1:\n        print('{:.1f}ms'.format(t * 1000))\n    else:\n        print('{:.1f}s'.format(t))\n\n\nlib = ctypes.CDLL('./bench.dll')\n\n\ndef benchmark_pi():\n    print('*** Pi ***')\n\n    pi = lib.pi\n    pi.argtypes = [ctypes.c_longlong]\n    pi.restype = ctypes.c_double\n\n    n = 10000000\n\n    with timer('Go'):\n        gopy = pi(n)\n\n    with timer('Python'):\n        pypi = sum((-1) ** i * 4 / (i * 2 + 1) for i in range(n))\n\n    print('Go pi =', gopy)\n    print('Py pi =', pypi)\n\n\ndef benchmark_list_conversion():\n    print('*** List Convertions ***')\n\n    n = 10000000\n    with timer('Alloc'):\n        nums = list(range(n))\n\n    with timer('Ctypes-to'):\n        buf = (ctypes.c_double * n)(*nums)\n\n    with timer('Array-to'):\n        arr = array('d', nums)\n        (ctypes.c_double * n).from_buffer(arr)\n\n    with timer('Ctypes-from'):\n        list(buf)\n\n    with timer('Array-from'):\n        list(arr)\n\n\ndef benchmark_shuffle():\n    print('*** Shuffle ***')\n    shuffle = lib.shuffle\n    shuffle.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.c_longlong]\n\n    n = 10000000\n    with timer('Alloc'):\n        nums = list(range(n))\n\n    with timer('Go'):\n        arr = array('d', nums)\n        buf = (ctypes.c_double * n).from_buffer(arr)\n        shuffle(buf, n)\n        list(arr)\n\n    with timer('Random'):\n        random.shuffle(nums)\n\n    # Importing numpy.random make ctypes.CDLL not find the dll. :-\\\n    from numpy import random as nprandom\n\n    with timer('Numpy'):\n        nprandom.shuffle(nums)\n\n\ndef benchmark_dot():\n    print('*** Dot ***')\n    dot = lib.dot\n    dot.argtypes = [\n        ctypes.POINTER(ctypes.c_double),\n        ctypes.c_longlong,\n        ctypes.POINTER(ctypes.c_double),\n        ctypes.c_longlong,\n    ]\n    dot.restype = ctypes.c_double\n\n    import numpy\n\n    n = 100000000\n    t = 10\n\n    with timer('Alloc'):\n        arr1 = numpy.ndarray([n], dtype=numpy.float64)\n        arr2 = numpy.ndarray([n])\n        arr1[:] = 1\n        arr2[:] = 1\n        # print(arr1, arr2)\n\n    with timer('Go'):\n        for _ in range(t):\n            p1 = arr1.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n            p2 = arr2.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n            dot(p1, len(arr1), p2, len(arr2))\n    \n    with timer('Numpy'):\n        for _ in range(t):\n            arr1.dot(arr2)\n\n\n# benchmark_pi()\n# print()\n# benchmark_list_conversion()\n# print()\nbenchmark_shuffle()\n# print()\nbenchmark_dot()\n","sub_path":"src/bench/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"645884478","text":"def solution(operations):\r\n    answer = []\r\n\r\n    result = []\r\n    for operation in operations: \r\n        if operation.startswith('I '):\r\n            result.append(int(operation[2:]))\r\n        \r\n        if operation == ('D -1'):\r\n            if len(result) == 0: continue\r\n            result.pop(0)\r\n        elif operation == ('D 1'):\r\n            if len(result) == 0: continue\r\n            result.pop()\r\n        result.sort()\r\n\r\n    if len(result) == 0:\r\n        answer = [0, 0]\r\n    else:\r\n        answer += (result[-1], result[0])\r\n    \r\n    return answer","sub_path":"heap/42628.py","file_name":"42628.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"623488761","text":"# TODO: multiple clipboard ( clipboard event )\n# TODO: cheater\n# TODO: snippet generator\n# TODO: get password\n# TODO: get credential for current url\n# TODO: functions per app\n# TODO: control media player\n# TODO: otra combinacion de teclas. ., dispara otros eventos en pycharm\n# TODO: Script para supervisorctl\n\nimport sys\nfrom time import sleep, time\nimport ctypes as ct\nfrom ctypes.util import find_library\nfrom utils.commands import cmds\n\nfrom utils.windows import *\nfrom utils.app_navigation import *\nfrom utils.selection import *\nimport subprocess\nimport re\nimport pdb\nimport platform\nfrom keys import *\nimport socket\n\n\n# linux only!\nassert(\"linux\" in sys.platform)\n\nx11 = ct.cdll.LoadLibrary(find_library(\"X11\"))\ndisplay = x11.XOpenDisplay(None)\n\n# this will hold the keyboard state. 32 bytes, with each\n# bit representing the state for a single key.\nkeyboard = (ct.c_char * 32)()\n\nlast_pressed = set()\nlast_pressed_adjusted = set()\nlast_modifier_state = {}\ncaps_lock_state = 0\n\n\ndef test():\n    subprocess.call( \"python3 /home/m/proyectos/key_master/gui_socket.py&\", shell=True)\n    # subprocess.check_output( \"python /home/m/proyectos/key_master/menues.py &\", shell=True)\n\ndef gui_socket():\n    current_window_id = int(subprocess.check_output(['xdotool', 'getwindowfocus']))\n    try:\n        pid = subprocess.check_output(\"ps -sa | grep 'gui_socket' | grep -v grep\", shell=True).decode('utf-8').split(\"\\n\")[0].split()[1]\n        if pid:\n            pid = int(pid)\n            subprocess.call( \"kill \" + str(pid), shell=True)\n        else:\n            subprocess.call( \"python3 /home/m/proyectos/key_master/gui_socket.py &>-\", shell=True)\n    except:\n        subprocess.call( \"python3 /home/m/proyectos/key_master/gui_socket.py &>-\", shell=True)\n\ndef clear():\n    subprocess.call( \"cls\" if platform.system() == \"Windows\" else \"clear\", shell=True)\n\ndef fetch_keys_raw():\n    x11.XQueryKeymap(display, keyboard)\n    return keyboard\n\ndef fetch_keys():\n    global caps_lock_state, last_pressed, last_pressed_adjusted, last_modifier_state\n    keypresses_raw = fetch_keys_raw()\n\n\n    # check modifier states (ctrl, alt, shift keys)\n    modifier_state = {}\n    # for mod, (i, byte) in modifiers.iteritems():\n    # for mod, (i, byte) in modifiers:\n    for mod, value in modifiers.items():\n        i, byte = value\n        modifier_state[mod] = bool(ord(keypresses_raw[i]) & byte)\n\n    # shift pressed?\n    shift = 0\n    for i, byte in shift_keys:\n        if ord(keypresses_raw[i]) & byte:\n            shift = 1\n            break\n\n    # caps lock state\n    if ord(keypresses_raw[8]) & 4: caps_lock_state = int(not caps_lock_state)\n\n\n    # aggregate the pressed keys\n    pressed = []\n    for i, k in enumerate(keypresses_raw):\n        o = ord(k)\n        if o:\n            for byte,key in key_mapping.get(i, {}).items():\n                if byte & o:\n                    if isinstance(key, tuple): key = key[shift or caps_lock_state]\n                    pressed.append(key)\n\n\n    tmp = pressed\n    pressed = list(set(pressed).difference(last_pressed))\n    state_changed = tmp != last_pressed and (pressed or last_pressed_adjusted)\n    last_pressed = tmp\n    last_pressed_adjusted = pressed\n\n    if pressed: pressed = pressed[0]\n    else: pressed = None\n\n\n    state_changed = last_modifier_state and (state_changed or modifier_state != last_modifier_state)\n    last_modifier_state = modifier_state\n\n    return state_changed, modifier_state, pressed\n\ndef log(done, callback, sleep_interval=.005):\n    while not done():\n        sleep(sleep_interval)\n        changed, modifiers, keys = fetch_keys()\n        if changed: callback(time(), modifiers, keys)\n\ndef log2(buffer, done, callback, sleep_interval=.005):\n    print (\"log2 !!!!!\")\n    while not done():\n        sleep(sleep_interval)\n        changed, modifiers, keys = fetch_keys()\n        if changed: callback(buffer, time(), modifiers, keys)\n\n\n\n\n\ndef get_menu_or_exec(buffer, t, modifiers, keys, *args, **kwargs):\n    clear()\n    buffer.buffer += str(keys)\n    selected_commands = { k: v for k, v in cmds.items() if k.startswith(buffer.buffer) }\n\n    if len(selected_commands) == 0:\n        buffer.buffer = str(keys)\n    elif len(selected_commands) == 1 and str(buffer.buffer) in cmds.keys():\n        remove = 'BackSpace ' * len(str(buffer.buffer))\n        subprocess.call( 'xdotool key ' + remove , shell=True)\n        globals()[cmds[str(buffer.buffer)]['command']]()\n        buffer.buffer = \"\"\n\n    cli_menu = ''\n    html_menu = ''\n    selected_commands = { k: v for k, v in cmds.items() if k.startswith(buffer.buffer) }\n    if len(selected_commands) > 0:\n        cli_menu += 10*\"=\" + \"\\n\"\n        cli_menu += \"Menu\" + \"\\n\"\n        cli_menu += 10*\"-\" + \"\\n\"\n\n        for cmd in selected_commands:\n            cli_menu += cmd + \" \" + cmds[cmd]['description'] + \"\\n\"\n            html_menu += ''\n        cli_menu += 10*\"=\" + \"\\n\"\n    elif len(selected_commands) == 0:\n        cli_menu += 10*\"=\" + \"\\n\"\n        cli_menu += \"Menu\" + \"\\n\"\n        cli_menu += 10*\"-\" + \"\\n\"\n        for cmd in cmds.keys():\n            cli_menu += cmd + \" \" + cmds[cmd]['description'] + \"\\n\"\n            html_menu += ''\n        cli_menu += 10*\"=\" + \"\\n\"\n\n    cli_menu += \"buffer: \" + buffer.buffer\n    html_menu += ''\n\n    print (cli_menu)\n\n    # Send to gui_socket\n    try:\n        host = '127.0.0.1'\n        port = 5002\n        cli_menu += '-'\n        s = socket.socket()\n        s.connect((host, port))\n        s.send(html_menu.encode('utf-8'))\n        data = s.recv(1024)\n        data = data.decode('utf-8')\n\n        # TODO: Remove this PATCH\n        # Start PATCH: reponse comes with weird codes, so I remove them\n        data = re.sub('\\x00', '', data)[1:]\n        # End PATCH\n\n        # print(\"recieve from server: \" + data)\n        s.close()\n    except:\n        print(\"ERROR in Socket Connection\")\n\n\n\nclass Buffer():\n    buffer = \"\"\n\n    def __str__(self):\n        return self.buffer\n\nif __name__ == \"__main__\":\n    now = time()\n    buffer = Buffer()\n    done = lambda: time() > now + 60\n    # def print_keys(t, modifiers, keys):\n    #     clear()\n    #     print \"%.2f %r %r\" % (t, keys, modifiers)\n\n    # log(done, print_keys)\n    log2(buffer, done, get_menu_or_exec)\n","sub_path":"keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":6425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"402858014","text":"#\n# Project: fuzzrl\n# Created by bbrighttaer on 9/5/18\n#\n\n\n#\n# Project: fuzzrl\n# Created by bbrighttaer on 9/3/18\n#\n\n\nimport deap.tools as tools\nimport fuzzrl.core.ga.schedule as sch\nimport fuzzrl.core.plot.analysis as ana\nimport gym\nimport matplotlib.pyplot as plt\nfrom fuzzrl.core.algorithm.alg import Algorithm\nfrom fuzzrl.core.conf import Constants as const\nfrom fuzzrl.core.conf.parser import *\nfrom fuzzrl.core.ga.genalg import GeneticAlgorithm\nfrom fuzzrl.core.ga.op import Operator\nfrom fuzzrl.core.io.memory import Cache\nfrom fuzzrl.core.io.randomprocess import OrnsteinUhlenbeckProcess\nfrom fuzzrl.core.io.simdata import Document, Text, Line\nfrom matplotlib import style\nfrom fuzzrl.core.conf import Defuzz as dfz\n\nstyle.use(\"seaborn-paper\")\n\nconst.MF_TUNING_RANGE = [-0.001, 0.001]\nconst.LEARN_RULE_OP = False\nconst.ACTION_SPACE = const.CONTINUOUS\n\nNUM_OF_GENS = 100\nNUM_EPISODES_PER_IND = 1\nMAX_TIME_STEPS = 500\nPOP_SIZE = 20\nLIN_VARS_FILE = \"res/mountain_car_linvars.xml\"\nGFT_FILE = \"res/mountain_car.xml\"\nLOAD_INIT_POP = False\nAPPLY_EVO = True\nQLFD_IND_FILE = \"data/qualified.txt\"\nSAVE_BEST = True\nSCORE_THRESHOLD = 90.0\n\n\ndef reward_shaping(pos, r):\n    # Adjust reward based on car position\n    reward = pos + 0.5\n\n    # Adjust reward for task completion\n    if pos >= 0.5:\n        reward = r + 1\n    return reward\n\n\ndef main():\n    # creates an environment\n    env = gym.make(\"MountainCarContinuous-v0\")\n\n    # chart series\n    weighted_avg = ana.WeightedAvg(beta=0.9)\n    all_ind_series = ana.Series(name=\"Individuals Performance\")\n    avg_series = ana.Series(name=\"Average (window = {})\".format(round((1 / (1 - weighted_avg.beta)))))\n    gen_series = ana.Series(name=\"Generation Performance\")\n    mut_prob_series = ana.Series(name=\"Mutation probability\")\n\n    # create linguistic variables in a registry\n    reg = xmlToLinvars(open(LIN_VARS_FILE).read())\n\n    # create GFT with linguistic variables in the registry\n    reg = xmlToGFT(open(GFT_FILE).read(), registry=reg, defuzz_method=dfz.centroid)\n\n    # create GA instance with the registry object\n    ga = GeneticAlgorithm(registry=reg, seed=5)\n\n    # create a mutation probability schedule\n    mut_sch = sch.ExponentialDecaySchedule(initial_prob=.1, decay_factor=1e-2)\n\n    # create GFT algorithm object with the registry\n    rand_proc = OrnsteinUhlenbeckProcess(theta=0.01)\n    alg = Algorithm(registry=reg, random_process=rand_proc)\n\n    # create a cache for managing simulation data\n    cache = Cache(reg.gft_dict.keys())\n\n    # get initial population\n    if LOAD_INIT_POP:\n        pop = ga.load_initial_population(QLFD_IND_FILE, POP_SIZE)\n        pop = pop[::-1]\n        print(\"Num. of loaded individuals =\", len(pop))\n    else:\n        pop = ga.generate_initial_population(POP_SIZE)\n\n    # initialize epoch or generation counter\n    epoch = 0\n\n    # initialize individual counter\n    ind_count = 0\n\n    # create an object for retrieving input values\n    obs_accessor = MountainCarObs()\n\n    # perform the simulation for a specified number of generations\n    while epoch < NUM_OF_GENS:\n\n        # Run the simulation with the current population\n        for ind in pop:\n            ind_count += 1\n\n            # initialize reward accumulator for the individual\n            total_reward = 0\n\n            # configure the GFT with the current individual\n            alg.configuregft(chromosome=ind)\n\n            # control the environment with the configured GFT\n\n            # reset the environment\n            observation = env.reset()\n\n            # set the received observation as the current array for retrieving input values\n            obs_accessor.current_observation = observation\n\n            # run through the time steps of the simulation\n            for t in range(MAX_TIME_STEPS):\n\n                # show the environment\n                env.render()\n\n                # since only one agent applies to this case study set a dummy agent ID\n                agent_id = 0\n\n                # get an action\n                actions_dict, input_vec_dict = alg.executebfc(obs_accessor, agent_id, add_noise=True)\n\n                # mark the GFSs that executed for the agent in this time step\n                cache.mark(output_dict_keys=actions_dict.keys())\n\n                # apply the selected action to the environment and observe feedback\n                next_state, reward, done, _ = env.step(list(actions_dict.values()))\n                reward = reward_shaping(pos=next_state[0], r=reward)\n\n                # decompose the received reward\n                reward_dict = cache.decomposeReward(reward)\n\n                # create experiences for the agent with respect to each GFSs that executed for the agent\n                exp_dict = cache.createExperiences(agent_id=agent_id, action=list(actions_dict.values()),\n                                                   dec_reward_dict=reward_dict,\n                                                   input_vec_dict=input_vec_dict, output_dict=actions_dict,\n                                                   next_state_dict=None)\n\n                # add the experiences of the agent to the cache\n                cache.addExperiences(time_step=t, exp_dict=exp_dict)\n\n                # set the received observation as the current array for retrieving input values\n                obs_accessor.current_observation = next_state\n\n                # accumulate the rewards of all time steps\n                total_reward += reward\n\n                # if the episode is over end the current episode\n                if done:\n                    break\n\n            # save contents of the cache and clear it for the next episode\n            # cache.compute_states_value(gamma=.9)\n            cache.save_csv(path=\"data/\")\n            print(\n                \"Episode: {t}/{T} | score: {r}\".format(t=ind_count, T=(NUM_OF_GENS * POP_SIZE),\n                                                       r=total_reward))\n\n            # set the return from the environment as the fitness value of the current individual\n            ind.fitness.values = (total_reward,)\n\n            # save qualified individual\n            if SAVE_BEST and total_reward >= SCORE_THRESHOLD:\n                document = Document(name=QLFD_IND_FILE)\n                document.addline(line=Line().add(text=Text(str(ind))))\n                document.save(append=True)\n\n            # store the performance of this individual in the corresponding series\n            all_ind_series.addrecord(ind_count, total_reward)\n            weighted_avg.update(total_reward)\n            avg_series.addrecord(ind_count, weighted_avg.value)\n\n        # Logging and other I/O operations\n        print(\"Epoch {} completed\".format(epoch))\n        record = ga.stats.compile(pop)\n        print(\"Statistics for epoch {} = {}\".format(epoch, record))\n        ga.logbook.record(epoch=epoch, **record)\n\n        # store max return\n        gen_series.addrecord(epoch, record[\"max\"])\n        if APPLY_EVO:\n            # perform evolution\n            offspring = applyEvolution(population=pop, ga_alg=ga, mut_sch=mut_sch, epoch=epoch)\n\n            # set offspring as current population\n            pop = offspring\n\n        # update mutation probability series\n        mut_prob_series.addrecord(epoch, mut_sch.prob)\n        # increment epoch\n        epoch += 1\n\n    # print logbook\n    ga.logbook.header = \"epoch\", \"avg\", \"std\", \"min\", \"max\"\n    print(ga.logbook)\n\n    # plotting\n    plot_charts(avg_series, mut_prob_series)\n\n    # terminates environment\n    env.close()\n\n\ndef plot_charts(avg_series, mut_prob_series):\n    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, dpi=300)\n    # epochs = ga.logbook.select(\"epoch\")\n    # fit_avg = ga.logbook.select(\"avg\")\n    # fig0 = plt.figure(0)\n    # plt.plot(epochs, fit_avg)\n    # plt.xlabel(\"epoch\")\n    # plt.ylabel(\"avg\")\n    # plt.grid(True)\n    # fig1 = plt.figure(1)\n    ax1.set_title(avg_series.name)\n    ax1.set_xlabel(\"individual\")\n    ax1.set_ylabel(\"score\")\n    ax1.plot(avg_series.data()['x'], avg_series.data()['y'], linewidth=avg_series.linewidth, label=avg_series.name,\n             color=avg_series.color,\n             marker=avg_series.marker,\n             linestyle=avg_series.linestyle)\n    # ax1.legend(fancybox=True, shadow=True, fontsize='small')  # loc=\"upper right\"\n    # plt.grid(True)\n    # fig2 = plt.figure(2)\n    ax2.set_title(mut_prob_series.name)\n    ax2.set_xlabel(\"epoch\")\n    ax2.set_ylabel(\"probability\")\n    ax2.plot(mut_prob_series.data()['x'], mut_prob_series.data()['y'], linewidth=mut_prob_series.linewidth,\n             label=mut_prob_series.name,\n             color=mut_prob_series.color,\n             marker=mut_prob_series.marker,\n             linestyle=mut_prob_series.linestyle)\n    # ax2.legend(fancybox=True, shadow=True, fontsize='small')  # loc=\"upper right\"\n    plt.grid(True)\n    fig.suptitle(\"cartpole simulation\")\n    plt.subplots_adjust(left=0.2, wspace=0.8, top=0.8)\n    plt.show()\n\n\ndef applyEvolution(population, ga_alg, mut_sch, epoch):\n    \"\"\"\n    Helper function to apply one step of evolution to the submitted population\n\n    Parameters\n    ------------\n    :param epoch: current epoch\n    :param mut_sch: The mutation probability schedule in use\n    :param population: The population for the current evolution task\n    :param ga_alg: An instance of the GeneticAlgorithm class\n    :return: Offspring of the evolution exercise\n    \"\"\"\n    # create selection operator\n    selargs = {\"k\": len(population),\n               \"tournsize\": 3}\n    selop = Operator(tools.selTournament, **selargs)\n\n    # create crossover operator\n    crossargs = {\"indpb\": 0.2}\n    crossop = Operator(tools.cxUniform, **crossargs)\n\n    # create mutation operator\n    mutargs = {\"mu\": 0,\n               \"sigma\": 0.1,\n               \"indpb\": 0.1}\n    mutop = Operator(tools.mutGaussian, **mutargs)\n\n    # Perform one step of evolution\n    prob = mut_sch.get_prob(epoch)\n    # print(\"{} - {}\".format(epoch, prob))\n    offspring = ga_alg.evolve(population, selop=selop, crossop=crossop,\n                              mutop=mutop, mut_prob=prob, cross_prob=0.7)\n    return offspring\n\n\nclass MountainCarObs(object):\n    def __init__(self):\n        self.current_observation = None\n\n    def getCarPosition(self, agentId):\n        assert self.current_observation is not None\n        return self.current_observation[0]\n\n    def getCarVelocity(self, agentId):\n        assert self.current_observation is not None\n        return self.current_observation[1]\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"fuzzrl_projects/archive/mountainCarCont/mountaincarcont.py","file_name":"mountaincarcont.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"198061671","text":"#! python3\n\nimport re, pyperclip\n\n#--------REGEX FOR PHONENUMBERS\nphoneregex = re.compile(r\"\"\"(\n# 720-325-7676, 325-7676, (720) 325-7676, 325-7676 ext. 12345, ext 123456, x12345\n((\\d\\d\\d)|(\\(\\d\\d\\d\\)))?   # area code (optional)\n(\\s|-)   # first separator\n\\d\\d\\d   # first 3 digits\n-   # second separator\n\\d\\d\\d\\d   # last four digits\n(((ext(\\.)?\\s)|x)(\\d{2,5}))?   # extension (optional)\n)\"\"\", re.VERBOSE)\n#--------REGEX FOR EMAILS\nemailregex = re.compile(r\"\"\"\n# something@something.com\n[a-zA-Z0-9_.+]+   # email name\n@   # @ symbol\n[a-zA-Z0-9_.+]+   # domain name\n\"\"\", re.VERBOSE)\n#--------GET INFO FROM CLIPBOARD\ntext = pyperclip.paste()\n#--------EXTRACT INFO\nextract_phone = phoneregex.findall(text)\nextract_email = emailregex.findall(text)\n\n#--------TAKES FULL PHONE NUMBER OUT OF FIRST TUPLE\nallphonenums = []\nfor phonenum in extract_phone:\n    allphonenums.append(phonenum[0])\n    \n#--------REMOVES \"U\" OFF OF EMAILS\nallcleanemails = []\nfor mail in extract_email:\n    if \"U\" in mail[-1]:\n        mail = mail[:-1]\n    Unamecheck = mail[:2]\n    #IF \"U\" IS PART OF EMAIL --> PASS\n    if str(\"U\"+mail[0]).title() == Unamecheck:\n        pass\n    elif \"U\" in mail[0]:\n        mail = mail[1:]\n    allcleanemails.append(mail)\n\n#--------COPY EXTRACTED INFO TO CLIPBOARD\nresults = \"\\n\".join(allphonenums) + \"\\n\" + \"\\n\".join(allcleanemails)\npyperclip.copy(results)\n\n#PRINT RESULTS \nprint(results)\n","sub_path":"practice/email_phone_scraper.py","file_name":"email_phone_scraper.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"553786390","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport json\n\n\ndef do(d):\n    with open('dic_for_test.json','w') as f:\n        json.dump(d, f, indent=0, ensure_ascii=False, encoding='utf-8', separators=(', ', ': '))#pep8写法\n\nif __name__ == \"__main__\":\n    d = {'你': {'attr': ['per_pro', 'per_pro'], 'grammar_key': 'personal_data'},\n         '来自': {'attr': ['verb'], 'grammar_key': 'motherland'},\n         '火星': {'attr': ['noun'], 'grammar_key': ''}\n         }\n    do(d)\n","sub_path":"wechatrobot/gen_dic_for_test.py","file_name":"gen_dic_for_test.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"5747367","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\n\n# Initiating test\nprint(\"Hello, and welcome to this FoldX data processing program! :)\")\nprint(\"This is the test for cmk.\")\n\n# set working directory to folder with FoldX output files\nos.chdir(\"\") # <- Insert working directory pathway between \"\"\n\n# Import and clean up files\nfiles = os.listdir()\ni = 0\nwhile i < len(files):\n    if files[i][0:9] != \"energies_\":\n        del(files[i])\n    else:\n        i += 1\n\n# Identify and save position number for each file\nnumberList = []\nfor i in range(0, len(files)):\n    number = re.search(r\"\\D(\\d+)\\D\", files[i])\n    numberList.append(number.group(1))\n    \n# Create pair lists with file and position number\nPairList =[]\nPositionTable = []\nfor i in range(0,len(files)):\n    PairList.append([files[i],int(numberList[i])])\n    \n# Sort pair lists\nPairList.sort(key = lambda x: x[1])\n    \n# Prepare data tables \nEnergyTable = []\nLabelTable = []\n\n# Lists for sorting mutations into three categories\nMutSta = []\nMutDes = []\nMutNeu = []\n\n# Processing of each file\nfor i in range(0,len(PairList)):\n    \n    # Identify and save position\n    position = PairList[i][1]\n    PositionTable.append(position)\n\n    # Import table with energy reads, set index\n    energies = pd.read_csv(PairList[i][0], header = None, delimiter = r\"\\s+\")\n\n    # Extract columns with names, clean up, and save\n    AAnames = np.array(energies.iloc[:,0])\n    AAnames[0] = \"WTref\"\n    for s in range(1, len(AAnames)):\n        AAnames[s] = AAnames[s][0:3] + str(position)\n    LabelTable.append(AAnames)\n\n    # Extract energies from file\n    ddG = np.array(energies.iloc[:,1])\n\n    # Calculating dddG values\n    WTbase = float(ddG[0])\n    dddG = []\n    for r in range(0, len(ddG)):\n        dddG.append(WTbase - float(ddG[r]))\n\n    # Add data to table\n    EnergyTable.append(dddG)\n    \n    # Sort mutations into categories\n    stable = 0\n    destable = 0\n    neutral = 0\n    for m in range(1, len(dddG)):\n        if dddG[m] > 0.5:\n            stable += 1\n        elif dddG[m] < -0.5:\n            destable += 1\n        else:\n            neutral += 1\n    MutSta.append(stable)\n    MutDes.append(destable)\n    MutNeu.append(neutral)\n\n##### Data visualisation\n\nprocessing = True\nwhile processing == True:\n    \n    print(\"Alright, what would you like to do?\")\n    print(\"1: Graph\")\n    print(\"2: Position data\")\n    print(\"3: Only destabilised\")\n    print(\"4: Exit\")\n    \n    func = input(\"Please answer here (1/2/3/4): \")\n    \n    # Create bar chart over mutations\n    if func == \"1\":\n\n        n=len(PositionTable)\n        st = np.array(MutSta)\n        ds = np.array(MutDes)\n        ne = np.array(MutNeu)\n        ind=np.arange(n)\n        width=1\n        plt.rcParams[\"figure.figsize\"] = (50,3)\n\n        p1=plt.bar(ind,st,width,color=\"green\")\n        p2=plt.bar(ind,ds,width,color=\"red\",bottom=st)\n        p3=plt.bar(ind,ne,width,color=\"blue\",bottom=st+ds)\n\n        plt.title(\"Predicted stabilities of cmk mutants\", fontsize = 12)\n        plt.xticks(ind+width/2, PositionTable, fontsize = 12, rotation=90)\n        plt.yticks(fontsize=12)\n        plt.ylim([0,25])\n        plt.legend((p1[0], p2[0], p3[0]), ('Stabilising', 'Destabilising', 'Neutral'), loc=2, fontsize=11, ncol=4, framealpha=0, fancybox=True)\n        \n        # Save figure\n        save = input(\"Do you want to save the figure? (y/n): \")\n        \n        if save == \"y\":\n            name = input(\"Name the file: \")\n            plt.savefig(name + \".png\") \n        else:\n            pass\n\n        plt.show()\n    \n    # Extract ddG data for specific positions\n    elif func == \"2\":\n        \n        scan = input(\"Which position?: \")\n        \n        t = 0\n        while PositionTable[t] != int(scan):\n            t +=1\n        \n        for i in range(0, len(LabelTable[t])):\n            print(LabelTable[t][i], \"\\t\", round(EnergyTable[t][i],5))\n        \n        t = 0\n    \n    # Show only destabilised mutations\n    elif func == \"3\":\n        \n        n=len(PositionTable)\n        ds = np.array(MutDes)\n        ind=np.arange(n)\n        width=1\n        plt.rcParams[\"figure.figsize\"] = (50,3)\n\n        p2=plt.bar(ind,ds,width,color=\"red\")\n\n        plt.title(\"Predicted stabilities of cmk mutants\", fontsize = 12)\n        plt.xticks(ind+width/2, PositionTable, fontsize = 12, rotation=90)\n        plt.yticks(fontsize=12)\n        plt.ylim([0,25])\n        plt.savefig(\"cmk_destabilised\", transparent = True, )\n        \n        save = input(\"Do you want to save the figure? (y/n): \")\n        \n        # Save figure\n        if save == \"y\":\n            name = input(\"Name the file: \")\n            plt.savefig(name + \".png\") \n        else:\n            pass\n\n        plt.show()\n    \n    # End script\n    elif func == \"4\":\n        processing = False\n    \nprint(\"Goodbye!\")\n    \n    \n\n\n\n","sub_path":"FoldX_MutationAnalysis_cmk.py","file_name":"FoldX_MutationAnalysis_cmk.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"354937158","text":"import os\nimport sys\nimport launch\nimport launch_ros.actions\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch_ros.actions import ComposableNodeContainer\nfrom launch_ros.descriptions import ComposableNode\n\ndef generate_launch_description():\n    yolox_ros_share_dir = get_package_share_directory('yolox_ros_cpp')\n    yolox_param_yaml = os.path.join(yolox_ros_share_dir, \"param\", \"nano_trtexec.yaml\")\n\n    container = ComposableNodeContainer(\n        name='yolox_container',\n        namespace='',\n        package='rclcpp_components',\n        executable='component_container',\n        composable_node_descriptions=[\n            ComposableNode(\n                package='v4l2_camera',\n                plugin='v4l2_camera::V4L2Camera',\n                name='v4l2_camera',\n                parameters=[{\n                    \"image_size\": [640,480]\n                }]),\n            ComposableNode(\n                package='yolox_ros_cpp',\n                plugin='yolox_ros_cpp::YoloXNode',\n                name='yolox_ros_cpp',\n                parameters=[yolox_param_yaml],\n            )\n        ],\n        output='screen',\n    )\n\n    rqt_graph = launch_ros.actions.Node(\n        package=\"rqt_graph\", executable=\"rqt_graph\",\n    )\n\n    return launch.LaunchDescription([\n        container,\n        rqt_graph,\n    ])","sub_path":"yolox_ros_cpp/yolox_ros_cpp/launch/yolox_tensorrt.launch.py","file_name":"yolox_tensorrt.launch.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"406966751","text":"# -*- coding: utf-8 -*-\nimport sqlite3\n\n\nclass MyDataBase:\n    def __init__(self):\n        ...\n\n    @staticmethod\n    def get_inform_from_db(db):\n        con = sqlite3.connect(db)\n        cur = con.cursor()\n        master = 'sqlite_master'\n        query = \"SELECT name FROM \" + master + \" WHERE type = 'table'\"\n        cur.execute(query)\n        data = cur.fetchall()\n        return data\n\n    @staticmethod\n    def sqlite3_simple_read_db(data_base, table, column_name=None):\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        query_columns = 'pragma table_info(' + table + ')'\n        cur.execute(query_columns)\n        columns_description = cur.fetchall()\n        columns_names = []\n        for column in columns_description:\n            columns_names.append(column[1])\n        if column_name is None:\n            query = 'SELECT * FROM ' + table\n            cur.execute(query)\n            data = cur.fetchall()\n        else:\n            query = ' SELECT ' + column_name + ' FROM ' + table\n            cur.execute(query)\n            data = cur.fetchall()\n            new_data = []\n            for element in data:\n                new_data.append(element[0])\n            data = new_data\n\n        cur.close()\n        con.close()\n        return columns_names, data\n\n    @staticmethod\n    def add_record_table(lst, data_base, table):\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        cur.execute('INSERT INTO ' + table + ' VALUES (%s)' % ','.join('?' * len(lst)), lst)\n        con.commit()\n        cur.close()\n        con.close()\n\n    @staticmethod\n    def simple_search_from_db(data_base: str, table: str, column_name: str, key: str) -> list:\n        \"\"\"\n        This method searches the database based on the selected parameters\n\n        :param data_base: database name\n        :param table: table name\n        :param column_name: str - selected parameter to search\n        :param key: str - value from input field from user when searching string\n\n        :return: list of tuples -> founded rows to delete from table\n        \"\"\"\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        key = chr(0x22) + key + chr(0x22)\n        try:\n            query = 'SELECT * FROM ' + table + ' WHERE ' + column_name + ' = ' + key\n            cur.execute(query)\n        except sqlite3.OperationalError:\n            pass\n\n        data = cur.fetchall()\n        if not data:\n            data = 'Значение не найдено в базе данных!'\n        cur.close()\n        con.close()\n        return data\n\n    @staticmethod\n    def sqlite3_simple_delete_record(data_base, table, id_column, record_id):\n        \"\"\"\n        This method removes a row from the database table\n\n        :param data_base: database name\n        :param table: table name\n        :param id_column: column name\n        :param record_id: record in this column\n\n        :return: nothing\n        \"\"\"\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        query = 'DELETE FROM ' + table + ' WHERE ' + id_column + \" = '\" + record_id + \"'\"\n        cur.execute(query)\n        con.commit()\n        cur.close()\n        con.close()\n\n    @staticmethod\n    def sqlite3_simple_clear_table(data_base, table):\n        \"\"\"\n        This method delete all records from selected table\n\n        :param data_base: database name\n        :param table: table name\n\n        :return: nothing\n        \"\"\"\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        query = 'DELETE FROM ' + table\n        cur.execute(query)\n        con.commit()\n        cur.close()\n        con.close()\n\n    @staticmethod\n    def sqlite3_simple_delete_table(data_base, table):\n        \"\"\"\n        This method removes the selected table from database\n\n        :param data_base: database name\n        :param table: table name\n\n        :return: nothing\n        \"\"\"\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        query = 'DROP TABLE IF EXISTS ' + table\n        cur.execute(query)\n        cur.close()\n        con.close()\n\n    @staticmethod\n    def sqlite3_update_record(data_base, table, param_column, param_value, id_column, record_id):\n        \"\"\"\n        This method updates the entry in the database table\n\n        :param data_base: database name\n        :param table: table name\n        :param param_column: column in which need to change the value\n        :param param_value: value to be set\n        :param id_column: column name\n        :param record_id: record in this column\n\n        :return: nothing\n        \"\"\"\n        con = sqlite3.connect(data_base)\n        cur = con.cursor()\n        try:\n            query = 'UPDATE ' + table + ' SET ' + param_column + ' = \"' + param_value + '\" WHERE ' + id_column + \\\n                    \" = '\" + record_id + \"'\"\n            cur.execute(query)\n        except sqlite3.OperationalError:\n            pass\n        con.commit()\n        cur.close()\n        con.close()\n\n\n\n","sub_path":"qt_data_base.py","file_name":"qt_data_base.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"153950752","text":"from typing import List\n\nimport vapoursynth as vs\nfrom lvsfunc.misc import replace_ranges, source\nfrom lvsfunc.types import Range\nfrom vardautomation import (FileInfo, Patch, PresetBD, PresetFLAC, VPath,\n                            X265Encoder)\n\nfrom bento_filters import flt\n\ncore = vs.core\n\ncore.num_threads = 16\n\nEPNUM = __file__[-5:-3]\n\n# Sources\nJPBD_NCOP = FileInfo(r'BDMV/Vol.1/BDMV/STREAM/00003.m2ts', 0, -24,\n                     idx=lambda x: source(x, cachedir=''),\n                     preset=[PresetBD, PresetFLAC])\nJPBD_EP = FileInfo(r'BDMV/Vol.2/BDMV/STREAM/00000.m2ts', 1534, 1534+JPBD_NCOP.clip_cut.num_frames,\n                   idx=lambda x: source(x, cachedir=''),\n                   preset=[PresetBD, PresetFLAC])\nJPBD_NCOP.name_file_final = VPath(fr\"premux/{JPBD_NCOP.name} (Premux).mkv\")\nJPBD_NCOP.do_qpfile = True\n\n\n# Common variables\nreplace_op: List[Range] = [(418, 526)]\nop_aisle: List[Range] = [(281, 373)]\nred_circle: List[Range] = [(1934, 1951), (1956, 1979), (1984, 2054)]\n\n\ndef main() -> vs.VideoNode:\n    \"\"\"Vapoursynth filtering\"\"\"\n    from adptvgrnMod import adptvgrnMod\n    from havsfunc import FastLineDarkenMOD\n    from vsutil import depth\n\n    src_op = JPBD_NCOP.clip_cut\n    src_ep = JPBD_EP.clip_cut\n    src = replace_ranges(src_op, src_ep, replace_op)\n\n    scaled = flt.rescaler(src, 720)\n\n    denoised = flt.denoiser(scaled, bm3d_sigma=[0.8, 0.6], bm3d_rad=1)\n\n    aa_rep = flt.clamped_aa(denoised)\n    trans_sraa = flt.transpose_sraa(denoised)\n    aa_ranges = replace_ranges(aa_rep, trans_sraa, red_circle)\n\n    darken = FastLineDarkenMOD(aa_ranges, strength=48, protection=6, luma_cap=255, threshold=2)\n\n    deband = flt.masked_deband(darken, denoised=True, deband_args={'iterations': 2, 'threshold': 5.0, 'radius': 8, 'grain': 6})  # noqa\n    pdeband = flt.placebo_debander(darken, grain=4, deband_args={'iterations': 2, 'threshold': 8.0, 'radius': 10})\n    deband = replace_ranges(deband, pdeband, op_aisle)\n\n    grain = adptvgrnMod(deband, strength=0.3, luma_scaling=10, size=1.25, sharp=80, grain_chroma=False, seed=42069)\n\n    return depth(grain, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2], [0, 1, 2])\n\n\nclass Encoding:\n    def __init__(self, file: FileInfo, clip: vs.VideoNode) -> None:\n        self.file = file\n        self.clip = clip\n\n    def run(self) -> None:\n        assert self.file.a_src\n        assert self.file.a_enc_cut\n\n        self.preqpfileflt()\n\n        p = Patch(\n            file_to_fix=f'premux/{JPBD_NCOP.name[:-2]}01 (Premux).mkv',\n            filtered_clip=filtered,\n            frame_start=281,\n            frame_end=527,\n            encoder=X265Encoder('x265', 'settings/x265_settings_BD'),\n            file=JPBD_NCOP,\n            output_filename=VPath(fr\"{JPBD_NCOP.name} (Premux).mkv\")\n        )\n        p.run()\n        p.do_cleanup()\n\n    def preqpfileflt(self) -> None:\n        \"\"\"Pre-QP file generation filtering so the scenes match properly\"\"\"\n        self.file.clip_cut = replace_ranges(self.file.clip_cut, JPBD_EP.clip_cut, replace_op)\n\n\nif __name__ == '__main__':\n    filtered = main()\n    Encoding(JPBD_NCOP, filtered).run()\nelse:\n    JPBD_NCOP.clip_cut.set_output(0)\n    FILTERED = main()\n    FILTERED.set_output(1)\n","sub_path":"[OPMan]/Seasonals [TV]/2011-4 - Fall/[a8292] Ben-To/BentoBD_NCOP1v03.py","file_name":"BentoBD_NCOP1v03.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"203211183","text":"import json\r\nfrom asyncio.streams import StreamReader\r\nimport discord, asyncio\r\nfrom discord import Embed, Colour, Member\r\nfrom discord import colour\r\nfrom discord.ext.commands import cog\r\nfrom discord_slash import cog_ext, SlashCommand, SlashContext\r\nfrom discord_slash.utils.manage_commands import create_option, create_choice, create_permission\r\nfrom discord.ext import commands\r\n\r\n\r\n\r\nclass Moderation(commands.Cog):\r\n    def __init__(self, bot):\r\n        self.bot = bot\r\n\r\n\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(manage_messages=True)\r\n    async def mute(self, ctx, user: Member, time, *, reason: str = None):\r\n        guild = ctx.guild\r\n        muted_role = discord.utils.get(guild.roles, name=\"NoobBotPunishment\")\r\n        if not muted_role:\r\n            muted_role = await guild.create_role(name=\"NoobBotPunishment\")\r\n            for channel in guild.channels:\r\n                await channel.set_permissions(muted_role, speak=False, send_messages=False, read_message_history=True,\r\n                                              read_messages=True)\r\n        await user.add_roles(muted_role, reason=reason)\r\n        if time.endswith(\"s\"):\r\n            await asyncio.sleep(time.split(\"s\")[0])\r\n            await user.remove_roles(muted_role, reason=None)\r\n            mebed = Embed(\r\n                title=\"Unmuted\",\r\n                description=f\"Now {user.mention} is unmuted!\",\r\n                color=Colour.random()\r\n            )\r\n            await ctx.send(embed=mebed)\r\n        elif time.endswith(\"m\"):\r\n            await asyncio.sleep(time.split(\"m\")[0] * 60)\r\n            await user.remove_roles(muted_role, reason=None)\r\n            mebed = Embed(\r\n                title=\"Unmuted\",\r\n                description=f\"Now {user.mention} is unmuted!\",\r\n                color=Colour.random()\r\n            )\r\n            await ctx.send(embed=mebed)\r\n        elif time.endswith(\"h\"):\r\n            await asyncio.sleep(time.split(\"h\")[0] * 3600)\r\n            await user.remove_roles(muted_role, reason=None)\r\n            mebed = Embed(\r\n                title=\"Unmuted\",\r\n                description=f\"Now {user.mention} is unmuted!\",\r\n                color=Colour.random()\r\n            )\r\n            await ctx.send(embed=mebed)\r\n        mbed = Embed(\r\n            title=\"Muted\",\r\n            description=f\"{user.mention} is now muted!\",\r\n            color=Colour.random()\r\n        )\r\n        await ctx.send(embed=mbed)\r\n        await user.send(f\"Looks like you've got muted in {guild.name}!\")\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(manage_messages=True)\r\n    async def unmute(self, ctx, user: Member):\r\n        muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n        if not muted_role:\r\n            await ctx.send(\"You havent muted anyone before!\")\r\n        else:\r\n            user.remove_roles(muted_role, reason=None)\r\n            mbed = Embed(\r\n                title=\"Unmuted\",\r\n                description=f\"Now {user.mention} is unmuted!\",\r\n                color=Colour.random()\r\n            )\r\n            ctx.send(embed=mbed)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(manage_messages=True)\r\n    async def purge(self, ctx, amount=11):\r\n        if amount > 100:\r\n            await ctx.send(\"I cant remove more than 100 messages!\")\r\n        else:\r\n            await ctx.channel.purge(limit=amount)\r\n            await ctx.send(embed=Embed(\r\n                title=\"Cleared!\", description=f\"Touched {amount} messages!\", colour=Colour.random()\r\n            ))\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(kick_members=True)\r\n    async def kick(self, ctx, member: Member, *, reason: str):\r\n        mbed = Embed(\r\n            title=\"Kicked!\",\r\n            description=f\"Kicked {member.mention}, reason is {reason}\",\r\n            colour=Colour.random()\r\n        )\r\n        await member.send(f\"Looks like you've got kicked from {ctx.guild.name}\")\r\n        await member.kick(reason=reason)\r\n        await ctx.send(embed=mbed)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def ban(self, ctx, member: Member, *, reason: str):\r\n        mbed = Embed(\r\n            title=\"Banned!\",\r\n            description=f\"Banned {member.mention}, reason is {reason}\",\r\n            colour=Colour.random()\r\n        )\r\n        await member.ban(reason=reason)\r\n        await ctx.send(embed=mbed)\r\n        await member.send(f\"Looks like you've got banned from {ctx.guild.name} OOF!\")\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(ban_members=True)\r\n    async def unban(self, ctx, member: Member):\r\n        mbed = Embed(\r\n            title=\"Unbanned!\",\r\n            description=f\"Unbanned {member}!\",\r\n            colour=Colour.random()\r\n        )\r\n        banned = await ctx.guild.bans()\r\n        name, discrim = member.split(\"#\")\r\n        for ban_entry in banned:\r\n            user = ban_entry.user\r\n            if (user.name, user.discriminator) == (name, discrim):\r\n                await ctx.guild.unban(user)\r\n                await ctx.send(embed=mbed)\r\n\r\n                return\r\n\r\n\r\n\r\n\r\ndef setup(bot):\r\n    bot.add_cog(Moderation(bot))\r\n","sub_path":"cogs/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"597827223","text":"#parsing tournament\n# 1) Check if tournament is finished (maybe just stage) or loaded\n# 2) Parse each stage\n# 3) load tournament info into db\n# 4) Parse each match\n# 5) Check if decks have been loaded\n# 6) If they haven't load the lineup for the player and create player_id for tournament\n# 7) \n\nimport sys\nsys.path.append('/home/jgutman/workspace/hearthstone_decks/')\nsys.path.append('/home/jgutman/workspace/hearthstone_decks/TourStopLoader')\nsys.path.append('../')\nfrom config import basedir\nsys.path.append(basedir)\nsys.path.append(basedir + '/lineupSolver')\nfrom pprint import pprint\nimport json, requests\nimport datetime\nimport pytz\nfrom dateutil import parser\nfrom deck_manager import EasyDeck\nfrom label_archetype_file import label_archetype\nimport os\nimport re\nos.environ['TZ'] = 'America/Chicago'\nimport MySQLdb\nimport MySQLdb.constants as const\nfrom config import *\nconnection = MySQLdb.connect(host='localhost', user=db_user, passwd=db_passwd, charset = 'utf8mb4')\n#connection = MySQLdb.connect(user = 'guest', db = 'test', charset = 'utf8')\ncursor = connection.cursor()\n#cursor.execute(\"SET NAMES utf8\")\ntotal_time = {}\n\n\n#deckstring_re = re.compile('AA(?:[A-Za-z0-9+/]{2})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4}){12,}')\ndeckstring_re = re.compile('AA[A-Za-z0-9+/]+=*')\n\n#all_cups_url = 'https://playhearthstone.com/en-us/esports/schedule/scheduleData?month=%(month)s&year=%(year)s'\nall_cups_url = 'https://playhearthstone.com/en-us/api/esports/schedule/masters-qualifiers/?month=%(month)s&year=%(year)s'\ntournament_info_url = 'https://dtmwra1jsgyb0.cloudfront.net/tournaments/%(tournament_id)s'\nall_matches_url = 'https://dtmwra1jsgyb0.cloudfront.net/stages/%(stage_id)s/matches'\nmatch_info_url = 'https://dtmwra1jsgyb0.cloudfront.net/matches/%(match_id)s?extend%%5Btop.team%%5D%%5Bplayers%%5D%%5Buser%%5D=true&extend%%5Bbottom.team%%5D%%5Bplayers%%5D%%5Buser%%5D=true'\nstage_link_str = 'https://battlefy.com/hsesports/hearthstone-masters-qualifiers/%(tournament_id)s/stage/%(stage_id)s/bracket/'\nalt_tournament_info_url = 'https://majestic.battlefy.com/hearthstone-masters/tournaments?start=%(start_date)s&end=%(end_date)s'\ntournament_link_str = 'https://battlefy.com/hsesports/%(tournament_name)s/%(tournament_id)s/'\n#archetypes = {}\n\ndef get_time_from_utc(timestr):\n    utc_time = parser.parse(timestr)\n    local_tz = pytz.timezone('America/Chicago')\n    local_time = utc_time.replace(tzinfo=datetime.timezone.utc).astimezone(tz=local_tz)\n    return int(local_time.strftime(\"%s\"))\n\ndef get_masters_cups(end_date):\n    # returns list of (id, date, region, name)\n    res = []\n    start_date = '2019-03-01'\n    tournaments = json.loads(requests.get(alt_tournament_info_url % locals()).text)\n    for tourn in tournaments:\n        #utc_time = parser.parse(tourn['startTime'])\n        #local_time = utc_time.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)\n        #etime = int(local_time.strftime(\"%s\"))\n        etime = get_time_from_utc(tourn['startTime'])\n        res.append((tourn['_id'], etime, tourn['region'], tourn['slug']))\n    return res\n\ndef get_stage_ids(tournament_id):\n    data_tourney = json.loads(requests.get(tournament_info_url % locals()).text)\n    stage_ids = data_tourney['stageIDs']\n    return stage_ids\n\ndef get_stage_link(tournament_id, stage_id):\n    return stage_link_str % locals()\n\ndef get_stage_games(stage_id):\n    return json.loads(requests.get(all_matches_url % locals()).text)\n\ndef check_legal(lineup):\n    if len(lineup) < 3: return True\n    try:\n        decks = [EasyDeck(i) for i in lineup]\n    except:\n        return True\n    if decks[0].get_distance(decks[1]) > 5:\n        return False\n    if decks[0].get_distance(decks[2]) > 5:\n        return False\n    return True\n\n#def get_matches(tournament_id, stage_id):\n\nplayer_tourn_decks = {}\nplayer_tourn_names = {}\nplayer_ids = {}\nplayer_games = {}\nplayer_wins = {}\nplayer_match_wins = {}\nplayer_matches = {}\nplayer_events = {}\ndef get_match_info(tournament_id, bracket_id, match):\n    match_id = match['_id']\n    p1_id = match['top']['team']['captainID']\n    p2_id = match['bottom']['team']['captainID']\n    p1_score = match['top']['score']\n    p2_score = match['bottom']['score']\n    #pprint(match)\n    round_num = match['roundNumber']\n    if 'readyAt' not in match['top']:\n        return\n    #match_time = get_time_from_utc(match['completedAt']) - get_time_from_utc(match['top']['readyAt'])\n    match_start = get_time_from_utc(match['top']['readyAt'])\n    if 'readyAt' not in match['bottom']:\n        return\n    #match_time = min(match_time, get_time_from_utc(match['completedAt']) -get_time_from_utc( match['bottom']['readyAt']))\n    match_start = max(match_start, get_time_from_utc( match['bottom']['readyAt']))\n    match_end = get_time_from_utc(match['completedAt'])\n    winner = match['top']['team']['name'] if match['top']['winner'] else match['bottom']['team']['name']\n    match_info = None\n    if (tournament_id, p1_id) not in player_tourn_decks:\n        match_info = json.loads(requests.get(match_info_url % locals()).text)\n        p1_name = match_info[0]['top']['team']['players'][0]['inGameName']\n        if tournament_id not in player_ids:\n            player_ids[tournament_id] = {}\n        player_ids[tournament_id][p1_name] = p1_id\n        p1_decks = []\n        if 'deckStrings' in match_info[0]['top']['team']['players'][0]['gameAttributes']:\n            for i in match_info[0]['top']['team']['players'][0]['gameAttributes']['deckStrings']:\n                p1_decks.append(deckstring_re.findall(i)[0])\n        player_tourn_decks[(tournament_id, p1_id)] = p1_decks\n        player_tourn_names[(tournament_id, p1_id)] = p1_name\n        #if not check_legal(p1_decks):\n        #    print(\"ILLEGAL LINEUP\", p1_name, \" \".join(p1_decks))\n        #p1_name = p1_name.encode('utf-8')\n        archetype = label_archetype(p1_decks[0]) if len(p1_decks) > 0 else ''\n        deck1 = p1_decks[0] if len(p1_decks) > 0 else ''\n        deck2 = p1_decks[1] if len(p1_decks) > 1 else ''\n        deck3 = p1_decks[2] if len(p1_decks) > 2 else ''\n        load_player(tournament_id, p1_id, p1_name, deck1, deck2, deck3, archetype)\n    else:\n        p1_name = player_tourn_names[(tournament_id, p1_id)]\n        p1_decks = player_tourn_decks[(tournament_id, p1_id)]\n    if (tournament_id, p2_id) not in player_tourn_decks:\n        if match_info is None:\n            match_info = json.loads(requests.get(match_info_url % locals()).text)\n        p2_name = match_info[0]['bottom']['team']['players'][0]['inGameName']\n        player_ids[tournament_id][p2_name] = p2_id\n        p2_decks = []\n        if 'deckStrings' in match_info[0]['bottom']['team']['players'][0]['gameAttributes']:\n            for i in match_info[0]['bottom']['team']['players'][0]['gameAttributes']['deckStrings']:\n                p2_decks.append(deckstring_re.findall(i)[0])\n        player_tourn_decks[(tournament_id, p2_id)] = p2_decks\n        player_tourn_names[(tournament_id, p2_id)] = p2_name\n        #if not check_legal(p2_decks):\n        #    print(\"ILLEGAL LINEUP\", p2_name, \" \".join(p2_decks))\n        #p2_name = p2_name.encode('utf-8')\n        archetype = label_archetype(p2_decks[0]) if len(p2_decks) > 0 else ''\n        deck1 = p2_decks[0] if len(p2_decks) > 0 else ''\n        deck2 = p2_decks[1] if len(p2_decks) > 1 else ''\n        deck3 = p2_decks[2] if len(p2_decks) > 2 else ''\n        load_player(tournament_id, p2_id, p2_name, deck1, deck2, deck3, archetype)\n    else:\n        p2_name = player_tourn_names[(tournament_id, p2_id)]\n        p2_decks = player_tourn_decks[(tournament_id, p2_id)]\n    if p1_name == winner:\n        player_match_wins[p1_name] = player_match_wins.get(p1_name, 0) + 1\n    else:\n        player_match_wins[p2_name] = player_match_wins.get(p2_name, 0) + 1\n    player_games[p1_name] = player_games.get(p1_name, 0) + p1_score + p2_score\n    player_games[p2_name] = player_games.get(p2_name, 0) + p1_score + p2_score\n    player_wins[p1_name] = player_wins.get(p1_name, 0) + p1_score\n    player_wins[p2_name] = player_wins.get(p2_name, 0) + p2_score\n    if p1_name not in player_events:\n        player_events[p1_name] = set()\n    if p2_name not in player_events:\n        player_events[p2_name] = set()\n    player_events[p1_name].add(tournament_id)\n    player_events[p2_name].add(tournament_id)\n    player_matches[p1_name] = player_matches.get(p1_name, 0) + 1\n    player_matches[p2_name] = player_matches.get(p2_name, 0) + 1\n    if p1_decks:\n        a1 = label_archetype(p1_decks[0])\n        #archetypes[a1] = archetypes.get(a1, 0) + 1\n    if p2_decks:\n        a2 = label_archetype(p2_decks[0])\n        #archetypes[a2] = archetypes.get(a2, 0) + 1\n    result = 'W' if p1_name == winner else 'L'\n    load_game(tournament_id, p1_id, bracket_id, round_num, p2_id, p1_score, p2_score, result)\n    result = 'W' if p2_name == winner else 'L'\n    load_game(tournament_id, p2_id, bracket_id, round_num, p1_id, p2_score, p1_score, result)\n    #print(round_num, p1_name, p2_name, p1_score, p2_score, winner, label_archetype(p1_decks[0]) if p1_decks else None, label_archetype(p2_decks[0]) if p2_decks else None)\n    #print(p1_name, match_time)\n    #print(p2_name, match_time)\n    load_times(tournament_id, p1_id, bracket_id, round_num, match_start, match_end)\n    load_times(tournament_id, p2_id, bracket_id, round_num, match_start, match_end)\n    print(p1_name, tournament_id, match_start, match_end)\n    print(p2_name, tournament_id, match_start, match_end)\n    #total_time[p1_name] = total_time.get(p1_name, 0) + match_time\n    #total_time[p2_name] = total_time.get(p2_name, 0) + match_time\n    \n\nmonth, year = 3, 2019\nstart_date = '2019-03-01'\nend_date = '2024-12-31'\ncups_data = json.loads(requests.get(all_cups_url % locals()).text)\n\ndef create_tournament(tournament_id, tournament_name, time, swiss_id, top8_id):\n    db = 'masters_cups'\n    #cursor.execute(\"SELECT tournament_id from %(db)s.tournament where tournament_id = '%(tournament_id)s'\" % locals())\n    if not cursor.fetchall():\n        sql = \"\"\"INSERT INTO %(db)s.tournament (tournament_id, tournament_name, time, swiss_bracket, top8_bracket) \n        VALUES ('%(tournament_id)s', '%(tournament_name)s', %(time)s, '%(swiss_id)s', '%(top8_id)s')\"\"\"\n        #cursor.execute(sql % locals())\n        connection.commit()\n        return True\n    return False\n\n#CREATE TABLE player_info\n#(\n#    tournament_id varchar(24) NOT NULL,\n#    player_id varchar(24) NOT NULL,\n#    player_name varchar(32) NOT NULL,\n#    deck1 varchar(128) NOT NULL,\n#    deck2 varchar(128) NOT NULL,\n#    deck3 varchar(128) NOT NULL,\n#    archetype_prim varchar(32) NOT NULL,\n#    PRIMARY KEY(tournament_id, player_id)\n#);\ndef load_player(tournament_id, player_id, player_name, deck1, deck2, deck3, archetype):\n    #player_name = str(player_name.encode('utf-8'))\n    db = 'masters_cups'\n    archetype = str(archetype).replace(\"'\", '')\n    sql = \"\"\"INSERT INTO %(db)s.player_info (tournament_id, player_id, player_name, deck1, deck2, deck3, archetype_prim)\n    VALUES ('%(tournament_id)s', '%(player_id)s', '%(player_name)s', '%(deck1)s', '%(deck2)s', '%(deck3)s', \"%(archetype)s\")\n    ON DUPLICATE KEY UPDATE\n    deck1 = '%(deck1)s',\n    deck2 = '%(deck2)s',\n    deck3 = '%(deck3)s',\n    archetype_prim = '%(archetype)s';\n    \"\"\"\n    #print(len(str(player_name)))\n    #print(sql % locals())\n    #cursor.execute(sql % locals())\n    connection.commit()\n    \ndef load_times(tournament_id, player_id, bracket_id, round_number, start_time, end_time):\n#| tournament_id | varchar(24) | NO   | PRI | NULL    |       |\n#| player_id     | varchar(24) | NO   | PRI | NULL    |       |\n#| bracket_id    | varchar(24) | NO   | PRI | NULL    |       |\n#| round_number  | varchar(3)  | NO   | PRI | NULL    |       |\n#| start_time    | int(32)     | NO   |     | NULL    |       |\n#| end_time      | int(32)     | NO   |     | NULL    |       |\n    db = 'masters_cups'\n    sql = \"\"\"INSERT INTO %(db)s.times (tournament_id, player_id, bracket_id, round_number, start_time, end_time)\n    VALUES ('%(tournament_id)s', '%(player_id)s', '%(bracket_id)s', '%(round_number)s', %(start_time)s, %(end_time)s)\n    ON DUPLICATE KEY UPDATE\n    start_time = %(start_time)s,\n    end_time = %(end_time)s;\n    \"\"\"\n    cursor.execute(sql % locals())\n    connection.commit()\n\n\n#CREATE TABLE games\n#(\n#    tournament_id varchar(24) NOT NULL,\n#    player_id varchar(24) NOT NULL,\n#    bracket_id varchar(24) NOT NULL,\n#    round_number varchar(3) NOT NULL,\n#    opponent_id varchar(24) NOT NULL,\n#    score1 int(32) NOT NULL,\n#    score2 int(32) NOT NULL,\n#    result varchar(1) NOT NULL,\n#    PRIMARY KEY(tournament_id, player_id, bracket_id, round_number)\n#);\n\ndef load_game(tournament_id, player_id, bracket_id, round_number, opponent_id, score1, score2, result):\n    db = 'masters_cups'\n    sql = \"\"\"REPLACE %(db)s.games (tournament_id, player_id, bracket_id, round_number, opponent_id, score1, score2, result)\n    VALUES ('%(tournament_id)s', '%(player_id)s', '%(bracket_id)s', '%(round_number)s', '%(opponent_id)s', %(score1)s, %(score2)s, '%(result)s')\n    \"\"\"\n    #print(len(str(player_name)))\n    #print(sql % locals())\n    #cursor.execute(sql % locals())\n    connection.commit()\n    \n\n#CREATE TABLE winners\n#(\n#    tournament_id varchar(24) NOT NULL,\n#    player_id varchar(48) NOT NULL,\n#    PRIMARY KEY(tournament_id, player_id)\n#);\n\ndef load_winner(tournament_id, player_id):\n    db = 'masters_cups'\n    sql = \"\"\"REPLACE %(db)s.winners (tournament_id, player_id)\n    VALUES ('%(tournament_id)s', '%(player_id)s')\n    \"\"\"\n    #print(len(str(player_name)))\n    #print(sql % locals())\n    #cursor.execute(sql % locals())\n    connection.commit()\n\ndef check_winner(tournament_id):\n    db = 'masters_cups'\n    sql 
= \"SELECT tournament_id FROM %(db)s.winners where tournament_id = '%(tournament_id)s'\"\n #cursor.execute(sql % locals())\n res = [i for (i,) in cursor.fetchall()]\n if len(res) > 0: \n return True\n return False\n\n\nfor tournament_id, tournament_time, tournament_region, tournament_name in get_masters_cups(end_date):\n tnum = int(tournament_name.split('-')[-1])\n #if tnum < 180: continue\n #self.cursor.execute(\"INSERT INTO %(db)s.deck_to_cards (deck_id, card_id, quantity) VALUES (%(deck_id)s, %(card_id)s, %(quantity)s)\" % locals())\n date_str = datetime.datetime.fromtimestamp(tournament_time).strftime(\"%Y_%m_%d %H:%M\")\n stage_ids = get_stage_ids(tournament_id)\n if len(stage_ids) < 2: \n # print(\"%s,%s,%s\" % (tournament_name.split('-')[-1], date_str, tournament_link_str % locals()))\n continue\n swiss_link = get_stage_link(tournament_id, stage_ids[0])\n swiss_games = get_stage_games(stage_ids[0])\n for game in swiss_games:\n if not game['isBye']:\n get_match_info(tournament_id, stage_ids[0], game)\n top8_link = get_stage_link(tournament_id, stage_ids[1])\n top8_games = get_stage_games(stage_ids[-1])\n if 'completedAt' not in top8_games[-1]: continue\n for game in top8_games:\n get_match_info(tournament_id, stage_ids[1], game)\n match_id = top8_games[-1]['_id']\n finals = json.loads(requests.get(match_info_url % locals()).text)\n finals_name = finals[0]['top']['team']['name']\n winner = finals[0]['top']['team']['name'] if finals[0]['top']['winner'] else finals[0]['bottom']['team']['name']\n winner_id = finals[0]['top']['team']['captainID'] if finals[0]['top']['winner'] else finals[0]['bottom']['team']['captainID']\n load_winner(tournament_id, winner_id)\n #print(\"\\n%s,%s,%s,%s,%s,%s\\n\" % (tournament_name.split('-')[-1], date_str, tournament_link_str % locals(), swiss_link, top8_link, winner))\n tmp_final = top8_games[-1]\n\n\n#if True:\n# for i,j in sorted(player_wins.items(), key=lambda x:x[1], reverse=True)[:50]:\n# print(\"%-20s %s %s %s %%\" % (i,j, player_games[i], int(100 * j / player_games[i])))\n\n#for i,j in sorted(archetypes.items(), key=lambda x:x[1], reverse=True):\n# print(\"%-25s %s\" % (i,j))\n\n#for i,j in sorted(total_time.items, key=lambda x:x[1], reverse=True):\n# print(i,j)\n","sub_path":"TourStopLoader/load_times.py","file_name":"load_times.py","file_ext":"py","file_size_in_byte":16291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300934852","text":"import argparse, json\nimport _root\nimport _helper\n\n\n\ndef main(source_dataset, dest_dataset, *, notrim=False):\n # Process arguments to get name of dataset\n\n source_config = _helper.datasetConfigFilename(source_dataset)\n dest_config = _helper.datasetConfigFilename(dest_dataset)\n\n source = {}\n dest = {}\n\n with open(source_config, 'rt') as configfile:\n config = json.load(configfile)\n source['sample_rate'] = config['sample_rate']\n source['start_time_ms'] = config['start_time_ms']\n\n with open(dest_config, 'rt') as configfile:\n config = json.load(configfile)\n dest['sample_rate'] = config['sample_rate']\n dest['length'] = config['length']\n dest['start_time_ms'] = config['start_time_ms']\n\n if source['sample_rate'] != dest['sample_rate']:\n _helper.errorExit(\"Source and dest datasets should have the same sample rate\")\n\n start_sample = ((dest['start_time_ms'] - source['start_time_ms']) / 1000) * source['sample_rate']\n\n if start_sample != int(start_sample):\n _helper.errorExit(\"Source and dest datasets are not offset by an integer 
number of samples\")\n\n start_sample = int(start_sample)\n end_sample = int(start_sample + dest['length'])\n\n source_labels = _helper.getLabelsLatest(source_dataset)\n if source_labels:\n for session in source_labels:\n\n session_name = session['session']\n source_name = session['source']\n session_labels = session['labels']\n label_filename = _helper.latestLabelsFilename(dest_dataset, session_name)\n\n output = ''\n output += ('{\"session\":\"%s\", \"source\": \"%s\", \"labels\":[' % (session_name, source_name))\n was_prev = False\n\n for ll in session_labels:\n label_start = ll['lo']\n label_end = ll['hi']\n label_name = ll['label']\n label = [label_start, label_end, label_name]\n\n if notrim:\n output += _helper.activityJSON(label, was_prev)\n was_prev = True\n\n elif label_end > start_sample and label_start < end_sample:\n\n # Trim label start if needed\n if label_start < start_sample:\n label[0] = start_sample\n\n # Trim label end if needed\n if label_end > end_sample:\n label[1] = end_sample\n\n # Start label offset from 0\n label[0] -= start_sample\n label[1] -= start_sample\n\n output += _helper.activityJSON(label, was_prev)\n was_prev = True\n\n output += ']}\\n'\n\n _helper.ensureDirExists(label_filename, True)\n with open(label_filename, 'wt') as labelsfile:\n labelsfile.write(output)\n print('labels added to ', label_filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Get trimmed labels for sample subrange of a dataset.')\n parser.add_argument('source', type=str, help='Parent dataset to get labels from.', default=None)\n parser.add_argument('dest', type=str, help='Child dataset to trim and copy labels to.', default=None)\n parser.add_argument('--notrim', action='store_true', help='Do not trim parent dataset labels.')\n args = parser.parse_args()\n\n main(args.source, args.dest, notrim=args.notrim)\n","sub_path":"scripts/copy_labels.py","file_name":"copy_labels.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"390869115","text":"num = int(raw_input())\n\nstudents = {}\n\nfor i in range(0,num):\n line = list(raw_input().split())\n \n name = line[0] \n grade = (float(line[1]) + float(line[2]) + float(line[3])) / 3.00\n \n students[name] = grade\n\nname = raw_input()\n\nprint(\"%.2f\" % students[name])\n","sub_path":"Python/Introduction/finding_the_percentage.py","file_name":"finding_the_percentage.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"267958604","text":"import json\nimport time\nimport sys\nfrom collections import deque\n\nimport utility as U\nfrom finite_state import *\n\n\nclass Recipe:\n\n\n def __init__(self, rval):\n self.value = rval\n\n self.idxpos = None\n self.next = None\n self.prev = None\n\n self.fast_jump = None\n\n def fastappend(self, nextrec):\n assert nextrec.next == None\n assert self.next == None\n self.next = nextrec\n\n nextrec.idxpos = self.idxpos+1\n nextrec.prev = self\n\nclass PMachine(FiniteStateMachine):\n \n \n def __init__(self):\n \n statemap = \"\"\"\n {\n \"SPCS\" : \"F:PRI\",\n \"FPCS\" : \"T:SC\",\n \"HTNR\" : \"F:ABR\"\n }\n \"\"\"\n \n FiniteStateMachine.__init__(self, json.loads(statemap))\n\n self.is_test = False\n self.target_sequence = \"760221\"\n\n self.recipe_head = Recipe(3)\n self.recipe_head.idxpos = 0\n self.recipe_tail = self.recipe_head\n self.append_recipe(7)\n\n self.elf1 = 
self.recipe_head\n self.elf2 = self.recipe_tail\n\n\n def recipe_count(self):\n return self.recipe_tail.idxpos + 1\n\n def get_result(self):\n self.print_recipes()\n\n return self.recipe_count() - len(self.target_sequence)\n\n def s1_init_machine(self):\n self.tseq_list = [int(c) for c in self.target_sequence]\n\n def s3_print_recipe_info(self):\n self.print_recipes()\n\n def print_recipes(self):\n if self.recipe_tail.idxpos >= 30 or not self.is_test:\n return\n\n ptr = self.recipe_head\n\n while ptr != None:\n sp = \" {} \".format(ptr.value)\n if ptr == self.elf1:\n sp = \"({})\".format(ptr.value)\n elif ptr == self.elf2:\n sp = \"[{}]\".format(ptr.value)\n print(sp, end='')\n\n ptr = ptr.next\n\n print(\"\")\n\n\n def s4_have_two_new_recipes(self):\n rsum = self.elf1.value + self.elf2.value\n return rsum >= 10\n\n def s5_add_first_recipe(self):\n rsum = self.elf1.value + self.elf2.value\n self.append_recipe(rsum // 10)\n\n\n def s8_first_pass_check_solution(self):\n return self.fast_prefix_check()\n\n\n def s10_add_basic_recipe(self):\n rsum = self.elf1.value + self.elf2.value\n self.append_recipe(rsum % 10)\n\n\n\n def append_recipe(self, rc):\n newnode = Recipe(int(rc))\n self.recipe_tail.fastappend(newnode)\n self.recipe_tail = self.recipe_tail.next\n\n\n def jump_to_next(self, orig):\n if orig.fast_jump != None:\n return orig.fast_jump\n\n cycled = False\n gimp = orig\n #numstep = orig.idxpos + orig.value + 1 \n numstep = orig.value + 1 \n\n for _ in range(numstep):\n if gimp.next != None:\n gimp = gimp.next\n continue\n\n gimp = self.recipe_head\n cycled = True\n\n\n if not cycled:\n # Log the jump result, IF we didn't cycle\n orig.fast_jump = gimp\n\n return gimp\n\n\n def s18_advance_elves(self):\n\n #self.elf1 = newpos(self.elf1)\n #self.elf2 = newpos(self.elf2)\n\n self.elf1 = self.jump_to_next(self.elf1)\n self.elf2 = self.jump_to_next(self.elf2)\n\n #self.elf1 = self.slow_index_jump(self.elf1)\n #self.elf2 = self.slow_index_jump(self.elf2)\n\n def slow_index_jump(self, elf):\n np = elf.idxpos + elf.value + 1\n np = np % (self.recipe_tail.idxpos+1)\n return self.slow_index_lookup(np)\n\n def slow_index_lookup(self, idx):\n ptr = self.recipe_head\n\n for _ in range(idx):\n ptr = ptr.next\n\n return ptr\n\n\n def get_current_suffix(self):\n\n ptr = self.backup_from_tail(len(self.target_sequence)-1)\n suffix = \"\"\n\n for _ in range(len(self.target_sequence)):\n suffix += str(ptr.value)\n\n if ptr.next == None:\n break \n\n ptr = ptr.next\n\n return suffix\n\n\n def slow_suffix_check(self): \n return self.get_current_suffix() == self.target_sequence\n\n\n def backup_from_tail(self, numstep):\n ptr = self.recipe_tail\n\n for _ in range(numstep):\n if ptr.prev == None:\n break \n ptr = ptr.prev\n\n return ptr\n\n def fast_prefix_check(self):\n\n\n if self.recipe_count() < len(self.tseq_list):\n return False\n\n ptr = self.recipe_tail\n checkseq = copy.copy(self.tseq_list)\n checkseq.reverse()\n\n for idx, cseq in enumerate(checkseq):\n if cseq != ptr.value:\n return False\n\n if idx == len(self.tseq_list)-2:\n csuffix = self.get_current_suffix()\n print(\"Have {} recipes, current suffix is {}\".format(self.recipe_count(), csuffix))\n\n ptr = ptr.prev\n\n return True\n\n def s20_second_pass_check_solution(self):\n return self.fast_prefix_check()\n\n def s30_success_complete(self):\n pass \n \n\n\ndef run_tests():\n \n testmap = { '51589' : 9, '01245': 5, '92510': 18, '59414': 2018 }\n\n for tseq, expd in testmap.items():\n pmod = PMachine()\n pmod.is_test = True\n 
pmod.target_sequence = tseq\n pmod.run2_completion()\n rslt = pmod.get_result()\n print(\"For Tseq={}, expected {} and got {}\".format(tseq, expd, rslt))\n\n \"\"\"\n scount = 10000000\n alpha = time.time()\n timemod = PMachine()\n timemod.run2_step_count(scount)\n print(\"Ran {} steps, took {:.03f} secs\".format(scount, time.time()-alpha))\n \"\"\"\n\n\n\n ","sub_path":"src/p14b.py","file_name":"p14b.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434374870","text":"\"\"\"\nProblem statement\nYou are given a string S.\nConsider using each character contained in S exactly once to build some number of palindromes.\nFor example, when S=aaab you can build the two palindromes aba and a.\nCharacters may be used in any order, and a character that appears several times in S is used exactly that many times in total.\nEach palindrome of length L that you build scores L^2 points.\nWhat is the maximum total score you can obtain?\n\nConstraints\n1≦|S|≦100,000\nS consists only of lowercase letters.\n===================\ntried-01:\n\nimprovement:\nn = sum(s.count(chr(x)) & 1 for x in range(97, 123)) counts the letters that occur an odd number of times.\nIn print(m**2 if not n else n-1+(m-n+1)**2):\nif not n,\n (1) no letter is left unpaired (every count is even)\n -> m**2\n output the square of the string length (the whole string forms one palindrome)\n (2) some letters are left unpaired\n -> n-1+(m-n+1)**2\n -> (m-n+1)**2\n use one unpaired letter as the core and build one long palindrome from all the pairs\n -> n-1\n the remaining unpaired letters each count as a length-1 palindrome (1 squared is still 1, so no squaring is needed)\n\"\"\"\n\n# === tried-01 ===\nfrom collections import Counter\n\ns = list(input())\n\nvals = Counter(s).values()\n'''\nStrategy\n- Letters that appear two or more times are merged into one long palindrome\n- Letters that appear only once (and leftover characters) are counted as single-letter palindromes\n'''\nlong_word = 0\nshort_words = 0\nresult = 0\n\nfor val in vals:\n long_word += val // 2\n short_words += val % 2\n\nif 1 <= short_words:\n result += (long_word * 2 + 1) ** 2\n result += short_words - 1\nelif 0 == short_words:\n result += (long_word * 2) ** 2\nelse:\n pass\n\nprint(result)\n\n# === improvement ===\ns = input()\nn = sum(s.count(chr(x)) & 1 for x in range(97, 123))\nm = len(s)\nprint(m**2 if not n else n-1+(m-n+1)**2)\n\nprint(n, m)\nprint(list(s.count(chr(x)) & 1 for x in range(97, 123)))\n","sub_path":"program_king_exhibition/G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"568673008","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom sys import argv\nfrom subprocess import call\nimport threading\nimport webbrowser\nimport os\nfrom shutil import copy, move, rmtree\nfrom os.path import join, dirname, realpath, exists\nfrom glob import glob\nimport re\n\ntry:\n from setuptools import setup, Command\n from setuptools.command.sdist import sdist as SDistCommand\n from setuptools.command.bdist import bdist as BDistCommand\n from setuptools.command.upload import upload as UploadCommand\nexcept ImportError:\n from distutils.core import setup, Command\n\ndirectory = dirname(realpath(__file__))\nsys.path.insert(0, join(directory, 'escher'))\nversion = __import__('version').__version__\nfull_version = __import__('version').__full_version__\npackage = __import__('version').package\nport = 8789\n\nclass CleanCommand(Command):\n description = \"Custom clean command that removes static site\"\n user_options = []\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n def run(self):\n def remove_if(x):\n if exists(x): rmtree(x)\n remove_if(join(directory, 'build'))\n remove_if(join(directory, 'dist'))\n # remove site files\n remove_if(join(directory, '..', 'builder'))\n for f in glob(join(directory, '..', 'index.html')):\n os.remove(f)\n print('done cleaning')\n\n\nclass BuildGHPagesCommand(Command):\n description = \"Custom build command that 
generates static site, and copies escher libs\"\n user_options = []\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n def run(self):\n # generate the static site\n call(['python', join('escher', 'generate_index.py')])\n call(['python', join('escher', 'static_site.py')])\n print('Done building gh-pages')\n\n\nclass TestCommand(Command):\n description = \"Custom test command that runs pytest\"\n user_options = [('noweb', None, 'Skip run tests that require the Escher website')]\n def initialize_options(self):\n self.noweb = False\n def finalize_options(self):\n pass\n def run(self):\n import pytest\n if self.noweb:\n exit_code = pytest.main(['-m', 'not web'])\n else:\n exit_code = pytest.main([])\n sys.exit(exit_code)\n\n\nsetup(\n name='Escher',\n version=full_version,\n author=package['author'],\n url=package['homepage'],\n description=package['description'],\n keywords=', '.join(package['keywords']),\n license=package['license'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Operating System :: OS Independent'\n ],\n packages=['escher'],\n package_data={'escher': ['package.json', 'static/escher/*', 'static/fonts/*',\n 'static/jsonschema/*', 'static/homepage/*',\n 'static/img/*', 'static/lib/*', 'templates/*']},\n install_requires=['Jinja2>=2.7.3',\n 'tornado>=4.0.2',\n 'pytest>=2.6.2',\n 'cobra>=0.3.0',\n 'jsonschema>=2.4.0'],\n extras_require={'docs': ['sphinx>=1.2',\n 'sphinx-rtd-theme>=0.1.6'],\n 'all': ['sphinx>=1.2',\n 'sphinx-rtd-theme>=0.1.6',\n 'ipython>=4.0.2',\n 'jupyter>=1.0.0',\n 'wheel>=0.24.0',\n 'twine>=1.5.0'] },\n cmdclass={'clean': CleanCommand,\n 'build_gh': BuildGHPagesCommand,\n 'test': TestCommand}\n)\n","sub_path":"py/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432937797","text":"import csv, json\n\n\ndef main():\n\tdata = parse_data('Data/results.json')\n\t\n\ttest_file = open('Data/test.csv', 'w', encoding='utf-8-sig')\n\tcsv_writer=csv.writer(test_file, lineterminator='\\n')\n\tfor tweet in data:\n\t\tlink = 'www.twitter.com/' + tweet['user']['screen_name'] + '/status/' + str(tweet['id'])\n\t\ttweet = tweet['text']\n\t\tprint(link)\n\t\tcsv_writer.writerow([link, tweet])\n\ttest_file.close()\n\t \ndef parse_data(file_name):\n\twith open(file_name, 'r', encoding='utf-8-sig') as json_file:\n\t\tdata = json.load(json_file)\n\t\treturn data\n\n\nif __name__ == \"__main__\": main()\n \n","sub_path":"JSON_reader.py","file_name":"JSON_reader.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11904950","text":"import unittest\nimport urllib2\nimport os\nimport sys\nimport shutil\nimport subprocess\n\n\nclass TestServer(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n Initialize Server\n \"\"\"\n try:\n open('../schema.sql','r')\n except IOError as e:\n raise Exception('schema.sql file does not exist.\\\n run the daemon or server first')\n shutil.copy('../schema.sql', 'schema.sql')\n try:\n open('../results.db','r')\n except IOError as e:\n raise 
Exception('results.db file does not exist.\\\n run the daemon or server first and let results\\\n populate')\n shutil.copy('../results.db', 'results.db')\n self.server = subprocess.Popen([\"python\", \"../server.py\"])\n self.base_url = 'http://127.0.0.1:5000/'\n self.connect_timeout = 5\n\n def send_http_get(self, params=''):\n response = urllib2.urlopen(\"%s%s\" % (self.base_url, params),\n timeout=self.connect_timeout)\n return response\n\n def test_main_page(self):\n \"\"\"\n Test that we can render the main template\n \"\"\"\n response = self.send_http_get('/').read()\n self.assertTrue('' in response)\n\n def test_get_tweets_format_html(self):\n \"\"\"\n Test that we can render get_tweets method in html\n \"\"\"\n response = self.send_http_get('/getTweets?format=html').read()\n self.assertTrue('id=\"tweets\"' in response)\n\n def test_get_tweets_format_json(self):\n \"\"\"\n Test that we can render get_tweets method in json\n \"\"\"\n response = self.send_http_get('/getTweets?format=json').read()\n self.assertTrue('\"results\": [' in response)\n\n def tearDown(self):\n os.remove('schema.sql')\n os.remove('results.db')\n self.server.kill()\n\n\nif __name__ == '__main__':\n # can't use relative parent imports if script called as main\n # so append parent directory to path\n parent_dir = os.path.normpath(os.path.join(os.getcwd(), '..'))\n sys.path.append(parent_dir)\n unittest.main()\n","sub_path":"tests/web_server_test.py","file_name":"web_server_test.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"630132265","text":"# coding: utf-8\n\nimport os\nimport re\n\nimport queue\nimport logging\n\nfrom abc import ABC\nfrom abc import abstractmethod\n\nimport jpype\n\nimport jieba\nfrom jieba import posseg\n\nfrom pyltp import Segmentor\nfrom pyltp import Postagger\nfrom pyltp import Parser as LParser\nfrom pyltp import NamedEntityRecognizer\n\nfrom common.utils import iter_file\n\nfrom nlp.config import add_user_words\nfrom nlp.config import LTP_MODEL_DIR\nfrom nlp.config import CUSTOM_POS_FILE\nfrom nlp.config import CUSTOM_TOKEN_FILE\nfrom nlp.config import DEFAULT_PARSER\nfrom nlp.config import HANLP_MODEL_DIR\nfrom nlp.config import RESOURCE_DIR\n\nfrom nlp.normalize import PosReviser\nfrom nlp.normalize import PosNormalizer\n\nlogger = logging.getLogger(__file__)\n\n\nclass Parser(ABC):\n\n def __init__(self):\n self._pos_cache = dict()\n self._queue = queue.Queue(500000)\n\n super(Parser, self).__init__()\n\n def ssplit(self, txt):\n \"\"\"\n Split the text into sentence-like segments\n :param txt: a piece of text\n :return: list of segment strings\n \"\"\"\n return [x for x in re.split(r'[\\s,,.。::!!??、]', txt) if x]\n\n @abstractmethod\n def segment(self, txt):\n \"\"\"\n Tokenize the text\n :param txt: text\n :return: [word1, word2, ...]\n \"\"\"\n pass\n\n @abstractmethod\n def pos(self, txt, cache=False):\n \"\"\"\n POS-tag the text (sentence splitting included)\n :param txt:\n :param cache:\n :return: [Token1, Token2, ...]\n \"\"\"\n pass\n\n @abstractmethod\n def ner(self, txt):\n pass\n\n @abstractmethod\n def parse2relations(self, txt):\n \"\"\"\n Dependency parsing\n :param txt:\n :return: [Relation, Relation, ...]\n :rtype: list of Relation\n \"\"\"\n pass\n\n @abstractmethod\n def parse2sents(self, txt):\n \"\"\"\n Parse a sentence of text\n :param txt:\n :return: [Sentence, Sentence, ...]\n :rtype: list of Sentence\n \"\"\"\n pass\n\n def _get_from_cache(self, txt):\n return self._pos_cache.get(txt)\n\n def _set_cache(self, txt, 
result):\n self._pos_cache[txt] = result\n self._queue.put(txt)\n\n if self._queue.full():\n try:\n key = self._queue.get()\n self._pos_cache.pop(key)\n except Exception:\n logger.exception('pos cache pop error')\n\n\nclass LTPParser(Parser):\n \"\"\"\n Parser implementation based on LTP.\n\n LTP's support for user-defined dictionaries is limited, see http://www.ltp-cloud.com/support/\n 1. After extending the custom dictionary, LTP has to be recompiled\n 2. Segmentation supports custom dictionaries, but POS tagging does not\n \"\"\"\n\n def __init__(self, ltp_model_dir, custom_seg_file=None, custom_pos_file=None):\n \"\"\"\n :param ltp_model_dir:\n \"\"\"\n\n super(LTPParser, self).__init__()\n\n self._ltp_dir = ltp_model_dir\n\n '''Load the segmentation model'''\n seg_model_file = os.path.join(self._ltp_dir, 'cws.model')\n self._segmentor = Segmentor()\n if custom_seg_file:\n self._segmentor.load_with_lexicon(seg_model_file, custom_seg_file)\n else:\n self._segmentor.load(seg_model_file)\n\n '''Load the POS tagging model'''\n self._tagger = Postagger()\n pos_model_file = os.path.join(self._ltp_dir, 'pos.model')\n if custom_pos_file:\n self._tagger.load_with_lexicon(pos_model_file, custom_pos_file)\n else:\n self._tagger.load(pos_model_file)\n\n '''Load the named entity recognition model'''\n self._ner = NamedEntityRecognizer()\n self._ner.load(os.path.join(self._ltp_dir, 'ner.model'))\n\n '''Load the dependency parsing model'''\n self._parser = LParser()\n self._parser.load(os.path.join(self._ltp_dir, 'parser.model'))\n\n def segment(self, txt):\n return list(self._segmentor.segment(txt))\n\n def pos(self, txt, cache=False, revise=False):\n # revise is accepted for interface compatibility (parse2relations passes it;\n # CombinationParser actually uses it) and is ignored here\n\n result = None\n\n if cache:\n result = self._get_from_cache(txt)\n\n if result is None:\n tokenized = self.segment(txt)\n tags = self._tagger.postag(tokenized)\n\n result = []\n for i, w, t in zip(list(range(len(tokenized))), tokenized, tags):\n result.append(Token(w, t, i))\n\n self._set_cache(txt, result)\n\n return result\n\n def ner(self, txt):\n tokens = self.pos(txt)\n return list(self._ner.recognize([t.word for t in tokens], [t.pos for t in tokens]))\n\n def parse2relations(self, txt):\n tokens = self.pos(txt, revise=True)\n\n words = [t.word for t in tokens]\n tags = [t.pos for t in tokens]\n\n arcs = self._parser.parse(words, tags)\n\n result = []\n for i, w, p, a in zip(list(range(len(words))), words, tags, arcs):\n head_token = Token(words[a.head - 1] if a.head > 0 else 'Root', tags[a.head - 1] if a.head > 0 else 'Root',\n a.head - 1)\n dep_token = Token(w, p, i)\n\n result.append(Relation(a.relation, head_token, dep_token))\n\n return result\n\n def parse2sents(self, txt):\n sents = []\n\n for sent_txt in self.ssplit(txt):\n sent_relations = self.parse2relations(sent_txt + '。')\n tokens = set()\n\n for relation in sent_relations:\n if relation.token1.word != 'ROOT':\n tokens.add(relation.token1)\n tokens.add(relation.token2)\n\n tokens = sorted(tokens, key=lambda t: t.id)\n\n # sent = Sentence(''.join([w.word for w in tokens]))\n sent = Sentence(sent_txt)\n\n sent.tokens = tokens\n sent.relations = sent_relations\n\n sents.append(sent)\n\n return sents\n\n\nclass HanLPParser(Parser):\n \"\"\"\n Parser implementation based on HanLP.\n Configure the user dictionary in hanlp.properties. After adding new words, delete the dictionary cache file data\\dictionary\\custom\\CustomDictionary.txt.bin.\n \"\"\"\n\n def __init__(self, hanlp_dir):\n \"\"\"\n :param hanlp_dir:\n \"\"\"\n super(HanLPParser, self).__init__()\n\n self._HanLP = jpype.JClass('com.hankcs.hanlp.HanLP')\n self._NLPTokenizer = jpype.JClass('com.hankcs.hanlp.tokenizer.NLPTokenizer')\n\n self._CustomDictionary = jpype.JClass('com.hankcs.hanlp.dictionary.CustomDictionary')\n\n self._CustomDictionary.add('好用', 'a 9999')\n\n def segment(self, txt):\n result = []\n\n # words = self._HanLP.segment(txt)\n words = 
self._NLPTokenizer.segment(txt)\n for word in words:\n result.append(word.word)\n\n return result\n\n def pos(self, txt, cache=False):\n result = None\n\n if cache:\n result = self._get_from_cache(txt)\n\n if result is None:\n result = []\n\n # words = self._HanLP.segment(txt)\n words = self._NLPTokenizer.segment(txt)\n for i, word in enumerate(words):\n result.append(Token(word.word, word.nature.toString(), i))\n\n self._set_cache(txt, result)\n\n return result\n\n def ner(self, txt):\n pass\n\n def parse2relations(self, txt):\n result = []\n\n relations = self._HanLP.parseDependency(txt)\n iterator = relations.iterator()\n\n while iterator.hasNext():\n word2 = iterator.next()\n word1 = word2.HEAD\n\n token1 = Token(word1.LEMMA, word1.POSTAG, word1.ID)\n token2 = Token(word2.LEMMA, word2.POSTAG, word2.ID)\n\n result.append(Relation(word2.DEPREL, token1, token2))\n\n return result\n\n def parse2sents(self, txt):\n sents = []\n\n for sent_txt in self.ssplit(txt):\n sent_relations = self.parse2relations(sent_txt + '。')\n tokens = set()\n\n for relation in sent_relations:\n if relation.token1.word != '##核心##':\n tokens.add(relation.token1)\n tokens.add(relation.token2)\n\n tokens = sorted(tokens, key=lambda t: t.id)\n\n # sent = Sentence(''.join([w.word for w in tokens]))\n sent = Sentence(sent_txt)\n\n sent.tokens = tokens\n sent.relations = sent_relations\n\n sents.append(sent)\n\n return sents\n\n\nclass StandfordParser(Parser):\n\n def segment(self, txt):\n pass\n\n def pos(self, txt, cache=False):\n pass\n\n def ner(self, txt):\n pass\n\n def parse2relations(self, txt):\n pass\n\n def parse2sents(self, txt):\n pass\n\n\nclass CombinationParser(LTPParser):\n\n def segment(self, txt):\n return jieba.lcut(txt)\n\n def pos(self, txt, cache=False, revise=False):\n result = []\n\n tags = [(w, PosNormalizer.normalize(p)) for w, p in posseg.cut(txt, HMM=False)]\n for i, tp in enumerate(tags):\n result.append(Token(tp[0], tp[1], i))\n\n if revise and len(result) < 10:\n result = PosReviser.revise(result)\n\n return result\n\n\nclass Token(object):\n\n def __init__(self, word, pos, index=-1):\n self.word = word\n self.pos = pos\n self.id = index\n\n def __eq__(self, other):\n if other is None or not isinstance(other, Token):\n return False\n return self.id == other.id and self.word == other.word and self.pos == other.pos\n\n def __str__(self):\n return '%s/%d/%s' % (self.word, self.id, self.pos)\n\n def __hash__(self):\n return hash('%s/%d/%s' % (self.word, self.id, self.pos))\n\n\nclass Relation(object):\n\n def __init__(self, relation, token1, token2):\n self.relation = relation\n self.token1 = token1\n self.token2 = token2\n\n self.format = '%s(%s, %s)' % (self.relation, self.token1.pos, self.token2.pos)\n\n def match(self, relation_format):\n return self.format.lower() == relation_format.lower()\n\n def __str__(self):\n return '%s(%s/%s, %s/%s)' % (self.relation, self.token1.word, self.token1.pos, self.token2.word, self.token2.pos)\n\n\nclass Sentence(object):\n\n def __init__(self, raw):\n self.__raw = raw\n self.__tokens = []\n self.__relations = []\n\n @property\n def raw(self):\n return self.__raw\n\n @raw.setter\n def raw(self, value):\n self.__raw = value\n\n @property\n def tokens(self):\n return self.__tokens\n\n @tokens.setter\n def tokens(self, value):\n self.__tokens = value\n\n @property\n def relations(self):\n \"\"\"\n :rtype: list of Relation\n \"\"\"\n return self.__relations\n\n @relations.setter\n def relations(self, value):\n self.__relations = value\n\n def get_token(self, 
id_or_word):\n\n if isinstance(id_or_word, int):\n return self.__tokens[id_or_word - 1]\n\n if isinstance(id_or_word, str):\n for token in self.__tokens:\n if token.word == id_or_word:\n return token\n\n return None\n\n def get_root_relation(self):\n for relation in self.__relations:\n if relation.relation in ['HED', '核心关系']:\n return relation\n\n return None\n\n def find_relations(self, rel, token1_pos=None, token2_pos=None):\n for relation in self.__relations:\n\n cdn = relation.relation == rel\n cdn1 = (relation.token1.pos == token1_pos) if token1_pos else True\n cdn2 = (relation.token2.pos == token2_pos) if token2_pos else True\n\n if cdn and cdn1 and cdn2:\n return relation\n\n return None\n\n def __str__(self):\n return ' '.join([str(relation) for relation in self.__relations])\n\n\nadd_user_words([tuple(l.split()) for l in iter_file(os.path.join(RESOURCE_DIR, 'nlp', 'lexicon', 'jieba', 'user1.dict'))])\nadd_user_words([tuple(l.split()) for l in iter_file(os.path.join(RESOURCE_DIR, 'nlp', 'lexicon', 'jieba', 'user2.dict'))])\nadd_user_words([(w, None, max(ps.items(), key=lambda tp: tp[1])[0]) for w, ps in PosReviser.revise_map.items()])\n\ncombParser = CombinationParser(LTP_MODEL_DIR, custom_seg_file=CUSTOM_TOKEN_FILE, custom_pos_file=CUSTOM_POS_FILE)\nltpParser = LTPParser(LTP_MODEL_DIR, custom_seg_file=CUSTOM_TOKEN_FILE, custom_pos_file=CUSTOM_POS_FILE)\n\nif DEFAULT_PARSER == 'comb':\n default_parser = combParser\nelif DEFAULT_PARSER == 'ltp':\n default_parser = ltpParser\nelif DEFAULT_PARSER == 'hanlp':\n default_parser = HanLPParser(HANLP_MODEL_DIR)\nelse:\n default_parser = None\n","sub_path":"nlp/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":12582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43073358","text":"import re\n\nfrom livestreamer.plugin import Plugin\nfrom livestreamer.plugin.api import http, validate\nfrom livestreamer.stream import HTTPStream\n\nAPI_URL = \"http://www.panda.tv/api_room?roomid={0}\"\nSTATUS_ONLINE = 2\nSTATUS_OFFLINE = 3\n\n_url_re = re.compile(\"\"\"\n http(s)?://(www\\.)?panda.tv\n /(?P[^/]+)\n\"\"\", re.VERBOSE)\n\n_room_schema = validate.Schema(\n {\n \"data\": {\n \"videoinfo\": validate.any(None, {\n \"status\": validate.all(\n validate.text,\n validate.transform(int)\n ),\n \"room_key\": validate.text\n })\n }\n },\n validate.get(\"data\"),\n validate.get(\"videoinfo\")\n)\n\n\nclass Pandatv(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n channel = match.group(\"channel\")\n\n res = http.get(API_URL.format(channel))\n room = http.json(res, schema=_room_schema)\n if not room:\n return\n\n if room[\"status\"] != STATUS_ONLINE:\n return\n\n http_url = \"http://pl3.live.panda.tv/live_panda/{room[room_key]}.flv\".format(room=room)\n http_stream = HTTPStream(self.session, http_url)\n if http_stream:\n yield \"http\", http_stream\n\n__plugin__ = Pandatv\n","sub_path":"src/livestreamer/plugins/panda.py","file_name":"panda.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"274802572","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom . 
import views\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', views.index, name='flightplan'),\n url(r'^templates/(.*)/', views.templates),\n\n url(r'^route/list/', views.list_routes),\n url(r'^route/save/', views.save_route),\n url(r'^route/(.*)/', views.load_route),\n\n url(r'^flight/list/', views.list_flights),\n url(r'^flight/(.*)/', views.load_flight),\n\n # ajax webservices\n url(r'^map/getfacilities/', views.getfacilities),\n url(r'^map/getcycles/', views.getcycles),\n url(r'^map/search/', views.search),\n]\n","sub_path":"flightplan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175217533","text":"import databasemanager\nimport random\nfrom datetime import datetime\n\nSECONDS_WAIT = 60 * 60\n\n\ndef current():\n\tboss = databasemanager.get_variable('boss')\n\n\tif boss is None:\n\t\treturn create()\n\n\tif need_to_reborn(boss):\n\t\treturn create(boss)\n\n\treturn boss\n\n\ndef create(old_boss = None):\n\tboss_id = 1\n\n\tif old_boss is not None:\n\t\tboss_id = old_boss['id']\n\n\t# Костыли, велосипеды\n\troom_name, hp = random.choice([('black_knight', 129500), ('hellkite_dragon', 175500), ('moonlight_butterfly', 49500), ('naping_dragon', 77500)])\n\tnew_boss = {\n\t\t'id': boss_id + 1,\n\t\t'name': room_name,\n\t\t'alive': True,\n\t\t'hp': hp,\n\t\t'die_seconds': None\n\t}\n\n\tsave(new_boss)\n\n\treturn new_boss\n\n\ndef die(boss):\n\tnew_boss = {\n\t\t'id': boss['id'],\n\t\t'name': boss['name'],\n\t\t'alive': False,\n\t\t'hp': 0,\n\t\t'die_seconds': (datetime.now() - datetime(1970,1,1)).total_seconds()\n\t}\n\n\tsave(new_boss)\n\n\ndef save(boss):\n\tdatabasemanager.set_variable('boss', boss)\n\ndef need_to_reborn(boss):\n\tif boss['die_seconds'] is None:\n\t\treturn False\n\n\tseconds_passed = (datetime.now() - datetime(1970,1,1)).total_seconds() - boss['die_seconds']\n\n\treturn boss['alive'] is False and seconds_passed > SECONDS_WAIT\n","sub_path":"bossmanager.py","file_name":"bossmanager.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"383213530","text":"class ListNode:\n \"\"\"\n A node in a singly-linked list.\n \"\"\"\n def __init__(self, data=None, next=None):\n\n self.data=data\n self.next=next\n\nclass SinglyLinkedList:\n def __init__(self):\n \"\"\"\n Create a new singly-linked list.\n Takes O(1) time.\n \"\"\"\n self.head = None\n self.curr=None\n\n def append(self, data):\n \"\"\"\n Insert a new element at the end of the list.\n Takes O(n) time.\n \"\"\"\n newNode=ListNode(data=data)\n\n if self.head is None:\n self.head=newNode\n self.curr=self.head\n else:\n self.curr.next=newNode\n self.curr=self.curr.next\n\n def find(self, key):\n \"\"\"\n Search for the first element with `data` matching\n `key`. 
Return the element or `None` if not found.\n Takes O(n) time.\n \"\"\"\n\n temp=self.head\n\n while temp is not None:\n\n if temp.data==key:\n print('Found!')\n return temp.data\n\n temp=temp.next\n\n print('Not found!')\n return None\n\n def remove(self, key):\n \"\"\"\n Remove the first occurrence of `key` in the list.\n Takes O(n) time.\n \"\"\"\n curr=self.head\n prev=None\n\n if self.head and self.head.data==key:\n self.head=self.head.next\n self.curr=self.head\n else:\n while curr is not None:\n if curr.data==key:\n prev.next=curr.next\n # keep the tail pointer valid when the tail node is removed\n if curr is self.curr:\n self.curr=prev\n break\n prev=curr\n curr=curr.next\n\n\nlinkedlist=SinglyLinkedList()\n\nlinkedlist.append(1)\nlinkedlist.append(2)\nlinkedlist.append(3)\nlinkedlist.append(4)\nlinkedlist.find(2)\nlinkedlist.remove(2)\nlinkedlist.remove(4)\nlinkedlist.remove(1)\nlinkedlist.remove(3)\nlinkedlist.remove(3)\nlinkedlist.find(6)\nlinkedlist.append(5)\nlinkedlist.find(6)\nlinkedlist.append(6)\nlinkedlist.append(7)\nlinkedlist.find(6)\n","sub_path":"Exercise_3.py","file_name":"Exercise_3.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247334799","text":"def posOfRightMostDiffBit(m,n):\n #Your code here\n if m==0 or n==0:\n res = m|n\n else:\n res = m^n\n c = 1\n while res!=0:\n if res&1 == 1:\n return c\n res>>=1\n c+=1\n return -1\n\nprint(posOfRightMostDiffBit(8,2))","sub_path":"rightmostbit.py","file_name":"rightmostbit.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"80582240","text":"#Zipf's law\n#Plot a log-log graph with word frequency rank on the horizontal axis and frequency on the vertical axis.\n\n\n\nimport knock30\nimport knock36\nimport matplotlib.pyplot as plt\n\n\ndef plot_words_hist_log(counter, file):\n\n plt.figure()\n plt.xscale('log')\n plt.yscale('log')\n plt.plot(range(1, len(list(counter))+1), sorted(list(counter.values()), reverse=True))\n plt.savefig(file)\n\n\nif __name__ == '__main__':\n inputfile = 'neko.txt.mecab'\n outputfile = 'neko.mecab_words_hist_log.png'\n f = open(inputfile, 'r')\n words = []\n counts = []\n sentences = knock30.mecab_reader(f)\n counter = knock36.count_word(sentences)\n plot_words_hist_log(counter, outputfile)\n f.close()","sub_path":"yuting/chapter04/knock39.py","file_name":"knock39.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"631620991","text":"import os\n\nfrom api import session_oracle, sentry\nfrom api.patrimony.models.type_patrimony import TypePatrimony\n\n\ndef select_type_patrimony():\n return session_oracle.query(TypePatrimony).all()\n\n\ndef select_id_patrimony_by_type(cursor, type_patrimony):\n query = \"\"\"SELECT * FROM \"\"\" + \\\n os.environ.get('ORACLE_DB_NAME', '') + \\\n \"\"\".bc_tipo_patrimonio WHERE tipo_patrimonio = :TYPE_PATRIMONY\"\"\"\n try:\n cursor.execute(query, {\n \"TYPE_PATRIMONY\": type_patrimony\n })\n return cursor.fetchone()\n except Exception:\n sentry.captureException()\n return None\n\n\ndef exec_proc_insert_patrimony(cursor, id_person, id_type_patrimony, description_patrimony, value_patrimony):\n try:\n # conn = session_oracle.bind.raw_connection()\n # cursor = conn.cursor()\n cursor.callproc(os.environ.get('ORACLE_DB_NAME', '') + \".pbcapimanipulapatrimonio.insere\",\n parameters=[id_person, id_type_patrimony, description_patrimony, value_patrimony])\n # conn.commit()\n return True\n except Exception as e:\n # 
session_oracle.rollback()\n sentry.captureException()\n return False\n","sub_path":"Allgoo/andbank-service/api/patrimony/business/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"127427698","text":"from .base import AutoFocusAPI\nfrom ..exceptions import ClientError\n\n\nclass ThreatIntelFactory(AutoFocusAPI):\n\n def _parse_response_data(self, resp_data):\n from ..models.tic import ThreatIntelCard\n return ThreatIntelCard(resp_data[\"json\"][\"indicator\"])\n\n def _get_tic_card(self, params):\n try:\n resp = self._api_fetch(\"/tic\", params=params)\n except ClientError as e:\n if e.response[\"status_code\"] == 404:\n raise ClientError(\"Threat Intel Summary card unavailable for sample\")\n raise\n\n return self._parse_response_data(resp)\n\n def get_tic_summary(self, sha256=None, ipv4=None, ipv6=None, domain=None, url=None, include_tags=True):\n \"\"\"\n Args:\n sha256 (str): sample sha256 to pull indicator summary for\n ipv4 (str): ipv4 address to pull indicator summary for\n ipv6 (str): ipv6 address to pull indicator summary for\n domain (str): domain to pull indicator summary for\n url (str): url to pull indicator summary for\n include_tags (Optional[bool]): include AF tag data in response (default True)\n\n Returns:\n ThreatIntelCard: Object containing summary information\n\n Raises:\n ClientError: In the case that the client did something unexpected\n ServerError: In the case that the server did something unexpected\n \"\"\"\n params = {\n \"includeTags\": include_tags\n }\n\n if not any([sha256, ipv4, ipv6, domain, url]):\n raise ClientError(\"you must provide one of: sha256, ipv4, ipv6, domain, url\")\n\n if sha256:\n params.update({\n \"indicatorType\": \"FILEHASH\",\n \"indicatorValue\": sha256\n })\n elif ipv4:\n params.update({\n \"indicatorType\": \"ipv4_address\",\n \"indicatorValue\": ipv4\n })\n elif ipv6:\n params.update({\n \"indicatorType\": \"ipv6_address\",\n \"indicatorValue\": ipv6\n })\n elif url:\n params.update({\n \"indicatorType\": \"URL\",\n \"indicatorValue\": url\n })\n elif domain:\n params.update({\n \"indicatorType\": \"DOMAIN\",\n \"indicatorValue\": domain\n })\n else:\n raise NotImplementedError(\"Unexpected indicator type provided\")\n\n return self._get_tic_card(params)\n","sub_path":"autofocus/factories/tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"605745821","text":"# CSV Cleaner \n# Used to remove unessecery data from a csv before it is used to train a model\n# Will return the CSV as a dataframe\n# Lee O'Connell 2019\n\nimport pandas as pd\nimport numpy as np\nimport re\n\n\n\n\nclass Clean:\n def __init__(self, csv):\n self.csv = csv\n try:\n self.df = pd.read_csv(csv)\n print(\"{} lines read.\".format(self.df.size))\n except:\n # print(self.df.head(5))\n print(\"Erorr reading {}, please try again.\".format(csv))\n \n def show(self, n):\n print(self.df.head(n))\n \n def remove_col(self, columns):\n # need to add try execpt on this to avoid incorrect inputs\n print(columns)\n column_list = columns.split(',')\n count = 0 \n for column in column_list:\n try:\n self.df = self.df.drop([column], axis=1)\n count += 1 \n except: \n print(\"Incorrect parameters, try again.\")\n return\n \n print(\"{} columns removed.\".format(count))\n\n\n def remove_na(self):\n self.df.dropna()\n for column in 
self.df.columns:\n try:\n if(self.df[column].dtype == np.object):\n # normalize text columns: cast to str, lowercase, strip non-alphanumeric characters\n self.df[column] = self.df[column].astype('str')\n self.df[column] = self.df[column].apply(lambda x: x.lower())\n self.df[column] = self.df[column].apply(lambda x: re.sub('[^a-zA-Z0-9\\s]','',x))\n except:\n print(\"skip\")\n continue\n \n# test code\ntest = Clean(\"/Users/leeoconnell/Documents/Python Projects/AI Projects/CSV_cleaner/top_100_canadian_beers.csv\")\n \ntest.remove_col(\"name,rank,style\")\ntest.show(5)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"23014968","text":"\"\"\"\n File that manages service startup\n\"\"\"\n# Required libraries\nfrom framework.apploader import AppLauncher # Application loader\n\n# Launch the application\nif __name__ == \"__main__\":\n app = AppLauncher()\n try:\n app.start()\n except KeyboardInterrupt:\n app.stop()\n else:\n print(\"Something wrong...\")\n app.stop()\n\nprint(\"\\nService '{}' was down!\\n\".format(app.cmd.module_name))\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"397755498","text":"from unittest import TestCase\nimport pandas as pd\nimport os\nfrom hydrocomp.iha.iha import IHA\n\n\nclass TestIHA(TestCase):\n\n path = os.path.abspath(os.path.join('Medicoes', 'dadosXingo_nat.csv'))\n data = pd.read_csv(path, ',', index_col=0, parse_dates=True)\n\n iha_obj_nat = IHA(data, month_water=1, status='pre', statistic='non-parametric', central_metric='mean',\n variation_metric='cv', type_criterion=None, type_threshold=\"stationary\", duration=0,\n threshold_high=4813, threshold_low=569.5, source='ONS', station='XINGO')\n\n @staticmethod\n def read_iha(file):\n path = os.path.abspath(os.path.join('test_data', file))\n data = pd.read_csv(path, ';', index_col=0)\n return data\n\n def test(self, data, data2):\n for i in data.index:\n self.assertEqual(data.Means[i], data2.Means[i])\n self.assertEqual(data['Coeff. of Var.'][i], data2['Coeff. 
of Var.'][i])\n\n def test_mean_month(self):\n data = self.read_iha('Group1.csv')\n data_group, data2 = self.iha_obj_nat.magnitude()\n print(data_group)\n print(data2)\n self.test(data, data2)\n\n def test_moving_averages(self):\n data = self.read_iha('Group2.csv')\n data_group, data2 = self.iha_obj_nat.magnitude_and_duration()\n print(data)\n print(data2)\n self.test(data, data2)\n\n def test_year_water(self):\n year_water = self.iha_obj_nat.get_month_start()\n self.assertEqual((9, 'AS-SEP'), year_water, 'Year Water: %s, %s' % (9, 'SEP'))\n\n def test_days_julian(self):\n data = self.read_iha('Group3.csv')\n data_group, data2 = self.iha_obj_nat.timing_extreme()\n print(data)\n print(data2)\n self.test(data, data2)\n\n def test_pulse(self):\n data = self.read_iha('Group4.csv')\n data_group, data2, partial_high_nat, partial_low_nat = self.iha_obj_nat.frequency_and_duration()\n print(data)\n print(data2)\n self.test(data, data2)\n\n def test_rise_fall(self):\n data = self.read_iha('Group5.csv')\n data_group, data2 = self.iha_obj_nat.rate_and_frequency()\n self.test(data, data2)\n","sub_path":"hydrocomp/iha/test_iha.py","file_name":"test_iha.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"496396228","text":"import numpy as np\nimport re\n\nfilestart_g1_nodust= \"C:\\\\Users\\\\Kristiina\\\\Documents\\\\GitHub\\\\Cosmic-Reionization\\\\Data\\\\GalaxySpectraTrain\\\\Shimizu_z7_Geneva_lowZextended_z7_cov\"\nfilestart_g2_nodust= \"C:\\\\Users\\\\Kristiina\\\\Documents\\\\GitHub\\\\Cosmic-Reionization\\\\Data\\\\GalaxySpectraTest\\\\Shimizu_New_z7_Geneva_lowZextended_z7_cov\"\nfilestart_g2_dust= \"C:\\\\Users\\\\Kristiina\\\\Documents\\\\GitHub\\\\Cosmic-Reionization\\\\Data\\\\GalaxySpectraTrainDust\\\\Shimizu_New_z7_Geneva_lowZextended_z7_cov\"\nfileend_g1_nodust = \"_int4_nodust_ForML.txt\"\nfileend_g2_nodust = \"_int4_nodust_ML.txt\"\nfileend_g2_dust = \"_int4_dust43_ML.txt\"\n\nwavelengths = np.array([])\ng1_nodust = np.array([])\ng2_nodust = np.array([])\ng2_dust = np.array([])\n\n######################### Read training data #################\n\n\t\n\t\n\t\nx = []\ng = -1\nwl = []\t\nfor i in range(1100):\n\tx.append([])\n\nfor i in np.arange(0,1.1,0.1):\n\tfilename = filestart_g1_nodust + str(i) + fileend_g1_nodust\n\twith open(filename) as infile:\n\t\tfor line in infile:\n\t\t\tif len(re.split(' |x',line)) == 2:\n\t\t\t\tif g == 1:\n\t\t\t\t\twl.append(float(line.split()[0]))\n\t\t\t\tx[g].append(float(line.split()[1]))\n\t\t\telif line.partition(' ')[0] == \"SPH\":\n\t\t\t\tg = g+1\n\ng1_nodust = np.array(x)\nwavelengths = np.array(wl)\n#############################################################\t\t\t\t\n\t\t\t\t\n\n\n######################### Read test data #################\n\n\t\t\nxt = []\ng = -1\n\nfor i in range(1100):\n\txt.append([])\n\nfor i in np.arange(0,1.1,0.1):\n\tfilename = filestart_g2_nodust+str(i)+fileend_g2_nodust\n\twith open(filename) as infile:\n\t\tfor line in infile:\n\t\t\tif len(re.split(' |x',line)) == 2:\n\t\t\t\tif float(line.split()[0]) in wavelengths:\n\t\t\t\t\txt[g].append(float(line.split()[1]))\n\t\t\telif line.partition(' ')[0] == \"SPH\":\n\t\t\t\tg = g+1\n\t\t\t\t\n\ng2_nodust = np.array(xt)\n\n###############################################################\n\n\n\n######################### Read train data with dust #################\n\nx = []\ng = -1\nwl = []\t\nfor i in range(1100):\n\tx.append([])\n\nfor i in np.arange(0,1.1,0.1):\n\tfilename = 
filestart_g2_dust + str(i) + fileend_g2_dust\n\twith open(filename) as infile:\n\t\tfor line in infile:\n\t\t\tif len(re.split(' |x',line)) == 2:\n\t\t\t\tif g == 1:\n\t\t\t\t\twl.append(float(line.split()[0]))\n\t\t\t\tx[g].append(float(line.split()[1]))\n\t\t\telif line.partition(' ')[0] == \"SPH\":\n\t\t\t\tg = g+1\n\ng2_dust = np.array(x)\n#############################################################\n\n","sub_path":"Code/galaxy_parser.py","file_name":"galaxy_parser.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505039760","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 25 14:27:28 2015\n\n@author: patriciawiharto\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.lda import LDA\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nimport pandas as pd\n\n\n########################################################################\n# UNIT 4 LESSON 7 PART 2\n# PCA\n########################################################################\n\n### LOAD DATA\niris = datasets.load_iris()\nx = iris.data\ny = iris.target\nspecies = iris.target_names\ndf = pd.DataFrame(x, columns = ['SL', 'SW', 'PL', 'PW'])\ndf['Species'] = y\n#print species\n\n### PCA\npca = PCA(n_components = 2)\nreduced_x1 = pca.fit_transform(x)\n#print reduced_x1\n\n### LDA\nlda = LDA(n_components = 2)\nreduced_x2 = lda.fit_transform(x, y)\n#print reduced_x2\n\nprint('explained variance ratio (first two components): %s'\n % str(pca.explained_variance_ratio_))\n\n### PLOT ORIGINAL AND PCA SAMPLES\n# ORIGINAL\n#plt.figure(figsize = (8,6))\nplt.plot(x[0:51, 2], x[0:51, 3], 'ro', label = 'setosa')\nplt.plot(x[51:101, 2], x[51:101, 3], 'go', label = 'versicolor')\nplt.plot(x[101:, 2], x[101:, 3], 'bo', label = 'virginica')\nplt.legend(loc = 'best')\nplt.title('Raw Plot of Iris Dataset')\nplt.show()\n\n# PCA\nfor c, i, s in zip('rgb', [0, 1, 2], species):\n plt.scatter(reduced_x1[y == i, 0], reduced_x1[y == i, 1], c = c, label = s)\nplt.legend()\nplt.title('PCA of Iris Data')\nplt.show()\n\n### KNN USING RAW DATA\n## Predict the best number of neighbours\ntrain, test = train_test_split(df)\n\nfor k in range(5, 100, 5):\n model = KNeighborsClassifier(k)\n model.fit(train[:, :4], train[:, 4])\n # Make predictions\n expected = test[:, 4]\n predicted = model.predict(test[:, :4])\n # Misclassification rate\n error_rate = (predicted != expected).mean()\n print('%d: %.2f' % (k, error_rate))\n ","sub_path":"reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"335478733","text":"#!/usr/bin/env python3\n'''\n解析 pubmed 导出的xml文件,并且转换成xlsx格式\n第二版本:\n\nElementTree 对象的使用:\n两个对象, ElelmentTree 和 Element对象\nElementTree对象是整个XML的对象, 一般使用parse 方法解析整个xml文件,使用 getroot()方法获得根节点\nElement 对象包含以下三个层面的知识点:\n1. 属性:\ntag: 元素名称\nattrib: 属性 遵循 Key:Value 的字典格式, 有 Keys() Items() 以及get(key, default=\"\") 方法\ntext: 文本元素\n其他子元素\n\n2. 针对属性的操作(仅针对读取操作)\nkeys()\nitems()\nget()\n\n3. 
针对后代子元素操作\nfind()\n返回tag匹配的下一级别子元素,如果有多个子元素共享一个tag,则返回第一个\n\nfindall\n返回tag匹配的下一级别子元素,如果有多个子元素共享一个tag,则列表形式返回所有\n\n\n'''\nimport sys\nimport xml.etree.ElementTree as ET\nimport pandas as pd\nimport numpy as np\nimport re\n\ndef main(xmlfile, out):\n\n tree = ET.parse(xmlfile)\n root = tree.getroot()\n author_dict = {} # key: last authr value:{\"count\":\"num of publications\",\"publication\":{pandas dataframe\"}\n df = pd.DataFrame(columns=[\"corrauthor\",\"affli_corrauthor\",\"PMID\", \"DOI\", \"Journal\", \"Journal_abbreviation\", \"Year\",\n \"First_author\", \"First_author_affiliation\", \"Title\", \"Abstract\",\"PubType\"])\n null_str = \"\"\n for record in root:\n \n PMID = null_str \n DOI = null_str \n Journal_name = null_str \n Jounral_abbr = null_str \n Year = null_str \n First_author = null_str \n First_author_Affiliation = null_str \n ArticleTitle = null_str \n Abstract = null_str \n PubType = null_str\n corrspondingauthor = null_str\n corrspondingauthor_aff = null_str\n line = {} \n \n if record.tag == \"PubmedArticle\":\n try:\n ePMID = record.find(\"MedlineCitation/PMID\")\n PMID = ePMID.text\n except:\n PMID = null_str\n try:\n '''\n 更新:\n 防止Title中出现特殊标记, 使用与Abstact相同处理的方式\n '''\n eTitle = record.find(\"MedlineCitation/Article/ArticleTitle\")\n ArticleTitle = construct_full_text_sep( eTitle, sep = \" \")\n\n except:\n ArticleTitle = null_str\n try:\n ePubtype= record.find(\"MedlineCitation/Article/PublicationTypeList\")\n PubType = construct_full_text_sep(ePubtype,sep=\"\\n\")\n except:\n PubType = null_str\n try:\n eJournalName = record.find(\n \"MedlineCitation/Article/Journal/Title\")\n Journal_name = eJournalName.text\n except:\n Journal_name = null_str\n\n try:\n eJournal_abbr = record.find(\n \"MedlineCitation/Article/Journal/ISOAbbreviation\")\n Jounral_abbr = eJournal_abbr.text\n except:\n Jounral_abbr = null_str\n\n try:\n Abstract = \"\"\n eAbstract = record.findall(\n \"MedlineCitation/Article/Abstract/AbstractText\")\n for i in eAbstract:\n if \"Label\" in i.attrib.keys():\n Abstract = Abstract + str(i.attrib['Label']) + \"\\n\"\n Abstract = Abstract + construct_full_text_sep(i,sep = \" \") + \"\\n\"\n\n except:\n Abstract = null_str\n try:\n eFisrt_author = record.find(\n \"MedlineCitation/Article/AuthorList/Author\")\n First_author = eFisrt_author.find(\n \"ForeName\").text + \" \" + eFisrt_author.find(\"LastName\").text\n except:\n First_author = null_str\n\n try:\n ecorrpondingauthor = record.findall(\"MedlineCitation/Article/AuthorList/Author\")\n for i,data in enumerate(ecorrpondingauthor):\n if i == len(ecorrpondingauthor) -1:\n corrspondingauthor = data.find(\"ForeName\").text + \" \" + data.find(\"LastName\").text\n corrspondingauthor_aff = data.find(\"AffiliationInfo/Affiliation\").text\n except:\n pass \n\n\n try:\n eFirst_author_Affiliation = record.find(\n \"MedlineCitation/Article/AuthorList/Author\")\n First_author_Affiliation = eFirst_author_Affiliation.find(\n \"AffiliationInfo/Affiliation\").text\n except:\n First_author_Affiliation = null_str\n try:\n eDOI = record.findall(\"MedlineCitation/Article/ELocationID\")\n for i in eDOI:\n if i.attrib[\"EIdType\"] == \"doi\":\n DOI = i.text\n else:\n DOI = null_str\n except:\n DOI = null_str\n try:\n eYear = record.find(\"MedlineCitation/Article/Journal/JournalIssue/PubDate/Year\")\n Year = eYear.text\n except:\n Year = null_str\n line = {\"corrauthor\":corrspondingauthor,\"affli_corrauthor\":corrspondingauthor_aff ,\"PMID\": PMID, \"DOI\": DOI, \"Journal\": Journal_name, \"Journal_abbreviation\": 
Jounral_abbr, \"Year\": Year, \"First_author\": First_author,\n \"First_author_affiliation\": First_author_Affiliation, \"Title\": ArticleTitle, \"Abstract\": Abstract,\"PubType\":PubType} # 在pandas中添加1列的方法,构建一个字典.\n \n df = df.append(line, ignore_index=True)\n\n df.sort_values(by=\"corrauthor\",inplace=True)\n df.to_excel(out, index=False)\n\n\ndef construct_full_text_sep(et,sep=\"\"):\n ''' et is element, not list'''\n tmp = []\n for i in et.itertext():\n if not re.match(r\"^\\n\",i):\n tmp.append(i.strip())\n return(sep.join(tmp))\n\nif __name__ == \"__main__\":\n #main(\"/Disk168G/sunyl_workspace/pubmed/essential_thrombocythemia.xml\",\"/Disk168G/sunyl_workspace/pubmed/essential_thrombocythemia2.xlsx\")\n main(\"/Disk168G/libzhengxc/libzhengxc/test/plosbio_bioinfor_BMC_bioinfor.xml\",\"/Disk168G/libzhengxc/libzhengxc/test/pubmed_Bioinformatcs_plos_BMC_corrauthor.xlsx\")\n \n","sub_path":"scripts/Parse_PubmedXml_Journal_bioinformatcs_plos_BMC.py","file_name":"Parse_PubmedXml_Journal_bioinformatcs_plos_BMC.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"606375939","text":"def sendMail():\r\n global wnd, mailEntry\r\n\r\n tkinter.messagebox.showinfo(\"정보\", \"지도에 표시된 지역의 정보를 메일로 전송합니다.\\n 만약, 가게를 선택하지 않았다면 먼저 선택해주세요.\")\r\n wnd = Tk()\r\n TempFont = font.Font(wnd, size=15, weight='bold', family='Consolas')\r\n\r\n Label(wnd, font=TempFont, width=10, borderwidth=12, text=\"받는 사람\").grid(row=0, column=0)\r\n mailEntry = Entry(wnd, font=TempFont, width=40, borderwidth=10, relief='sunken')\r\n mailEntry.grid(row=0, column=1)\r\n\r\n send = Button(wnd, font=TempFont, text=\"보내기\", command=Send_To_user)\r\n send.grid(row=1, column=1, sticky='e')\r\n\r\ndef Send_To_user():\r\n global wnd, mailEntry, MapEntry\r\n\r\n import smtplib\r\n from email.mime.text import MIMEText # 본문내용을 전송할 때 사용되는 모듈\r\n from email.mime.multipart import MIMEMultipart # 메시지를 보낼 때 메시지에 대한 모듈\r\n\r\n\r\n if not MapEntry.get().isdecimal():\r\n tkinter.messagebox.showerror(\"에러\", \"가게를 선택해주세요.\")\r\n wnd.destroy()\r\n return\r\n\r\n msg = MIMEMultipart('alternative')\r\n\r\n host = \"smtp.gmail.com\"\r\n port = \"587\"\r\n title = \"패스트푸드점 검색 결과\"\r\n num = int(MapEntry.get())\r\n\r\n senderAddr = \"moss2223@gmail.com\"\r\n password = \"비밀번호\"\r\n recipientAddr = mailEntry.get()\r\n\r\n if DataList[num][1] is None:\r\n dataAddr = DataList[num][2]\r\n else:\r\n dataAddr =DataList[num][1]\r\n\r\n msgtext = \"시설명: \" + DataList[num][0] + \"\\n\" + \"주소: \" + dataAddr \\\r\n + \"\\n\"\r\n\r\n msg['Subject'] = title\r\n msg['From'] = senderAddr\r\n msg['To'] = recipientAddr\r\n\r\n msgPart = MIMEText(msgtext, 'plain')\r\n msg.attach(msgPart)\r\n\r\n s = smtplib.SMTP(host, port)\r\n s.ehlo()\r\n s.starttls()\r\n s.ehlo()\r\n s.login(senderAddr, password)\r\n s.sendmail(senderAddr, [recipientAddr], msg.as_string(()))\r\n s.close()\r\n wnd.destroy()\r\n\r\n tkinter.messagebox.showinfo(\"메일 보내기\", \"메일 전송을 완료하였습니다.\")\r\n\r\ndef Init_Mail():\r\n global mailButton\r\n TempFont = font.Font(g_Tk, size=15, weight='bold', family='Consolas')\r\n\r\n mailButton = Button(g_Tk, font=TempFont, text=\"Gmail\", command=sendMail)\r\n mailButton.place(x=w/2- 50, y=h/10*9)\r\n","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182281907","text":"##\n# This module defines extensions to the graphics module for 
drawing\n# a pie chart and a chart legend.\n#\n\n## Draws a pie chart on a canvas inside an invisible bounding square.\n# @param x x-coord of the upper-left corner of the bounding square\n# @param y y-coord of the upper-left corner of the bounding square\n# @param diameter the diameter of the bounding square\n# @param slices a list of dictionaries that specify the \"size\" and\n# \"color\" of each slice.\n# @param canvas the canvas on which to draw the pie chart\n#\ndef drawPieChart(x, y, diameter, slices, canvas) :\n startAngle = 0\n for piece in slices :\n extent = 360 * piece[\"size\"]\n canvas.setColor(piece[\"color\"])\n canvas.drawArc(x, y, diameter, startAngle, extent)\n startAngle = startAngle + extent\n \n## Draws a legend, consisting of a colored box and text, on a canvas.\n# @param x x-coord of the starting position of the entries\n# @param y y-coord of the top position of the first entry\n# @param entries a list of dictionaries that specify the information \n# for each entry: \"color\", \"label\", \"size\"\n# @param canvas the canvas on which to draw the legend\n#\ndef drawLegend(x, y, entries, canvas) :\n for entry in entries :\n canvas.setColor(entry[\"color\"])\n canvas.drawRect(x, y, 10, 10)\n canvas.setColor(\"black\")\n text = entry[\"label\"] + \" (%.1f%%)\" % (entry[\"size\"] * 100)\n canvas.drawText(x + 15, y, text)\n y = y + 20 \n","sub_path":"P4EO_source/ch08/worked_example_3/piechart.py","file_name":"piechart.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"13892303","text":"import threading\nimport time\nimport os\nimport datetime as dt\nimport logging\nimport uuid\nfrom config import HISTORY_DIR, TEST_HISTORY_DIR, SESSION_REMOVE_TIMEOUT, SESSION_RESET_TIMEOUT\nfrom response_cache import ResponseCache\n\nlogger = logging.getLogger('hr.chatbot.server.session')\n\nclass SessionData(object): pass\n\nclass Session(object):\n def __init__(self, sid):\n self.sid = sid\n self.sdata = SessionData()\n self.cache = ResponseCache()\n self.created = dt.datetime.now()\n self.init = self.created\n self.characters = []\n dirname = os.path.join(HISTORY_DIR, self.created.strftime('%Y%m%d'))\n test_dirname = os.path.join(TEST_HISTORY_DIR, self.created.strftime('%Y%m%d'))\n self.fname = os.path.join(dirname, '{}.csv'.format(self.sid))\n self.test_fname = os.path.join(test_dirname, '{}.csv'.format(self.sid))\n self.dump_file = None\n self.removed = False\n self.active = False\n self.last_active_time = None\n self.test = False\n\n def set_test(self, test):\n if test:\n logger.info(\"Set test session\")\n self.test = test\n\n def add(self, question, answer, **kwargs):\n if not self.removed:\n self.cache.add(question, answer, **kwargs)\n self.last_active_time = self.cache.last_time\n self.active = True\n return True\n return False\n\n def rate(self, rate, idx):\n return self.cache.rate(rate, idx)\n\n def reset(self):\n self.active = False\n self.dump()\n self.cache.clean()\n self.init = dt.datetime.now()\n for c in self.characters:\n try:\n c.refresh(self.sid)\n except NotImplementedError:\n pass\n\n def check(self, question, answer, lang):\n return self.cache.check(question, answer, lang)\n\n def dump(self):\n if self.test:\n self.dump_file = self.test_fname\n else:\n self.dump_file = self.fname\n return self.cache.dump(self.dump_file)\n\n def get_session_data(self):\n return self.sdata\n\n def since_idle(self, since):\n if self.last_active_time is not None:\n return (since - 
self.last_active_time).total_seconds()\n else:\n return (since - self.created).total_seconds()\n\n def __repr__(self):\n return \"\".format(\n self.sid, self.init, self.cache.last_time)\n\nclass Locker(object):\n def __init__(self):\n self._lock = threading.RLock()\n\n def lock(self):\n self._lock.acquire()\n\n def unlock(self):\n self._lock.release()\n\nclass SessionManager(object):\n\n def __init__(self, auto_clean=True):\n self._sessions = dict()\n self._users = dict()\n self._locker = Locker()\n self._session_cleaner = threading.Thread(\n target=self._clean_sessions, name=\"SessionCleaner\")\n self._session_cleaner.daemon = True\n if auto_clean:\n self._session_cleaner.start()\n\n def _threadsafe(f):\n def wrap(self, *args, **kwargs):\n self._locker.lock()\n try:\n return f(self, *args, **kwargs)\n finally:\n self._locker.unlock()\n return wrap\n \n @_threadsafe\n def remove_session(self, sid):\n if sid in self._sessions:\n session = self._sessions.pop(sid)\n session.dump()\n session.removed = True\n del session\n logger.info(\"Removed session {}\".format(sid))\n\n def reset_session(self, sid):\n if sid in self._sessions:\n session = self._sessions.get(sid)\n if session.active:\n session.reset()\n logger.info(\"Resetted session {}\".format(sid))\n\n def get_session(self, sid):\n if sid is not None:\n return self._sessions.get(sid, None)\n\n def get_sid(self, user):\n if user in self._users:\n sid = self._users.get(user)\n session = self._sessions.get(sid)\n if session:\n return sid\n\n def gen_sid(self):\n return str(uuid.uuid1())\n\n @_threadsafe\n def add_session(self, user, sid):\n if sid in self._sessions:\n return False\n self._sessions[sid] = Session(sid)\n self._users[user] = sid\n return True\n\n def start_session(self, user, test=False):\n _sid = self.get_sid(user)\n if not _sid:\n _sid = self.gen_sid()\n self.add_session(user, _sid)\n session = self.get_session(_sid)\n assert(session is not None)\n session.set_test(test)\n return _sid\n\n def has_session(self, sid):\n return sid in self._sessions\n\n def _clean_sessions(self):\n while True:\n reset_sessions, remove_sessions = [], []\n since = dt.datetime.now()\n for sid, s in self._sessions.iteritems():\n if SESSION_RESET_TIMEOUT < s.since_idle(since) < SESSION_REMOVE_TIMEOUT:\n reset_sessions.append(sid)\n if s.since_idle(since) > SESSION_REMOVE_TIMEOUT:\n remove_sessions.append(sid)\n for sid in reset_sessions:\n self.reset_session(sid)\n for sid in remove_sessions:\n self.remove_session(sid)\n time.sleep(0.1)\n\nclass ChatSessionManager(SessionManager):\n def __init__(self, auto_clean=True):\n super(ChatSessionManager, self).__init__(auto_clean)\n\n def dump_all(self):\n fnames = []\n for sid, sess in self._sessions.iteritems():\n if sess and sess.dump():\n fnames.append(sess.dump_file)\n return fnames\n\n def dump(self, sid):\n fname = None\n sess = self._sessions.get(sid)\n if sess and sess.dump():\n fname = sess.dump_file\n return fname\n","sub_path":"src/chatbot/src/chatbot/server/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503593485","text":"#2.Напишіть програму, яка пропонує користувачу ввести свій вік, \n#після чого виводить повідомлення про те чи вік є парним чи непарним числом. \n#Необхідно передбачити можливість введення від’ємного числа, в цьому випадку \n#згенерувати власну виняткову ситуацію. 
Головний код має викликати функцію, \n#яка обробляє введену інформацію.\n\nclass CustomError(Exception): \n def __init__(self, data): \n self.data = data\n def __str__(self):\n return repr(self.data) \n\ndef age ():\n try:\n userNumber = int(input(\"Enter your age \"))\n if userNumber < 0:\n raise CustomError(\"That is not a positive number!\")\n if userNumber == 0:\n raise ValueError(\"Zero not allowed!\")\n if userNumber % 2 == 0:\n print(\"This age is EVEN - {}\".format(userNumber))\n if userNumber % 2 != 0:\n print(\"This age is ODD - {}\".format(userNumber)) \n except ValueError as e:\n print(e)\n except CustomError as d:\n print(d) \n finally:\n print(\"You won!\") \n\nage() \n\n\n\n\n","sub_path":"ClassWork_9/oop_9_2.py","file_name":"oop_9_2.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616872823","text":"import os\n\nfrom distutils.core import Command\nimport setuptools\nfrom setuptools.command.test import test\n\n\ndef read(filename):\n return open(os.path.join(os.path.dirname(__file__), filename)).read()\n\n\ndef get_version():\n with open('VERSION', 'r') as f:\n return f.readline().rstrip().split('=')[1]\n\n\ndef get_tarball_filename():\n return 'django-rest-sessions-{}.tar.gz'.format(get_version())\n\n\nclass TestXMLOutput(test):\n @staticmethod\n def _resolve_as_ep(val):\n from xmlrunner import XMLTestRunner\n if val is None:\n return XMLTestRunner(output='test-reports')\n return test._resolve_as_ep(val)\n\npackages = ['rest_sessions']\n\nsetup_kwargs = dict(\n name='django-rest-sessions',\n version=get_version(),\n description= \"Rest Session Backend For Django\",\n long_description=read(\"README.rst\"),\n keywords='django, sessions,',\n author='Hodur Sigurdor Heidarsson',\n author_email='hodur@temposoftware.com',\n url='https://stash.temposoftware.com/projects/TMC/repos/django-rest-sessions/browse',\n license='MIT',\n packages=packages,\n zip_safe=False,\n install_requires=['Django >= 1.8', 'requests>=2.7.0'],\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Intended Audience :: Developers\",\n \"Framework :: Django\",\n \"Environment :: Web Environment\",\n ],\n test_suite='tests',\n cmdclass={\n 'xml_test': TestXMLOutput\n }\n)\n\nsetuptools.setup(**setup_kwargs)\n","sub_path":"pypi_install_script/django-rest-sessions-0.2.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216950804","text":"\ntry:\n from argparse import ArgumentParser\n from argcomplete import autocomplete\nexcept:\n err = \"\"\"\n You haven't installed the required dependencies.\n \"\"\"\n print(err)\n sys.exit(0)\n\n\n\nclass Args:\n def validate_crop_args(args):\n return\n\n def _is_valid_file(parser, arg):\n if not os.path.exists(arg):\n parser.error(\"The file %s does not exist!\" % arg)\n else:\n return arg\n\n def get_parser():\n parser = ArgumentParser()\n parser.add_argument(\"inp\",nargs='?',\n type=lambda x: Args._is_valid_file(trim_parser,x),\n help=\"input video file ex: input.mp4\")\n subparsers = parser.add_subparsers(dest='action')\n\n trim_parser = subparsers.add_parser('trim')\n trim_parser.add_argument(\"-s\", \"--start_time\", help=\"start time for cuting in format hh:mm:ss or mm:ss\")\n time_group = trim_parser.add_mutually_exclusive_group()\n 
time_group.add_argument(\"-e\", \"--end_time\", help=\"end time for cuting in format hh:mm:ss or mm:ss\")\n time_group.add_argument(\"-t\", \"--time\", help=\"clip duration in format hh:mm:ss or mm:ss\")\n trim_parser.add_argument(\"-o\", \"--output\",help=\"output file name, ex: output.mp4\")\n\n crop_parser = subparsers.add_parser('crop')\n crop_parser.add_argument(\"-w\", \"--width\",type=int,default=100,help=\"crop video window width (on scale of 100)\")\n crop_parser.add_argument(\"-l\", \"--height\",type=int,default=100,help=\"crop video window height (on scale of 100)\")\n crop_parser.add_argument(\"-x\", \"--x_point\",type=int,default=0,help=\"crop video window top-left points x (on scale of 100)\")\n crop_parser.add_argument(\"-y\", \"--y_point\",type=int,default=0,help=\"crop video window top-left points y (on scale of 100)\")\n crop_parser.add_argument(\"-o\", \"--output\",help=\"output file name, ex: output.mp4\")\n\n resize_parser = subparsers.add_parser('resize')\n resize_parser.add_argument(\"-q\", \"--quality\",type=int,default=100,help=\"output video quality (on scale of 100)\")\n resize_parser.add_argument(\"-o\", \"--output\",help=\"output file name, ex: output.mp4\")\n\n\n autocomplete(parser)\n parser = parser.parse_args()\n return parser\n","sub_path":"medipack/lib/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130650723","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity #the fixed max-size, which in test is 5\n self.buffer = [] #the values in the buffer array\n self.index_targeted = 0 #the index of the item that will be popped out of the array, initialized at 0 because once array is at capacity, its going to overwrite the 0 index first\n\n\n #FIFO, first in first out\n #Queue with a fixed max-size\n def append(self, item):\n #check if the length of buffer array is less than the capacity allowed (fixed max-size)\n if len(self.buffer) < self.capacity:\n #if length is smaller, can go ahead and add item to array (up to 5 items in this case)\n self.buffer.append(item)\n\n #if the length of buffer array is at capacity, need to overwrite the oldest item with the newest item\n elif len(self.buffer) == self.capacity:\n #and if the index is the same as capacity, 5, \n if self.index_targeted == self.capacity:\n #then we are going to set the index 5 back to 0\n self.index_targeted = 0\n #we pop the item out at the index specified\n self.buffer.pop(self.index_targeted)\n #at the location of the index specified, insert the item \n self.buffer.insert(self.index_targeted, item)\n #increase the index by 1 each time so that it keeps going in a loop. 
It won't go forever because it will reach capacity once its at 5\n self.index_targeted += 1\n\n def get(self):\n return self.buffer\n\n\n#--- testing ---#\nbuffer = RingBuffer(5)\n\nprint(buffer.get())\n\n(buffer.append('a'))\n(buffer.append('b'))\n(buffer.append('c'))\n(buffer.append('d'))\n(buffer.append('e'))\n(buffer.append('f'))\n(buffer.append('g'))\n\nprint(buffer.get())","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175874452","text":"\"\"\"Pyramid views\"\"\"\nimport json\nfrom bson import json_util\nfrom datetime import datetime\nfrom bson.json_util import JSONOptions\nfrom dateutil import parser\nfrom pyramid.view import view_config, view_defaults\nfrom bills_paid.mongo import MongoClient\n\n\n@view_defaults(route_name='apiAccount', renderer='json')\nclass AccountApi(object):\n\t\"\"\"API methods for /account\"\"\"\n\tdef __init__(self, request):\n\t\tself.request = request\n\t\tself.mongo_client = MongoClient()\n\n\t@view_config(route_name='apiAccountCreate', request_method='POST')\n\tdef create_account(self):\n\t\t\"\"\"Creates a new account\"\"\"\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.create_account(\n\t\t\t{\n\t\t\t\t'Name': res['Name'],\n\t\t\t\t'DayOfMonth': res['DayOfMonth'],\n\t\t\t\t'Amount': res['Amount'],\n\t\t\t\t'Active': res['Active']\n\t\t\t})\n\t\treturn {'Success': True}\n\n\t# This needs to be updated\n\t# If an account was used in a bill, it should deny deletion\n\t@view_config(route_name='apiAccountDelete', request_method='DELETE')\n\tdef delete_account(self):\n\t\t\"\"\"Deletes an existing account\"\"\"\n\t\taccount_id = self.request.matchdict[\"accountId\"]\n\n\t\tif self.mongo_client.count_bills_for_account(account_id):\n\t\t\treturn {'Success': False, 'Message': 'Account appears in a billing month'}\n\n\t\tself.mongo_client.delete_account(account_id)\n\t\treturn {'Success': True}\n\n\t@view_config(request_method='GET')\n\tdef get_accounts(self):\n\t\t\"\"\"Retrieve all accounts\"\"\"\n\t\treturn [\n\t\t\tjson.dumps\n\t\t\t(\n\t\t\t\taccount,\n\t\t\t\tdefault=json_util.default\n\t\t\t) for account in self.mongo_client.get_all_accounts()\n\t\t]\n\n\t@view_config(route_name='apiAccountCount', request_method='GET')\n\tdef get_accounts_count(self):\n\t\t\"\"\"Retrieve number of accounts\"\"\"\n\t\treturn json.dumps(self.mongo_client.get_accounts_count(), default=json_util.default)\n\n\t@view_config(route_name='apiAccountUpdate', request_method='PUT')\n\tdef update_account(self):\n\t\t\"\"\"Updates an existing account\"\"\"\n\t\taccount_id = self.request.matchdict[\"accountId\"]\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.update_account(\n\t\t\taccount_id,\n\t\t\t{\n\t\t\t\t'Name': res['Name'],\n\t\t\t\t'DayOfMonth': res['DayOfMonth'],\n\t\t\t\t'Amount': res['Amount'],\n\t\t\t\t'Active': res['Active']\n\t\t\t}\n\t\t)\n\t\treturn {'Success': True}\n\n\n@view_defaults(route_name='billsPaidApi', renderer='json')\nclass BillsPaidApi(object):\n\t\"\"\"API methods for /billspaid\"\"\"\n\tdef __init__(self, request):\n\t\tself.request = request\n\t\tself.mongo_client = MongoClient()\n\n\t@view_config(route_name=\"apiBillsCreate\", request_method=\"POST\")\n\tdef create_bill(self):\n\t\t\"\"\"Creates a line item for a bill\"\"\"\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.create_bill(res[\"Date\"], res['Amount'], res['Posted'], 
res['AccountId']['$oid'])\n\t\treturn {'Success': True}\n\n\t@view_config(route_name='apiBillsDelete', request_method='DELETE')\n\tdef delete_bill(self):\n\t\t\"\"\"Deletes an existing account\"\"\"\n\t\tbill_id = self.request.matchdict[\"billId\"]\n\t\tself.mongo_client.delete_bill(bill_id)\n\t\treturn {'Success': True}\n\n\t@view_config(route_name='apiBillsGetMonth', request_method='GET')\n\tdef get_billing_month(self):\n\t\t\"\"\"\n\t\t\tRetrieve bills for a specific month\n\t\t\tURL Input: /{date}: Any date within the month\n\t\t\tOutput: The full month object\n\t\t\"\"\"\n\t\tdate = self.request.matchdict[\"date\"]\n\t\tdate_parsed = parser.parse(date)\n\t\tbilling_months = self.mongo_client.get_billing_month(date_parsed.month, date_parsed.year)\n\n\t\taccounts = self.mongo_client.get_all_accounts()\n\t\taccounts_list = {}\n\t\tfor account in accounts:\n\t\t\taccounts_list[account['_id']] = account['Name']\n\n\t\tbills_paid = 0\n\t\tbills_pending = 0\n\n\t\t# Funky logic\n\t\tto_return = {}\n\t\tif billing_months:\n\t\t\tfor billing_month in billing_months:\n\t\t\t\tto_return = billing_month\n\t\t\t\tif 'Bills' in to_return:\n\t\t\t\t\tfor bill in to_return['Bills']:\n\t\t\t\t\t\tbill['AccountName'] = accounts_list[bill['AccountId']]\n\t\t\t\t\t\tif bill['Posted']:\n\t\t\t\t\t\t\tbills_paid += bill['Amount']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbills_pending += bill['Amount']\n\n\t\tto_return['BillsPaid'] = bills_paid\n\t\tto_return['BillsPending'] = bills_pending\n\n\t\toptions = JSONOptions(datetime_representation=json_util.DatetimeRepresentation.ISO8601)\n\t\treturn json_util.dumps(to_return, json_options=options)\n\n\t@view_config(route_name='apiBillsGetUpcoming', request_method='GET')\n\tdef get_upcoming_bills(self):\n\t\tcurrent_bills = self.mongo_client.get_billing_month(datetime.now().month, datetime.now().year)\n\n\t\taccounts = list(self.mongo_client.get_active_accounts())\n\n\t\tfor current_bill in current_bills:\n\t\t\tif 'Bills' in current_bill:\n\t\t\t\tfor bill in current_bill['Bills']:\n\t\t\t\t\tfor account in accounts:\n\t\t\t\t\t\tif account['_id'] == bill['AccountId']:\n\t\t\t\t\t\t\taccount['Amount'] -= bill['Amount']\n\n\t\taccounts = [account for account in accounts if int(account['Amount']) > 0]\n\t\tbills_total = sum([account['Amount'] for account in accounts])\n\t\tto_return = {'Accounts': accounts, 'BillsTotal': bills_total}\n\n\t\toptions = JSONOptions(datetime_representation=json_util.DatetimeRepresentation.ISO8601)\n\t\treturn json_util.dumps(to_return, json_options=options)\n\n\t@view_config(route_name=\"apiBillsUpdate\", request_method=\"PUT\")\n\tdef update_bill(self):\n\t\t\"\"\"Creates and updates a line item for a bill\"\"\"\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.update_bill(\n\t\t\tres[\"Date\"],\n\t\t\tres['Amount'],\n\t\t\tres['Posted'],\n\t\t\tres['AccountId'],\n\t\t\tres['_id'],\n\t\t\tself.request.matchdict['billId'])\n\t\treturn {'Success': True}\n\n\n@view_defaults(route_name='apiPaycheck', renderer='json')\nclass PaycheckPaidApi(object):\n\t\"\"\"API methods for /billspaid\"\"\"\n\tdef __init__(self, request):\n\t\tself.request = request\n\t\tself.mongo_client = MongoClient()\n\n\t@view_config(route_name=\"apiPaycheckCreate\", request_method=\"POST\")\n\tdef create_paycheck(self):\n\t\t\"\"\"Creates a new paycheck\"\"\"\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.create_paycheck(\n\t\t\t{\n\t\t\t\t'Date': res['Date'],\n\t\t\t\t'Amount': res['Amount']\n\t\t\t})\n\t\treturn {'Success': 
True}\n\n\t@view_config(route_name='apiPaycheckDelete', request_method='DELETE')\n\tdef delete_paycheck(self):\n\t\t\"\"\"Deletes an existing paycheck\"\"\"\n\t\tpaycheck_id = self.request.matchdict[\"paycheckId\"]\n\n\t\tself.mongo_client.delete_paycheck(paycheck_id)\n\t\treturn {'Success': True}\n\n\t@view_config(route_name='apiPaycheck', request_method='GET')\n\tdef get_paychecks(self):\n\t\t\"\"\"Retrieve all paychecks\"\"\"\n\t\treturn [\n\t\t\tjson.dumps\n\t\t\t(\n\t\t\t\tpaycheck,\n\t\t\t\tdefault=json_util.default\n\t\t\t) for paycheck in self.mongo_client.get_all_paychecks()\n\t\t]\n\n\t@view_config(route_name=\"apiPaycheckUpdate\", request_method=\"PUT\")\n\tdef update_paycheck(self):\n\t\t\"\"\"Updates an existing paycheck\"\"\"\n\t\tpaycheck_id = self.request.matchdict[\"paycheckId\"]\n\t\tres = json.loads(self.request.body)\n\t\tself.mongo_client.update_paycheck(\n\t\t\tpaycheck_id,\n\t\t\t{\n\t\t\t\t'Date': res['Date'],\n\t\t\t\t'Amount': res['Amount']\n\t\t\t}\n\t\t)\n\t\treturn {'Success': True}\n\n\n@view_defaults(renderer='index.html')\nclass BillsPaidViews(object):\n\t\"\"\"View routes\"\"\"\n\tdef __init__(self, request):\n\t\t\tself.request = request\n\n\t@view_config(route_name='home')\n\tdef home_view(self):\n\t\t\"\"\"Routes requests for /home to the home route\"\"\"\n\t\treturn {'project': 'Bills-Paid'}\n\n\t@view_config(route_name='accounts')\n\tdef accounts_view(self):\n\t\t\"\"\"Routes requests for /accounts to the accounts route\"\"\"\n\t\treturn {'project': 'Bills-Paid'}\n\n\t@view_config(route_name='bills')\n\tdef bills_view(self):\n\t\t\"\"\"Routes requests for /bills to the bills route\"\"\"\n\t\treturn {'project': 'Bills-Paid'}\n\n\t@view_config(route_name='dashboard')\n\tdef dashboard_view(self):\n\t\t\"\"\"Routes requests for /dashboard to the dashboard route\"\"\"\n\t\treturn {'project': 'Bills-Paid'}\n","sub_path":"bills_paid/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259094063","text":"from Utils import *\nfrom classifier.CNNClassifier import CNNClassifier\nfrom classifier.Classifier import Classifier\nfrom classifier.ConstantClassifier import ConstantClassifier\nfrom classifier.LinearClassifier import LinearClassifier\nfrom classifier.RFClassifier import RFClassifier\nfrom classifier.SNNClassifier import SNNClassifier\nfrom Settings import *\nfrom sklearn.model_selection import train_test_split\nfrom main import run_for_classifier\nfrom auditok import ADSFactory, AudioEnergyValidator, StreamTokenizer, player_for, dataset\n\nSAVE=True\nLOAD=True\n#classifier = CNNClassifier(verbose=1)\n#classifier = SNNClassifier(num_units=64, verbose=1)\nclassifier = RFClassifier(n_estimators=100)\npathGender = MODELS_DIR + classifier.get_classifier_name() + DUMP_EXT\nprint(pathGender)\nif os.path.isfile(pathGender):\n print(\"Begin to Load model\")\n classifier.load(pathGender)\n #model.save_weights(\"gender_cnn_model_weight.h5\")\n #json_string = model.to_json()\n #open('gender_cnn_model_json.json','w').write(json_string)\n #model.save(\"gender_cnn_model.h5\")\nelse:\n print(\"Model file not exist.{}\".format(path))\n\ndef f(path):\n #if path.endswith(AUDIO_EXT) == False and path.endswith(AUDIO_EXT_FLAC) == False and path.endswith(AUDIO_EXT_MP3) == False and path.endswith(AUDIO_EXT_PCM) == False:\n if path.endswith(AUDIO_EXT_PCM) == False:\n return False\n return True\n\ndef list_files(dir_name: str, ext=AUDIO_EXT) -> np.ndarray:\n 
\"\"\"\n List the files in a directory recursively for a given extension.\n :param dir_name: The directory to search\n :param ext: The extension of the files to search for\n :return: The array of filenames\n \"\"\"\n return np.asarray(list(map(lambda path: path.replace(\"\\\\\", PATH_SEPARATOR),\n filter(lambda path: f(path),\n [os.path.join(dp, f) for dp, dn, fn in os.walk(dir_name) for f in fn]))))\n\ndef convertPCM(destPath, srcPath):\n #print(srcPath)\n outPath = destPath\n fout = open(outPath,'wb') #用二进制的写入模式\n #fout.write(struct.pack('4s','\\x66\\x6D\\x74\\x20'))\n #写入一个长度为4的串,这个串的二进制内容为 66 6D 74 20\n #Riff_flag,afd,fad,afdd, = struct.unpack('4c',fin.read(4))\n #读入四个字节,每一个都解析成一个字母\n #open(sys.argv[4],'wb').write(struct.pack('4s','fmt '))\n #将字符串解析成二进制后再写入\n #open(sys.argv[4],'wb').write('\\x3C\\x9C\\x00\\x00\\x57')\n #直接写入二进制内容:3C 9C 00 00 57\n #fout.write(struct.pack('i',6000)) #写入6000的二进制形式\n #check whether inFile has head-Info\n inPath= srcPath\n fin = open(inPath,'rb')\n Riff_flag, = struct.unpack('4s',fin.read(4))\n if Riff_flag == 'RIFF':\n #print(\"%s have head\" % inPath)\n fin.close()\n #sys.exit(0)\n else:\n #print(\"%s no head\" % inPath)\n fin.close()\n #采样率\n sampleRate = int(16000)\n #bit位\n bits = int(16)\n fin = open(inPath,'rb')\n startPos = fin.tell()\n fin.seek(0,os.SEEK_END)\n endPos = fin.tell()\n sampleNum = int(endPos - startPos)\n #print(sampleNum)\n #headInfo = geneHeadInfo(sampleRate,bits,sampleNum)\n #fout.write(headInfo)\n fout.write('\\x52\\x49\\x46\\x46'.encode())\n fout.write(struct.pack('i',sampleNum + 36))\n #fout.write(fileLength)\n fout.write('\\x57\\x41\\x56\\x45\\x66\\x6D\\x74\\x20\\x10\\x00\\x00\\x00\\x01\\x00\\x01\\x00'.encode())\n fout.write(struct.pack('i',sampleRate))\n fout.write(struct.pack('i',int(sampleRate * bits / 8)))\n fout.write('\\x02\\x00'.encode())\n fout.write(struct.pack('H',bits))\n fout.write('\\x64\\x61\\x74\\x61'.encode())\n fout.write(struct.pack('i',sampleNum))\n fin.seek(os.SEEK_SET)\n fout.write(fin.read())\n fin.close()\n fout.close()\n # We set the `record` argument to True so that we can rewind the source\n asource = ADSFactory.ads(filename=destPath, record=True)\n validator = AudioEnergyValidator(sample_width=asource.get_sample_width(), energy_threshold=50)\n # Default analysis window is 10 ms (float(asource.get_block_size()) / asource.get_sampling_rate())\n # min_length=20 : minimum length of a valid audio activity is 20 * 10 == 200 ms\n # max_length=400 : maximum length of a valid audio activity is 400 * 10 == 4000 ms == 4 seconds\n # max_continuous_silence=30 : maximum length of a tolerated silence within a valid audio activity is 30 * 30 == 300 ms\n tokenizer = StreamTokenizer(validator=validator, min_length=50, max_length=400, max_continuous_silence=1)\n asource.open()\n tokens = tokenizer.tokenize(asource)\n # Play detected regions back\n #player = player_for(asource)\n #print(\"\\n ** playing detected regions...\\n\")\n data = b''\n\n for i,t in enumerate(tokens):\n #print(\"Token [{0}] starts at {1} and ends at {2}\".format(i+1, t[1], t[2]))\n data = data + b''.join(t[0])\n #player.play(data)\n\n sampleNum = len(data)\n if sampleNum>1000:\n #采样率\n sampleRate = int(16000)\n #bit位\n bits = int(16)\n fout = open(srcPath+\".wav\",'wb')\n fout.write('\\x52\\x49\\x46\\x46'.encode())\n fout.write(struct.pack('i',sampleNum + 36))\n #fout.write(fileLength)\n fout.write('\\x57\\x41\\x56\\x45\\x66\\x6D\\x74\\x20\\x10\\x00\\x00\\x00\\x01\\x00\\x01\\x00'.encode())\n fout.write(struct.pack('i',sampleRate))\n 
fout.write(struct.pack('i',int(sampleRate * bits / 8)))\n fout.write('\\x02\\x00'.encode())\n fout.write(struct.pack('H',bits))\n fout.write('\\x64\\x61\\x74\\x61'.encode())\n fout.write(struct.pack('i',sampleNum))\n fout.write(data)\n fout.close()\n #assert len(tokens) == 8\n asource.close()\n\ndef read_audio(path, target_fs=None):\n #print(path)\n if path.upper().endswith(\".PCM\"):\n convertPCM(TEMP_WAV, path)\n path = path+\".wav\"\n (audio, fs) = soundfile.read(path)\n if audio.ndim > 1:\n audio = np.mean(audio, axis=1)\n if target_fs is not None and fs != target_fs:\n audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return audio, fs\n\ndef audio_to_features(filename: str, n_features: int = FEATURES_NUMBER) -> np.ndarray:\n \"\"\"\n Extract MFCC features from audio file using librosa.\n :param filename: The name of the file\n :param n_features: The number of features to extract\n :return: An ndarray of features\n \"\"\"\n #data, samplerate = librosa.load(filename, sr=None)\n fs = cfg.sample_rate\n print(filename)\n data, samplerate = read_audio(filename, target_fs=fs)\n mfcc_features = np.asarray(librosa.feature.mfcc(data, samplerate, n_mfcc=n_features))\n global min_shape\n if mfcc_features.shape[1] < min_shape: # Keep track of min_shape for 2D input\n min_shape = mfcc_features.shape[1]\n return mfcc_features.transpose()\n\ndef pre_file_to_features_with_labels(filename: str) -> Any:\n \"\"\"\n Extract features and label from an audio file.\n :param filename: The filename\n :return: A tuple (features, label)\n \"\"\"\n features = audio_to_features(filename)\n #print(features)\n return features, 0\n\ndef pre_files_to_features(filenames: Iterable[str]) -> np.ndarray:\n \"\"\"\n Extract features and labels from a list of files.\n :param filenames: The filenames to use\n :return: An array of (features, label) tuples\n \"\"\"\n features_with_label = np.asarray([pre_file_to_features_with_labels(file) for file in filenames])\n flattened_features = flatten(extract_features(features_with_label))\n #print(flattened_features.min(axis=0))\n #print(flattened_features.max(axis=0))\n flattened_features_min_f = flattened_features.min(axis=0)\n flattened_features_max_f = flattened_features.max(axis=0)\n if not os.path.isfile(MIN_FEATURES_FILE) or not os.path.isfile(MAX_FEATURES_FILE):\n min_f = flattened_features.min(axis=0)\n #save_nparray(min_f, MIN_FEATURES_FILE)\n max_f = flattened_features.max(axis=0)\n #save_nparray(max_f, MAX_FEATURES_FILE)\n else:\n min_f = load_nparray(MIN_FEATURES_FILE)\n max_f = load_nparray(MAX_FEATURES_FILE)\n #print(min_f)\n #print(max_f)\n min_f = np.asarray([min(min_f[i] , flattened_features_min_f[i]) for i in range(len(min_f))])\n max_f = np.asarray([max(max_f[i] , flattened_features_max_f[i]) for i in range(len(max_f))])\n save_nparray(min_f, MIN_FEATURES_FILE)\n save_nparray(max_f, MAX_FEATURES_FILE)\n #print(min_f)\n #print(max_f)\n #print(\"end\")\n # Normalize the features\n features_with_label = np.asarray(list(\n map(lambda feat_label_tuple: (\n np.asarray(list(map(lambda sample: (sample - min_f) / (max_f - min_f), feat_label_tuple[0]))),\n feat_label_tuple[1]),\n features_with_label)))\n #print(features_with_label)\n #print(\"feature\")\n return features_with_label\n\ndef pre_PredictGender(path):\n #print(path)\n print(\"Begin to load audio & extract features\")\n pre_features_with_label = pre_files_to_features(list_files(path))\n #pre_set = to_2d(pre_features_with_label)\n pre_set = to_1d(pre_features_with_label)\n 
pre_features = extract_features(pre_set)\n pre_label = extract_labels(pre_set)\n print(\"Begin to predict\")\n #results_pre = modelGender.predict(pre_features, batch_size=64, verbose=1)\n results_pre = classifier.predict(pre_features)\n #print(results_pre)\n res = clamp(results_pre)\n print(res)\n print(\"male:{}\".format(np.sum(res)))\n print(\"female:{}\".format(len(res)-np.sum(res)))\n return return_majority(res)\n #transformed_test_set = to_1d(pre_features_with_label)\n #samples_features = extract_features(transformed_test_set)\n #samples_predictions = classifier.predict(samples_features)\n #print(samples_predictions)\n #print(samples_predictions[0])\n\ndef predictGender(path):\n try:\n value = pre_PredictGender(path)\n result = \"Unknown\"\n if value == 1:\n result = \"Male\"\n elif value == 0:\n result = \"Female\"\n print(result)\n return result\n except Exception as e:\n print(\"Error occur, Unknown\")\n return \"Unknown\"\n\ndef main():\n return predictGender(\"predict_dir/Random_A5\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":10580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372147037","text":"import tqdm\nimport numpy as np\nfrom skimage import morphology, segmentation\nfrom skimage.measure import regionprops\nimport shapely.geometry\nimport formatting\nimport nbburntbuildings\nfrom rasterio import features\n\n\ndef get_aoi(image, bbox):\n aoi = image.aoi(bbox=bbox)\n return aoi\n\n\ndef get_rgb_aoi(image, bbox):\n \"\"\"\n Get \"true color\" image of our Area of Interest\n :param image: CatalogImage object\n :param bbox: length 4 tuple of (xmin, ymin, xmax, ymax)\n :return: rgb aoi of AOI from image\n \"\"\"\n aoi = get_aoi(image, bbox)\n rgb_aoi = aoi.rgb()\n return rgb_aoi\n\n\ndef get_true_color_image_tiles(aoi):\n \"\"\"\n Create 256x256 tiles of AOI chip\n :return: list of pixel tile locations\n \"\"\"\n tiles = []\n for x in range(0, aoi.shape[1], 256):\n for y in range(0, aoi.shape[2], 256):\n tiles.append(aoi[:, x:x+256, y:y+256])\n return tiles\n\n\ndef classify_pixels(tiles, model):\n \"\"\"\n Loop through all the tiles and label each pixel in that tile as boolean burn/not-burn\n :param tiles: list of pixel tile locations\n :return:\n \"\"\"\n # compute pixel-wise prediction for each tile\n classified_pixels = []\n for tile in tqdm.tqdm_notebook(tiles):\n # create test dataset\n x_test = nbburntbuildings.pixels_as_features(tile)\n\n # clean the test dataset\n loc_nans = np.isnan(x_test)\n loc_infs = np.isinf(x_test)\n x_test[loc_nans] = 0\n x_test[loc_infs] = 0\n\n # make pixel-wise prediction\n classified_pixels.append(model.predict(x_test).reshape(tile.shape[1], tile.shape[2]))\n return classified_pixels\n\n\ndef rebuild_image(classified_pixels, rgb_image):\n \"\"\"\n Puts tiles' masks together to rebuild binary image\n \"\"\"\n full_image = nbburntbuildings.create_mask(classified_pixels, len(range(0, rgb_image.shape[1], 256)),\n len(range(0, rgb_image.shape[2], 256)), rgb_image.shape)\n return full_image\n\n\ndef clean_mask(mask):\n \"\"\"\n Remove small holes and objects\n \"\"\"\n new_mask_clean = morphology.remove_small_holes(mask == 1.0, 500)\n clean = morphology.remove_small_objects(new_mask_clean, 500)\n return clean\n\n\ndef segment_image(rgb_image):\n \"\"\"\n Break image into similar segments\n \"\"\"\n return segmentation.slic(rgb_image, n_segments=2000, compactness=10, sigma=1)\n\n\ndef 
classify_burnt_segments(img_segmented, classified_pixels):\n \"\"\"\n Determine which image segments contain at least 50% burnt pixels and convert results to binary\n :param img_segmented: output of segment_image()\n :param classified_pixels: boolean burn/not-burn for each pixel in the full image\n :return: boolean burn/not-burn for each image segment, and non-binary burn segments\n \"\"\"\n # classify burnt segments\n burnt_regions = [r.label for r in regionprops(img_segmented, intensity_image=classified_pixels) if\n r.mean_intensity > 0.5]\n burnt_segments = np.isin(img_segmented, burnt_regions)\n # binary mask\n binary_segments = burnt_segments.astype(np.uint8)\n return [binary_segments, burnt_segments]\n\n\ndef extract_burnt_shapes(rgb_aoi, classified_pixels, aoi):\n \"\"\"\n segment_image() + classify_burnt_segments() + convert output to a list of shapely polygons\n \"\"\"\n segmented = segment_image(rgb_aoi)\n binary_mask, burnt_segments = classify_burnt_segments(segmented, classified_pixels)\n # simplify geometries before storing as shapes\n shapes = [(shapely.geometry.shape(g).simplify(aoi.affine.a), v) for g, v in features.shapes(binary_mask,\n mask=burnt_segments,\n transform=aoi.affine)]\n return shapes\n\n\ndef shapely_to_geojson(shapes):\n \"\"\"\n convert list of shapely polygons to geojson\n \"\"\"\n if type(shapes) == list:\n results = ({\n 'type': 'Feature',\n 'properties': {'raster_val': v, 'color': 'red'},\n 'geometry': s.__geo_interface__}\n for i, (s, v)\n in enumerate(shapes))\n else:\n results = ({\n 'type': 'Feature',\n 'properties': {'raster_val': v, 'color': 'red'},\n 'geometry': s}\n for i, (s, v)\n in enumerate(shapes))\n list_results = list(results)\n geojson = formatting.feature_to_geojson(list_results)\n return geojson\n","sub_path":"nbburndetectionzillow/burndetect.py","file_name":"burndetect.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384982150","text":"# !python 3\nimport datetime\nimport email.message\nimport smtplib\nimport sys\n\nimport bs4\nimport requests\n\nfrom password import password\n\nurl = 'https://www.musiciansfriend.com/stupid'\nres = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\n\nfeature = soup.select('.feature-title')[0].select('h2')[0].text\nfeature = feature.replace('\\n', ' ')\nfeature = feature.replace(u'\\xa0', '').encode('utf-8')\n\noldPrice = soup.select('.regular-price')[0].text\noldPrice = oldPrice.replace('\\n', '')\noldPrice = oldPrice.replace(u'\\xa0', ' ').encode('utf-8')\n\nnewPrice = soup.select('.feature-price')[0].text\nnewPrice.replace('\\n', '')\nnewPrice = newPrice.replace(u'\\xa0', ' ').encode('utf-8')\n\nemailContent = \"\"\"\n \n \n \n \n \n \n Item: \"\"\" + str(feature.decode()) + \"\"\"
\n Was: \"\"\" + str(oldPrice.decode()) + \"\"\"
\n Now: \"\"\" + str(newPrice.decode()) + \"\"\"
\n \n
\n Link\n \n \n\"\"\"\n\nsubject = \"Stupid Deal of the Day \" + datetime.datetime.now().strftime(\"%m-%d-%y %H:%M%p\")\nmsg = email.message.Message()\nmsg['SUBJECT'] = subject\nmsg['From'] = 'shicks255@yahoo.com'\nmsg['To'] = 'shicks255@yahoo.com'\nmsg.add_header('Content-Type', 'text/html')\nmsg.set_payload(emailContent)\n\n# start the sending of emails\ntry:\n smtpObj = smtplib.SMTP('smtp.mail.yahoo.com', 587)\nexcept smtplib.SMTPException as e:\n smtpObj.quit()\n sys.exit()\n\nsmtpObj.ehlo()\nsmtpObj.starttls()\n\ntry:\n smtpObj.login('shicks255@yahoo.com', password)\n smtpObj.send_message(msg)\nexcept smtplib.SMTPAuthenticationError as e:\n smtpObj.quit()\n\nsys.exit()","sub_path":"StupidDealOfTheDay/sdotd.py","file_name":"sdotd.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343085752","text":"#!/usr/bin/env python\n# coding: utf8\n\n\"\"\" Unit testing for Separator class. \"\"\"\n\n__email__ = 'research@deezer.com'\n__author__ = 'Deezer Research'\n__license__ = 'MIT License'\n\nimport filecmp\nimport itertools\nfrom os import makedirs\nfrom os.path import splitext, basename, exists, join\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom spleeter.audio.adapter import get_default_audio_adapter\nfrom spleeter.commands import create_argument_parser\n\nfrom spleeter.commands import evaluate\n\nfrom spleeter.utils.configuration import load_configuration\n\nres_4stems = { \"vocals\": {\n \"SDR\": -0.007,\n \"SAR\": -19.231,\n \"SIR\": -4.528,\n \"ISR\": 0.000\n },\n \"drums\": {\n \"SDR\": -0.071,\n \"SAR\": -14.496,\n \"SIR\": -4.987,\n \"ISR\": 0.001\n },\n \"bass\":{\n \"SDR\": -0.001,\n \"SAR\": -12.426,\n \"SIR\": -7.198,\n \"ISR\": -0.001\n },\n \"other\":{\n \"SDR\": -1.453,\n \"SAR\": -14.899,\n \"SIR\": -4.678,\n \"ISR\": -0.015\n }\n }\n\n\ndef generate_fake_eval_dataset(path):\n aa = get_default_audio_adapter()\n n_songs = 2\n fs = 44100\n duration = 3\n n_channels = 2\n rng = np.random.RandomState(seed=0)\n for song in range(n_songs):\n song_path = join(path, \"test\", f\"song{song}\")\n makedirs(song_path, exist_ok=True)\n for instr in [\"mixture\", \"vocals\", \"bass\", \"drums\", \"other\"]:\n filename = join(song_path, f\"{instr}.wav\")\n data = rng.rand(duration*fs, n_channels)-0.5\n aa.save(filename, data, fs)\n\n\ndef test_evaluate(path=\"FAKE_MUSDB_DIR\"):\n generate_fake_eval_dataset(path)\n p = create_argument_parser()\n arguments = p.parse_args([\"evaluate\", \"-p\", \"spleeter:4stems\", \"--mus_dir\", path])\n params = load_configuration(arguments.configuration)\n metrics = evaluate.entrypoint(arguments, params)\n for instrument, metric in metrics.items():\n for metric, value in metric.items():\n assert np.allclose(np.median(value), res_4stems[instrument][metric], atol=1e-3)","sub_path":"tests/test_eval.py","file_name":"test_eval.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"220126008","text":"n = int(input())\npoint = []\nans = -1\nfor i in range(n):\n point.append(list(map(int, input().split())))\nx_sub_y = []\nx_plus_y = []\nfor i in range(n):\n x_sub_y.append(point[i][0] - point[i][1])\n x_plus_y.append(point[i][0] + point[i][1])\nx_sub_y.sort()\nx_plus_y.sort()\nans = max(ans, x_sub_y[-1] - x_sub_y[0], x_plus_y[-1] - 
x_plus_y[0])\nprint(ans)","sub_path":"Python_codes/p02556/s916071972.py","file_name":"s916071972.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612233275","text":"import torch\nimport torch.nn as nn\n\nclass Actor(nn.Module):\n def __init__(self, rule):\n super(Actor, self).__init__()\n state_dim = rule.state_dim\n action_dim = rule.action_dim\n self.FCNet = nn.Sequential(nn.Linear(state_dim, rule.fc1_dim),\n nn.LayerNorm(rule.fc1_dim),\n nn.ReLU(True),\n nn.Linear(rule.fc1_dim, rule.fc2_dim),\n nn.LayerNorm(rule.fc2_dim),\n nn.ReLU(True),\n nn.Linear(rule.fc2_dim, action_dim),\n nn.Tanh(),\n )\n self.cuda()\n \n def forward(self, state):\n return self.FCNet(state)\n\n\nclass Critic(nn.Module):\n def __init__(self, rule):\n super(Critic, self).__init__()\n state_dim = rule.state_dim\n action_dim = rule.action_dim\n self.FCNet = nn.Sequential(nn.Linear(state_dim + action_dim, rule.fc1_dim),\n nn.LayerNorm(rule.fc1_dim),\n nn.ReLU(True),\n nn.Linear(rule.fc1_dim, rule.fc2_dim),\n nn.LayerNorm(rule.fc2_dim),\n nn.ReLU(True),\n nn.Linear(rule.fc2_dim, 1),\n )\n self.cuda()\n \n def forward(self, state, action):\n inputs = torch.cat((state,action), dim = -1)\n return self.FCNet(inputs)\n\ndef init_weights(params):\n if type(params) == nn.Linear:\n params.weight.data.uniform_(-3e-3, 3e-3)\n params.bias.data.uniform_(-3e-4, 3e-4)","sub_path":"Python/DDPG/ACNet.py","file_name":"ACNet.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367067676","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework import viewsets, permissions\n\n\nfrom .forms import (\nEventForm, PublicationsForm, InnovationHubForm,\nNewsForm, StaffsForm, CollaboratingPartnersForm,\nFundingPartnersForm, ProjectsForm\n)\n\n\nfrom .models import (\n Events, Staffs,\n Publications, News,\n Projects, InnovationHub,\n FunndingPartners,\n CollaboratingInstitutions\n)\nfrom .serializers import(\n EventsSerializer,\n EventsLimitSerializer,\n StaffsSerializer,\n StaffsLimitSerializer,\n PublicationsSerializer,\n NewsSerializer,\n NewsLimitSerializer,\n ProjectsSerializer,\n ProjectsLimitSerializer,\n ProjectsEHSerializer,\n ProjectsICSerializer,\n ProjectsHSSerializer,\n ProjectsTCSerializer,\n InnovationHubSerializer,\n FundingPartnersSerializer,\n CollaboratingInstitutionsSerializer,\n)\n# Create your views here.\n\nclass EventsView(viewsets.ModelViewSet):\n queryset = Events.objects.all().order_by('-id')\n serializer_class = EventsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass EventsViewLimit(viewsets.ModelViewSet):\n queryset = Events.objects.all().order_by('-id')[:1]\n serializer_class = EventsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass StaffsView(viewsets.ModelViewSet):\n queryset = Staffs.objects.all().order_by('-id')\n serializer_class = StaffsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass StaffsViewLimit(viewsets.ModelViewSet):\n queryset = Staffs.objects.all().order_by('-id')[:4]\n serializer_class = StaffsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass PublicationsView(viewsets.ModelViewSet):\n queryset = 
Publications.objects.all().order_by('-id')\n serializer_class = PublicationsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n\nclass NewsView(viewsets.ModelViewSet):\n queryset = News.objects.all().order_by('-id')\n serializer_class = NewsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n\nclass NewsViewLimit(viewsets.ModelViewSet):\n queryset = News.objects.all().order_by('-id')[:1]\n serializer_class = NewsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass ProjectsView(viewsets.ModelViewSet):\n queryset = Projects.objects.all().order_by('-id')\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n\n# start views for each project\n\n\nclass ProjectsEHView(viewsets.ModelViewSet):\n queryset = Projects.objects.filter(department = 'EH').order_by('-id')\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass ProjectsICView(viewsets.ModelViewSet):\n queryset = Projects.objects.filter(department = 'IC').order_by('-id')\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass ProjectsHSView(viewsets.ModelViewSet):\n queryset = Projects.objects.filter(department = 'HS').order_by('-id')\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass ProjectsTCView(viewsets.ModelViewSet):\n queryset = Projects.objects.filter(department = 'TC').order_by('-id')\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n# end views for each project\n\nclass ProjectsViewLimit(viewsets.ModelViewSet):\n queryset = Projects.objects.all().order_by('-id')[:3]\n serializer_class = ProjectsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass InnovationHubView(viewsets.ModelViewSet):\n queryset = InnovationHub.objects.all().order_by('-id')\n serializer_class = InnovationHubSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass FundingPartnerView(viewsets.ModelViewSet):\n queryset = FunndingPartners.objects.all().order_by('-id')\n serializer_class = FundingPartnersSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\nclass CollaboratingInstitutionsView(viewsets.ModelViewSet):\n queryset = CollaboratingInstitutions.objects.all().order_by('-id')\n serializer_class = CollaboratingInstitutionsSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n#\n# @login_required\n# class home_view(request):\n# template_view = 'pages/dashboard.html'\n# return render(request, template_view, {})@login_required\n\n@login_required\ndef home_view(request):\n template_name = 'pages/dashboard.html'\n return render(request, template_name, {})\n\n\n\n@login_required\ndef staff_list_view(request):\n get_staff = Staffs.objects.all().order_by('-id')\n template_name = 'pages/staffList.html'\n context = {\n 'staffs':get_staff,\n }\n return render(request, template_name, context)\n\n\n\n@login_required\ndef project_list_view(request):\n get_project = Projects.objects.all().order_by('-id')\n template_name = 'pages/projectList.html'\n context = {\n 'projects':get_project,\n }\n return render(request, template_name, context)\n\n\n@login_required\ndef news_list_view(request):\n get_news = News.objects.all().order_by('-id')\n template_name = 'pages/newsList.html'\n context = {\n 
'allnews':get_news,\n }\n return render(request, template_name, context)\n\n\n@login_required\ndef event_list_view(request):\n get_events = Events.objects.all().order_by('-id')\n template_name = 'pages/eventList.html'\n context = {\n 'allevents':get_events,\n }\n return render(request, template_name, context)\n\n\n@login_required\ndef publication_list_view(request):\n get_publications = Publications.objects.all().order_by('-id')\n template_name = 'pages/publicationsList.html'\n context = {\n 'allpublication':get_publications,\n }\n return render(request, template_name, context)\n\n\n@login_required\ndef add_partner_view(request):\n if request.method == 'POST':\n form = FundingPartnersForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, f'New funding partner added successfully')\n return redirect('add_partner_view')\n else:\n form = FundingPartnersForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addPartners.html'\n return render(request, template_name, context)\n\n\n\n@login_required\ndef add_collaborating_view(request):\n if request.method == 'POST':\n form = CollaboratingPartnersForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, f'New collaborating institutions added successfully')\n return redirect('add_partner_view')\n else:\n form = CollaboratingPartnersForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addPartners.html'\n return render(request, template_name, context)\n\n\n\n@login_required\ndef add_staff(request):\n if request.method == 'POST':\n form = StaffsForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, f'New staff added successfully')\n return redirect('add_staff')\n else:\n form = StaffsForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addStaff.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef add_projects(request):\n if request.method == 'POST':\n form = ProjectsForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, f'New project added successfully')\n return redirect('add_projects')\n else:\n form = ProjectsForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addproject.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef add_publications(request):\n if request.method == 'POST':\n form = PublicationsForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, f'New publications added successfully')\n return redirect('add_publications')\n else:\n form = PublicationsForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addPublications.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef add_news(request):\n if request.method == 'POST':\n form = NewsForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, f'New news added successfully')\n return redirect('add_news')\n else:\n form = NewsForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addNews.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef add_event(request):\n if request.method == 'POST':\n form = EventForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, f'New event added successfully')\n return redirect('add_news')\n else:\n form = EventForm()\n context = {\n 'form':form\n }\n template_name = 'pages/addEvent.html'\n return render(request, template_name, 
context)\n\n\n@login_required\ndef delete_staff(request, id):\n get_staff = Staffs.objects.get(pk = id)\n get_staff.delete()\n messages.success(request, f'staff deleted successfully')\n return redirect('staff_list_view')\n\n\n@login_required\ndef delete_project(request, id):\n get_project = Projects.objects.get(pk = id)\n get_project.delete()\n messages.success(request, f'project deleted successfully')\n return redirect('project_list_view')\n\n\n@login_required\ndef delete_news(request, id):\n get_news = News.objects.get(pk = id)\n get_news.delete()\n messages.success(request, f'news deleted successfully')\n return redirect('news_list_view')\n\n\n@login_required\ndef delete_event(request, id):\n get_event = Events.objects.get(pk = id)\n get_event.delete()\n messages.success(request, f'event deleted successfully')\n return redirect('event_list_view')\n\n\n@login_required\ndef update_staff(request, id):\n # get_staff = Staffs.objects.get(pk = id)\n instance = get_object_or_404(Staffs, pk = id)\n form = StaffsForm(request.POST or None, request.FILES or None, instance = instance)\n if form.is_valid():\n instance = form.save(commit = False)\n instance.save()\n messages.success(request, f'staff updated successfully')\n return redirect('staff_list_view')\n context = {\n 'form':form\n }\n template_name = 'pages/addStaff.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef update_project(request, id):\n # get_staff = Staffs.objects.get(pk = id)\n instance = get_object_or_404(Projects, pk = id)\n form = ProjectsForm(request.POST or None, request.FILES or None, instance = instance)\n if form.is_valid():\n instance = form.save(commit = False)\n instance.save()\n messages.success(request, f'project updated successfully')\n return redirect('project_list_view')\n context = {\n 'form':form\n }\n template_name = 'pages/addproject.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef update_news(request, id):\n # get_staff = Staffs.objects.get(pk = id)\n instance = get_object_or_404(News, pk = id)\n form = NewsForm(request.POST or None, request.FILES or None, instance = instance)\n if form.is_valid():\n instance = form.save(commit = False)\n instance.save()\n messages.success(request, f'news updated successfully')\n return redirect('news_list_view')\n context = {\n 'form':form\n }\n template_name = 'pages/addNews.html'\n return render(request, template_name, context)\n\n\n@login_required\ndef update_event(request, id):\n # get_staff = Staffs.objects.get(pk = id)\n instance = get_object_or_404(Events, pk = id)\n form = EventForm(request.POST or None, request.FILES or None, instance = instance)\n if form.is_valid():\n instance = form.save(commit = False)\n instance.save()\n messages.success(request, f'event updated successfully')\n return redirect('event_list_view')\n context = {\n 'form':form\n }\n template_name = 'pages/addEvent.html'\n return render(request, template_name, context)\n","sub_path":"ihi_wepapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47656134","text":"def main():\n # 사용자로부터 문자열을 입력받는다.\n s = input(\"문자열을 입력하세요: \").strip()\n\n if isPalindrome(s):\n print(s, \"은/는 회문입니다.\")\n else:\n print(s, \"은/는 회문이 아닙니다.\")\n\n# 문자열이 회문인지 검사한다.\ndef isPalindrome(s):\n # 문자열의 첫 문자 인덱스\n low = 0\n\n # 문자열의 마지막 문자 인덱스\n high = len(s) - 1\n\n while low < high:\n if s[low] != s[high]:\n return False # 입력한 문자열은 회문이 
아니다.\n\n        low += 1\n        high -= 1\n\n    return True # 입력한 문자열은 회문이다.\n\nmain() # main 함수를 호출한다.\n","sub_path":"lec08/CheckPalindrome.py","file_name":"CheckPalindrome.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381831277","text":"\n# To convert ICAMS-formatted catalog to \n# catalog format acceptable by helmsman,\n# Prep.For.Running.helmsman.R needs to be run\n# before running this script.\n\n#################################################################################################\n###### load prerequisites\n#################################################################################################\nimport os,sys,subprocess\n\n\n\n#### Read old working directory\noldWorkingDir = os.getcwd()\n\n\n# Set working directory to the folder which contains results of\n# computational approaches on SBS1-SBS5-correlated data sets\n# before running this script.\n#\n# PATH = \"\"\n#\n# os.chdir(PATH)\ntopLevelFolder4Data = \"./0.Input_datasets\"\ntopLevelFolder4Run = \"./2a.Full_output_K_unspecified\"\n\n\n\n#### Naming the seeds\nseedNumbers = (1, 691, 1999, 3511, 8009,\n               9902, 10163, 10509, 14476, 20897,\n               27847, 34637, 49081, 75679, 103333,\n               145879, 200437, 310111, 528401, 1076753)\n\n\n#### Naming the datasets for cycling\nslopes = (0.1,0.5,1,2,10)\nRsqs = (0.1,0.2,0.3,0.6)\ndatasetNames = tuple()\nfor slope in slopes:\n    for Rsq in Rsqs:\n        datasetNames = datasetNames + (\"S.\"+str(slope)+\".Rsq.\"+str(Rsq),)\n\nCPUNumber = 5\n\n\nfor seedNumber in seedNumbers:\n    for datasetName in datasetNames:\n        inputPath = \"/\".join([topLevelFolder4Run,\"helmsman.NMF.results\",datasetName,\"seed.\"+str(seedNumber)])\n        inputCatalog = inputPath+\"/ground.truth.syn.catalog.tsv\"\n        outputPath = inputPath\n        ## The path of helmsman should be replaced by the location of helmsman.py on your machine.\n        arguments = ['python3','/home/wuyang/practice/3_Signature_Challenge/helmsman/helmsman.py',\n                     '--cpus',str(CPUNumber),\n                     '--seed',str(seedNumber),'--mode','agg',\n                     '--input',inputCatalog,\n                     '--projectdir',outputPath,\n                     '--verbose',\n                     '--decomp','nmf','--rank','0']\n        process = subprocess.Popen(\" \".join(arguments),shell = True)\n        process.wait()\n\n## Restore old working directory\t\t\nos.chdir(oldWorkingDir)\n","sub_path":"data-raw/Wu_2022/1_scripts.for.SBS1SBS5/2_running_approaches_without_knowing_K/Run.helmsman.py","file_name":"Run.helmsman.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"44211729","text":"# -*- coding: utf-8 -*-\nfrom flask.ext.wtf import Form\nfrom wtforms import StringField, RadioField, SubmitField, TextAreaField, \\\n    SelectMultipleField, widgets, PasswordField\nfrom wtforms.validators import DataRequired, Email, Length, Optional\n\n\nclass IntroQuestionnaireForm(Form):\n    news_frequency = RadioField(\n        'How often do you access news websites like BBC, Guardian, CNN etc.?',\n        choices=[\n            ('multiple_daily', 'Multiple times a day'),\n            ('daily', 'Approximately once a day'),\n            ('weekly', 'Weekly'),\n            ('never', 'Don\\'t read the news')],\n        validators=[DataRequired()])\n\n    twitter_type = SelectMultipleField(\n        'Which of these best describes what you use Twitter for?',\n        choices=[\n            ('news', 'News'),\n            ('entertainment', 'Entertainment'),\n            ('friends', 'Friends')],\n        validators=[DataRequired()],\n        option_widget=widgets.CheckboxInput(),\n        widget=widgets.ListWidget(prefix_label=False))\n\n    
twitter_frequency = RadioField('How often do you access Twitter?',\n choices=[\n ('multiple_daily',\n 'Multiple times a day'),\n ('daily', 'Approximately once a day'),\n ('weekly', 'Weekly'),\n ('rarely', 'Rarely')],\n validators=[DataRequired()])\n\n twitter_trending = RadioField('Do you follow trending topics on Twitter?',\n choices=[\n ('yes', 'Yes'),\n ('no', 'No')],\n validators=[DataRequired()])\n\n age = RadioField('Please provide your AGE GROUP:',\n choices=[('18_25', '18-25'),\n ('26_35', '26-35'),\n ('36_45', '36-45'),\n ('46_55', '46-55'),\n ('55+', '55+')],\n validators=[Optional()])\n\n gender = RadioField('Please indicate your GENDER:',\n choices=[('male', 'Male'),\n ('female', 'Female'),\n ('other', 'Other')],\n validators=[Optional()])\n\n education = RadioField('What is your EDUCATION level:',\n choices=[('nodegree', 'No Degree'),\n ('undergrad', 'Undergraduate'),\n ('grad', 'Graduate'),\n ('research', 'Researcher')],\n validators=[Optional()])\n\n english_proficiency = RadioField(\n 'How would you describe your proficiency in English?',\n choices=[('native', 'Native speaker'),\n ('advanced', 'Advanced'),\n ('intermediate', 'Intermediate'),\n ('beginner', 'Beginner')],\n validators=[Optional()])\n\n email = StringField('Please provide your EMAIL:',\n validators=[DataRequired(), Email()])\n\n submit = SubmitField('Proceed')\n\n\nclass RelevancyForm(Form):\n relevancy = RadioField(\n 'How do you feel the ORDERING of the events are relevant to you?', #\n choices=[('very', 'Very relevant'),\n ('somewhat', 'Somewhat relevant'),\n ('none', 'Not relevant at all')],\n validators=[DataRequired()])\n submit = SubmitField('Submit')\n\n\nclass ExitQuestionnaireForm(Form):\n use_real_life = RadioField(\n 'Would you use system like this in real life?', #\n choices=[('cyes', 'Certainly yes'),\n ('yes', 'Yes'),\n ('maybe', 'Maybe'),\n ('no', 'No'),\n ('cno', 'Certainly no')],\n validators=[])\n liked = TextAreaField(\n 'What did you LIKE about the system (e.g. 
how relevant the ordering '\n 'of events was to you, ease of use etc) ?',\n validators=[Length(max=1000)])\n disliked = TextAreaField('What did you NOT LIKE about the system?',\n validators=[Length(max=1000)])\n other = TextAreaField('Any other comments to the system?',\n validators=[Length(max=1000)])\n submit = SubmitField('Submit')\n\n\nclass LoginAsForm(Form):\n user_id = StringField('User id', validators=[DataRequired()])\n submit = SubmitField('Submit')\n\n\nclass AdminLogin(Form):\n admin_password = PasswordField('Password', validators=[DataRequired()])\n submit = SubmitField('Submit')","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164819657","text":"#!/usr/bin/python\n#!/usr/bin/env python\nimport os, sys\nprint(\"Python Version: %s.%s.%s\" % sys.version_info[:3])\nos.chdir('/home/pi/homecontrol/') # Change working directory\nimport time\nsys.path.append('/home/pi/PythonUtilities')\nimport ThreadHelper\n\nexit_loop = False\n\n\n@ThreadHelper.threaded\ndef loop(n=0):\n global exit_loop\n i = n\n while not exit_loop:\n print(i)\n time.sleep(1)\n i += 1\n exit_loop = False\n\n\nloop()\n\nloop(10)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525253979","text":"meals = {'Tea': 10, 'Pizza': 100, 'Samosa': 10, 'Coffee': 20, 'Ice-Cream': 30}\n\nfor item in meals:\n\tprint(item, end=' ')\n\n\ndef resaurent():\n\ttotal_cost = 0\n\twhile(True):\n\t\tmeal_name = input('\\nEnter meal name for buy them or q for Quit: ')\n\t\tif meal_name in meals:\n\t\t\tmeal_quantity = int(input('Enter meal quantity: '))\n\t\t\ttotal_cost = total_cost + (meals[meal_name] * meal_quantity)\n\t\t\t\n\t\telif meal_name == 'q':\n\t\t\tprint(f'Total Cost is {total_cost}!')\n\t\t\tbreak\n\n\t\telse:\n\t\t\tprint('Enter Correct meal name or Quit Key!')\n\n\nif __name__ == \"__main__\":\n\tresaurent()\n\t\t\t\n\t\n","sub_path":"Crash-Course/restaurent.py","file_name":"restaurent.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374350293","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.http import Http404\nfrom .models import Company, General, Code, Type, Contact, Notes\nfrom .serializers import CompanySerializer, GeneralSerializer\n# Create your views here.\n\n\nclass CompanyInfo(APIView):\n \"\"\"\n GET or POST company information\n \"\"\"\n\n def get(self, request):\n company = Company.objects.all()\n serializer = CompanySerializer(company, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = CompanySerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass CompanyDetail(APIView):\n \"\"\"\n GET PUT OR DELETE object based on id\n \"\"\"\n\n def get_object(self, id):\n try:\n return Company.objects.get(pk=id)\n except Company.DoesNotExist:\n raise Http404\n\n\n def get(self, request,id):\n company = self.get_object(id)\n serializer = CompanySerializer(company)\n return Response(serializer.data)\n\n\n def put(self, request, id):\n obj = 
self.get_object(id)\n        serializer = CompanySerializer(obj, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self,request,id):\n        company=self.get_object(id)\n        company.delete()\n        return Response({\"success\":\"object deleted\"})\n\n    \n    \n\n\nclass GeneralInfo(APIView):\n    \"\"\"\n    GET or POST general information of company\n    \"\"\"\n\n    def get(self, request):\n        general_info = General.objects.all()\n        serializer = GeneralSerializer(general_info, many=True)\n        return Response(serializer.data)\n\n    def post(self, request):\n        serializer = GeneralSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234334240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov  6 08:56:39 2020\n\n@author: zhangxi\n\"\"\"\nimport pandas as pd\nimport pymysql\nimport numpy as np\nimport os\nimport datetime\nimport time\nimport functools\nimport sys\nimport re\nfrom gadgets import timer\n\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\n\n#月累积柱状图&总额线图\nthis_path='E:/OneDrive/国寿养老工作/财务部工作/财务分析/财务收入分析/'\norigin=pd.read_excel(this_path+'三栏账数据源.xlsx',sheet_name='数据源',dtype={'日期':str,'业务摘要':str,'凭证号':str})\n \ncost_account='业务宣传费'\n\ncost=origin[origin.二级科目==cost_account]\ncost=cost[cost['年度']>2016]\n\ncost['科目余额计算列']=cost['科目余额计算列']*-1/10000\nfor index,row in cost.iterrows():\n    \n    if re.search('冲销(\\d*)号凭证',cost.loc[index,'业务摘要']):\n        acc_num=re.search('冲销(\\d*)号凭证',cost.loc[index,'业务摘要']).group(1)\n        temp_index=\"%s-%s\" %(cost.loc[index,'年度'],acc_num)\n        \n        temp_dpt=cost[cost.凭证索引==temp_index]['可辨认的成本中心'].values\n        cost.loc[index,'可辨认的成本中心']=temp_dpt\n        \npt=cost.pivot_table('科目余额计算列',index=['年度',\"月度\"],columns='可辨认的成本中心',aggfunc=np.sum)\npt.fillna(0,inplace=True)\ndpt=['综合管理部','市场一部','市场二部','市场三部','职业年金部','业务运营部','财务会计部']\n\nindex_1=pt.index.levels[0]\nindex_2=[1,2,3,4,5,6,7,8,9,10,11,12]\ncol=pt.columns\nmulti_index=[]\nfor i in index_1:\n    for j in index_2:\n        multi_index.append((i,j))\nptt=pd.DataFrame(index=multi_index,columns=col)\nfor index,row in ptt.iterrows():\n    ptt.loc[index,'年度']=(index[0])\n    ptt.loc[index,'月度']=(index[1])\nptt.set_index(['年度','月度'],drop=True,inplace=True)\n\na=set(pt.index)\nb=set(ptt.index)\nc=b-a\ndff=pd.DataFrame(index=c,columns=col)\npt=pd.concat([pt,dff],axis=0,join='outer')\npt.reset_index(inplace=True)\npt.sort_values(by=['年度','月度'],ascending=True,inplace=True)\n\npt['月度']=pt['月度'].astype(int).astype(str)\npt['年度']=pt['年度'].astype(int).astype(str)\n\npt.set_index(['年度','月度'],drop=True,inplace=True)\n\npt=pt[dpt] #pivot_tab\npct=pt.copy() #pivot_cumsum_tab\n#制作累积柱状图的bottom\npct.iloc[:,0]=0\nfor i in range(len(pct)):\n    for j in range(1,len(pct.columns)):\n        pct.iloc[i,j]=pct.iloc[i,j-1]+pt.iloc[i,j-1]\n\n#预处理x轴的标签\nx_label=[]\nfor i in range(len(pt.index)):\n    if pt.index[i][1]=='1':\n        x_label.append(\"%s\\n%s\" %(pt.index[i][1],pt.index[i][0]))\n    else:\n        x_label.append(\"%s\" 
%(pt.index[i][1]))\n\nplt.rcParams['font.sans-serif']=['SimHei']#这两句作用为防止中文乱码\nplt.rcParams['axes.unicode_minus']=False\n\nfig,ax1=plt.subplots()\nax1.set_ylabel('当月金额')\n#/画图\n\nfor dpt in pt.columns.values:\n plt.bar(x=range(len(pt.index)),height=pt.loc[:,dpt],bottom=pct.loc[:,dpt],label=dpt)\n \nplt.legend(loc='upper left',ncol=3,bbox_to_anchor=(0,0.96))\n\n#按年求出累积数并画图\npt['本月求和']=pt.sum(axis=1)\npt['当年累积求和']=pt['本月求和'].groupby('年度').cumsum()\nax2=ax1.twinx()\nax2.set_ylabel('当年累积金额')\nax2=plt.plot(range(len(pt.index)),pt['当年累积求和'],'r-.',label='当年累积')\n\nfor s in range(len(pt.index)):\n if (s+1)%3==0:\n plt.text(s,pt.iloc[s,-1]*1.02,round(pt.iloc[s,-1],2))\n\nplt.legend(loc='upper left')\nplt.xticks(range(len(pt.index)),labels=x_label,fontsize=12,rotation=45)\n","sub_path":"fee_matplotlib_plot.py","file_name":"fee_matplotlib_plot.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269456223","text":"import simpy\nclass Car(object):\n def __init__(self, env):\n \tself.env = env\n \tself.action = env.process(self.run())\n #start the run process an instance is created.\n def run(self):\n \twhile True:\n print('Start parking and washing at %d' % self.env.now)\n wash_duration = 5\n \n yield self.env.process(self.wash(wash_duration))\n # We yield the process that returns to wait for it to finish.\n # The wash process has finished and we can start driving again.\n print('Start driving at %d' % self.env.now)\n trip_duration = 2\n yield self.env.timeout(trip_duration)\n\n def wash(self, duration):\n yield self.env.timeout(duration)\n\n\n\ndef main():\n env = simpy.Environment()\n car = Car(env) \n car.run()\n env.run(until=60)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"204113/Lab09/simulation code/wash_with_interrupt.py","file_name":"wash_with_interrupt.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"110088887","text":"# -*- coding: utf-8 -*-\n#\n# format du probleme de Cauchy :\n#\t\t\t\ty(0) = b\n#\t\t\t\ty'=f(y,t)\n#\t\t\t\t\t|\n#\t\t\t\t\tv\n#\t\t\t\ty0 <- np.array\n#\t\t\t\tf : np.array,t --> np.array\n#\n\nimport numpy as np\nimport matplotlib.pyplot as mp\n\ndef step_Euler(y,t,h,f):\n\t\"\"\"Retourne un pas de la résolution d'une équation differencielle avec la méthode d'Euler\n\ty : ordonnee du point précédent\n\tt : abscisse du point précédent\n\th : longueur d'un pas\n\tf : y'(x) = f(y,t)(x) ou y est la fonction recherchée\n\t\"\"\"\n\treturn y + h * f(y,t)\n\t\ndef step_milieu(y,t,h,f):\n\t\"\"\"Retourne un pas de la résolution d'une équation differencielle avec la méthode des milieux\n\ty : ordonnee du point précédent\n\tt : abscisse du point précédent\n\th : longueur d'un pas\n\tf : y'(x) = f(y,t)(x) ou y est la fonction recherchée\n\t\"\"\"\n\treturn y + h * f(y + h * f(y,t)/2. 
,t+(h/2.))\n\ndef step_Heun(y,t,h,f):\n\t\"\"\"Retourne un pas de la résolution d'une équation differencielle avec la méthode de Heun\n\ty : ordonnee du point précédent\n\tt : abscisse du point précédent\n\th : longueur d'un pas\n\tf : y'(x) = f(y,t)(x) ou y est la fonction recherchée\n\t\"\"\"\n\tk = f(y,t)\n\treturn y + (h/2.*(k+f(y+h*k,t+h)))\n\ndef step_RK4(y,t,h,f):\n\t\"\"\"Retourne un pas de la résolution d'une équation differencielle avec la méthode de Runge Kutta d'ordre 4\n\ty : ordonnee du point précédent\n\tt : abscisse du point précédent\n\th : longueur d'un pas\n\tf : y'(x) = f(y,t)(x) ou y est la fonction recherchée\n\t\"\"\"\n\tk1 = f(y,t)\n\tk2 = f(y+h*k1/2.,t+h/2.)\n\tk3 = f(y+h*k2/2.,t+h/2.)\n\tk4 = f(y+h*k3,t+h)\n\treturn y +h*(k1 + 2* (k2+k3)+k4)/6.\n\t\n\t\ndef meth_n_steps(y0,t0,n,h,f,meth):\n\t\"\"\"Retourne le tableau constitué des différents points de la\n\tfonction solution entre t0 et t0+n*h.\n\ty0 : ordonnee initiale (np.array)\n\tt0 : abscisse initiale (reel)\n\tn : nombre de points\n\th : longueur d'un pas\n\tf : fonction telle que y'=f(y,t) y : np.array, t:reel, renvoie : np.array \n\tmeth : méthode utilisee\n\t\"\"\"\n\ti = 0;\n\ty = np.zeros((n+1,)+(np.asarray(y0).shape))\n\ty = np.asarray(y)\n\tt = np.arange(0,(n+1)*h,h) + t0\n\ty[0] = y0;\n\tfor i in range(1,n+1):\n\t\ty[i] = meth(y[i-1],t[i-1],h,f)\n\treturn y\n\n\n\ndef test_meth(y0,t0,f,xn=3,H=0.01,step_meth=step_Euler,name='test_meth.png',clear=False):\n\t\"\"\"Trace la fonction solution de l'équation différentielle proposée\n\tLe tracé s'effectue entre t0 et xn.\n\t\"\"\"\n\tX = np.arange(t0,xn,H)\n\tY = meth_n_steps(y0,t0,len(X)-1,H,f,step_meth)\n\tX = np.arange(t0,xn,H)\n\tmp.plot(X,Y)\n\tmp.savefig(name)\n\tif(clear):\n\t\tmp.clf();\n\ndef tangent_field(f,xbound=[-3,3],h=.5,name='tan_field.png',clear=False):\n\t\"\"\"Trace le champ des tangeantes de l'équation différencielle y'=f(y,t)\n\t\"\"\"\n\tybound=xbound;\n\tX = np.arange(xbound[0],xbound[1]+h,h);\n\tY = np.arange(ybound[0],ybound[1]+h,h);\n\tU = [];\n\tV = [];\n\tfor i,t in enumerate(X):\n\t\tU.append([]);\n\t\tV.append([]);\n\t\tfor j,y in enumerate(Y):\n\t\t\ttemp = f(np.asarray([y,t]),t)\n\t\t\tU[i].append(temp[1]);\n\t\t\tV[i].append(temp[0]);\n\tV=np.transpose(V);\n\tmp.quiver(X,Y,U,V,pivot='middle');\n\tmp.savefig(name);\n\tif(clear):\n\t\tmp.clf();\n\n\n\t\t#test_meth(1,1.5,lambda y,t : y)\n\t\n\t\t#tangent_field((lambda y,t : np.array([y[0],1])),h=.5)\n","sub_path":"partie1/resdiff.py","file_name":"resdiff.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"178363622","text":"import collections\n# import discord\nban_purple = ['ピカチュウ','ピカチュウ','ピカチュウ','ピカチュウ']\nban_orange = ['カビゴン','カビゴン','リザードン','リザードン'] # Test List\nCharacter = ['ピカチュウ','ゲッコウガ','カビゴン','ワタシラガ','リザードン','ルカリオ','ファイアロー']\n\nwhile len(ban_purple) + len(ban_orange) < 11: # purple orangeの配列要素数合計が11未満の時ずっと実行\n if len(ban_purple) < 5 or len(ban_orange) < 5: # どちらかが満タンかどうか検知\n TeamColor = input(\"Please select your team color:Purple or Orange \") # チーム名の入力を求める →リアクションで検知に変更\n\n if TeamColor == ('Purple'): # 紫チームの処理\n if len(ban_purple) == 5: # ちなみにさっきから出てるlenは配列に何個値が入ってるか調べてくれる関数です これが5であれば既に5人入ってることになる\n print(\"This team is full. 
Try another one.\")\n                continue # ループの先頭に戻る\n\n            else: # 5人じゃなかった場合の処理\n                BanCharacter = input(\"Please select your ban character:\") # banしたいキャラクターの入力を求める →コマンドで入力に変更\n                if BanCharacter in Character: # さっき入力された値がCharacter配列に存在するか調べる \n                    ban_purple.append(BanCharacter) # 入力された値を紫チームのban配列に追加\n                    print(\"Selected ban character:\" + BanCharacter)\n                else: # 入ってなかった時\n                    print(\"Such character does not exist. Try again.\")\n                continue\n\n        elif TeamColor == ('Orange'): # オレンジチームの処理 全部一緒\n            if len(ban_orange) == 5:\n                print(\"This team is full. Try another one.\")\n                continue\n\n            else:\n                BanCharacter = input(\"Please select your ban character:\")\n                if BanCharacter in Character:\n                    ban_orange.append(BanCharacter)\n                    print(\"Selected ban character: \" + BanCharacter)\n                else:\n                    print(\"Such character does not exist. Try again.\")\n                    continue\n        else: # PurpleかOrangeが入力されなかった時\n            print(\"Such team does not exist. Try again.\")\n            continue\n\n    else: # いっぱいになった時\n        print(\"Game is full.\")\n        print(ban_purple)\n        print(ban_orange)\n        break # ループが暴れたので途中で終了させる\n\nbanned_purple = collections.Counter(ban_purple).most_common()[0][0] # 配列内の最頻値をbanned変数に入れる\nbanned_orange = collections.Counter(ban_orange).most_common()[0][0]\nprint(banned_purple)\nprint(banned_orange)\n","sub_path":"ban.py","file_name":"ban.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"469818553","text":"import re\nprint(\"\\033[1;31;40m----------[+] CSRF POC GENERATOR BY RC [+]----------\")\nres=0\ndef get(url,method):\n\tif method==\"1\":\n\t\to = open(\"output.html\",\"w\")\n\t\tdata = open(\"temp.html\").read()\n\t\to.write(re.sub(\"change\",url,data))\n\t\to.close()\n\t\tdata=open(\"output.html\").read()\n\t\tprint(\"\\033[1;37;40m\"+\"Copy The HTML CODE BELOW\\n\"+data)\n\t\tprint(\"\\nAlso Saved As output.html\")\n\telif method=='2':\n\t\to = open(\"output.html\",\"w\")\n\t\tdata = open(\"temp1.html\").read()\n\t\to.write(re.sub(\"change\",url,data))\n\t\to.close()\n\t\tdata=open(\"output.html\").read()\n\t\tprint(\"\\033[1;37;40m\"+\"Copy The HTML CODE BELOW\\n\"+data)\n\t\tprint(\"\\nAlso Saved As output.html\")\n\telse:\n\t\tprint(\"[+] Wrong Input [+]\")\ndef post(act,name,val):\n\t\to = open(\"postoutput.html\",\"w\")\n\t\tdata = open(\"post.html\").read()\n\t\to.write(re.sub(\"njk\",act,data))\n\t\t#o.write(data)\n\t\tdata=open(\"postoutput.html\").read()\n\t\to=open(\"postoutput.html\",\"a\")\n\t\tttw=f\"\\n \"\n\t\to.write(ttw)\n\t\to.close()\n\t\t#print(\"\\033[1;33;40m [+] POC File Saved As outputpost.html [+]\")\n\t\t#a=open(\"postoutput.html\").read()\n\t\t#print(a)\n\n\n\nprint('\\nSelect Option From Below : ')\nprint(\"\\n\\033[1;33;40m1. For GET Request\")\nprint(\"\\n\\033[1;34;40m2. For POST Request\")\nask = input(\"\\n\\033[1;32;40mEnter Here : \")\nif ask == \"1\":\n\tprint(\"\\n\\033[1;31;40m[+]------- Enter The Request URL --------[+]\")\n\turl = input(\"\\n\\033[1;32;40mEnter Here : \")\n\tprint(\"\\n\\033[1;31;40mSelect From The Below\")\n\tprint(\"\\n\\033[1;33;40m1. For Based \")\n\tprint(\"\\n\\033[1;34;40m2. For Based\")\n\tmethod = input(\"\\n\\033[1;32;40mEnter Here : \")\n\tget(url,method)\nelif ask==\"2\":\n\tprint(\"\\n\\033[1;31;40m[+]---------- Enter The Action URL ----------[+]\")\n\tact=input(\"\\n\\033[1;32;40mEnter Here : \")\n\tprint(\"\\n\\033[1;31;40m[+]---------- How Many Inputs Do You Want? 
----------[+]\")\n\tinp=int(input(\"\\n\\033[1;32;40mEnter Here : \"))\n\tinps=0\n\tfor i in range(inp):\n\t\tprint(\"\\n\\033[1;31;40m[+]---------- Enter The name= ----------[+]\")\n\t\tname=input(\"\\n\\033[1;32;40mEnter Here : \")\n\t\tprint(\"\\n\\033[1;31;40m[+]---------- Enter The Value For It ----------[+]\")\n\t\tval=input(\"\\n\\033[1;32;40mEnter Here : \")\n\t\tinps+=1\n\t\timport time\n\t\ttime.sleep(.21)\n\t\tpost(act,name,val)\n\t\tif inps==inp:\n\t\t\tf=open('postoutput.html','a')\n\t\t\tTTw=\"\\n\"\n\t\t\tTtw=\"\\n \\n \\n\"\n\t\t\tf.write(TTw + Ttw)\n\t\t\tprint(\"\\033[1;33;40m [+] POC File Saved As outputpost.html [+]\")\n\t\telse:\n\t\t\t#post(act,name,val)\n\t\t\tf=open('postoutput.html','a')\n\t\t\tr=0\n\t\t\twhile r!=1:\n\t\t\t\tttk=f\"\\n\"\n\t\t\t\tf.write(ttk)\n\t\t\t\tr+=1","sub_path":"pocgen.py","file_name":"pocgen.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568603639","text":"import argparse\nfrom split_coordinates import split_coords\nfrom remove_duplicates import remove_duplicates\n\n\n# Set arguments\ndef argparser():\n parser = argparse.ArgumentParser(description=\"Ask for paths\")\n parser.add_argument(\"-c\", \"--coords\", help=\"circRNA coordinates\")\n parser.add_argument(\"-i\", \"--infile\", help=\"ENCORI infile\")\n parser.add_argument(\"-A\", \"--annotation\", help=\"path to intron file\")\n parser.add_argument(\"-E\", \"--exon\", help=\"Path to exon file\")\n parser.add_argument(\"-o\", \"--outfile\", help=\"Out Directory\")\n args = parser.parse_args()\n\n return args\n\n\n# Set coordinates of the downstream intron\ndef down_intron(args):\n annotation = open(args.annotation, \"r\").read().splitlines()\n for line in annotation:\n col = line.split(\"\\t\")\n if int(col[2]) == split_coords(args.coords)[1] - 1:\n downstream_intron = (int(col[1]))\n\n return downstream_intron\n\n\n# Set coordinates of upstream intron\ndef up_intron(args):\n annotation = open(args.annotation, \"r\").read().splitlines()\n for line in annotation:\n col = line.split(\"\\t\")\n if int(col[1]) == split_coords(args.coords)[2]:\n upstream_intron = (int(col[2]))\n\n return upstream_intron\n\n\n# indexing gtf for first and last exons\ndef exon_coords(args):\n line_number1 = []\n line_number2 = []\n exon_file = open(args.exon).read().splitlines()\n for line in exon_file:\n col = line.split(\"\\t\")\n if col[0] == split_coords(args.coords)[0] and col[2] == \"exon\" and col[3] == str(split_coords(args.coords)[1]):\n line_number1.append(exon_file.index(line))\n if col[0] == split_coords(args.coords)[0] and col[2] == \"exon\" and col[4] == str(split_coords(args.coords)[2]):\n line_number2.append(exon_file.index(line))\n\n\n # Making list of exons composing circRNA\n exon_list = []\n for line in exon_file[line_number1[0]:line_number2[0]+1]:\n col = line.split(\"\\t\")\n if col[2] == \"exon\":\n exon_list.append(col[0] + \":\" + col[3] + \"-\" + col[4])\n\n exon_list = remove_duplicates(exon_list)\n\n return exon_list\n\n\n\n# Search within ENCORI output to find binding sites of RBP\ndef ENCORI(args, downstream_intron, upstream_intron):\n outlist = []\n infile = open(args.infile, \"r\").read().splitlines()\n for line in infile:\n if \"#\" not in line:\n col = line.split(\"\\t\")\n if col[8] == split_coords(args.coords)[0] \\\n and int(col[9]) >= downstream_intron + 1 \\\n and int(col[10]) <= upstream_intron:\n\n outlist.append(line)\n\n return outlist\n\n\n# Determine where proteins 
are binding to; Downstream intron, within circRNA, BSJ, exon, or Upstream intron\ndef location_of_binding(args, outlist, exon_list):\n finaloutlist = []\n for line in outlist:\n col = line.split(\"\\t\")\n if int(col[9]) < split_coords(args.coords)[1]:\n line = line + \"\\t\" + \"Downstream\"\n elif int(col[9]) > split_coords(args.coords)[2]:\n line = line + \"\\t\" + \"Upstream\"\n elif int(col[9]) > split_coords(args.coords)[1] and int(col[10]) < split_coords(args.coords)[2]:\n line = line + \"\\t\" + \"circRNA\"\n elif int(col[9]) == split_coords(args.coords)[1] or int(col[10]) == split_coords(args.coords)[2]:\n line = line + \"\\t\" + \"BSJ\"\n elif int(col[10]) > split_coords(args.coords)[1] or int(col[9]) < split_coords(args.coords)[2]:\n line = line + \"\\t\" + \"BSJ\"\n finaloutlist.append(line)\n for exon in exon_list:\n if int(col[9]) >= split_coords(exon)[1] and int(col[10]) <= split_coords(exon)[2]:\n line = line + \"\\t\" + \"Exon Binding\"\n finaloutlist.append(line)\n\n return finaloutlist\n\n\n# Count how many are binding within introns\ndef intron_binding(args, finaloutlist):\n yes_count = 0\n for element in finaloutlist:\n col = element.split(\"\\t\")\n if int(col[9]) < split_coords(args.coords)[1] \\\n or int(col[9]) > split_coords(args.coords)[2]:\n yes_count += 1\n\n return yes_count\n\n\n# Write output to new file\ndef write_out(args, finaloutlist, yes_count):\n outfile = open(args.outfile, \"w\")\n for element in finaloutlist:\n outfile.write(element + \"\\n\")\n outfile.write(\"Number of intronic Binding sites\" + \"\\t\" + str(yes_count) + \"\\n\")\n outfile.write(\"Number of circRNA Binding sites\" + \"\\t\" + str(len(finaloutlist)-yes_count) + \"\\n\")\n\n\n# Set main arguments\ndef main():\n\n args = argparser()\n\n downstream_intron = down_intron(args)\n\n upstream_intron = up_intron(args)\n\n exon_list = exon_coords(args)\n\n outlist = ENCORI(args, downstream_intron, upstream_intron)\n\n finaloutlist = location_of_binding(args, outlist, exon_list)\n\n yes_count = intron_binding(args, finaloutlist)\n\n write_out(args, finaloutlist, yes_count)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n","sub_path":"RBP_binding.py","file_name":"RBP_binding.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198915285","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport glob \nimport os\nimport sys\n\ndef decode_adpcm():\n files = glob.glob('./input/*.opus')\n for f in files:\n cmd = '%s %s'\n os.system(cmd % ('./tdec', f))\n os.rename('decode.pcm', os.path.basename(f).split('.')[0]+'.pcm')\n os.system(\"mv *.pcm output_wav\")\n\nif __name__ == '__main__':\n decode_adpcm()\n\n","sub_path":"python/adpcm2pcm/adpcm_dec.py","file_name":"adpcm_dec.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"192714434","text":"# Little guided workshop on how to alter existing code\n# Keywords: electricity, field, vector, charge, force\n# Licence: MIT Licence\n# Author: tbisig@gmail.com\n\nimport matplotlib.pyplot as plt\nfrom numpy import minimum\nfrom numpy import absolute\nfrom math import atan2\nfrom math import sqrt\nfrom math import cos\nfrom math import sin\nfrom math import pi\nimport random\n\nepsilon_0 = 8.854187817*10E-12\nscale = 0.5\ncharge_scale = 10**(-9)\n\n# define the charges\ncharges = 
[[0.2,0.2,2.0],[0.7,0.7,-1.0],[0.5,0.3,1.0],[0.1,0.8,1.5],[0.8,0.1,-1.5],[0.4,0.3,2]]\n\nsum_charges = 0\nloc_charges = []\nfor c in charges:\n loc_charges.append([c[0],c[1]])\n sum_charges += absolute(c[2])\n\nmax_field = 1/(4*pi*epsilon_0)*sum_charges*charge_scale/(0.5*scale)**2\nmin_field = 0\n\ndef alp(w):\n return minimum(1.0, w/(max_field-min_field))\n\n# this defines the grid of Points Of Interest (poi)\npois = []\nfor i in range(20):\n for j in range(20):\n t = [i/20.0,j/20.0]\n if t not in loc_charges:\n pois.append(t)\n \ndef getEFieldComponents(ch,p):\n\n field_x = 0.0\n field_y = 0.0\n \n # loop over all charges\n for c in ch:\n # calculate electrostatic field at point\n # p from charge \n\n ####### -- for the students -- #######\n d_x = p[0]-c[0]\n d_y = p[1]-c[1]\n r = sqrt(d_x**2+d_y**2)\n f = 1/(4*pi*epsilon_0)*c[2]*charge_scale/r**2\n angle = atan2(d_y,d_x)\n \n field_x += f*cos(angle)\n field_y += f*sin(angle)\n\n ####### ---------------------- #######\n\n #field_x = max_field*random.random()\n #field_y = max_field*random.random()\n \n return [field_x, field_y]\n\n# setting up the stage\nax = plt.axes()\nfig, ax = plt.subplots()\n\n# draw charges\nfor c in charges:\n if c[2]>=1:\n col = 'r'\n else:\n col = 'b'\n ax.add_artist(plt.Circle((c[0],c[1]), 0.02*sqrt(abs(c[2])), color=col))\n\n# draw arrows\nfor l in pois:\n # get e-field components for a given location l\n f = getEFieldComponents(charges, l)\n # calculate 'length' of e-field arrow, to calculate relative strength\n abs_val = sqrt(f[0]**2+f[1]**2)\n ax.arrow(l[0], l[1], 0.005*f[0]/abs_val, 0.005*f[1]/abs_val, head_width=0.01, head_length=0.02, fc='k', ec='k', lw=0.02, alpha=alp(abs_val))\n\n# remove axes in plot\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\n# set the plotting area to 'quadratic'\nplt.gca().set_aspect('equal', adjustable='box')\n\n# save picture\nplt.savefig('field_vectors.png')\n#plt.show() #uncomment if you run the code on a local python installation","sub_path":"e-field-solution.py","file_name":"e-field-solution.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"467480266","text":"import math\nfrom scipy import misc\nimport numpy as np\nfrom matplotlib import pyplot\n\ndef fetch_data(filename, n, m, ratio):\n image = misc.imread(filename)\n side, _, _ = image.shape\n data = np.empty([n, m, image.shape[2]], dtype=np.uint8)\n for i in xrange(n):\n for j in xrange(m):\n r = (i + 0.5) / n * (side * ratio * 0.5) + side * (1.0 - ratio) * 0.5\n t = (j + 0.5) / m * 2.0 * math.pi + math.pi\n x = int(math.sin(t) * r + side * 0.5)\n y = int(math.cos(t) * r + side * 0.5)\n data[i, j] = image[x, y]\n return data\n\ndef main():\n data = fetch_data('y_600.png', 29, 29, 0.85)\n pyplot.imshow(data)\n pyplot.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"MegCup/2017/TaiChi/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"331661646","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport argparse\nimport unittest\n\nfrom compat import TemporaryDirectory, makedirs\nfrom vndk_definition_tool import GenericRefs\n\n\ntest_dir = None\n\nclass GenericRefsTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n if 
test_dir:\n cls.test_dir = test_dir\n else:\n cls.tmp_dir = TemporaryDirectory()\n cls.test_dir = cls.tmp_dir.name\n\n cls._build_fixtures()\n\n @classmethod\n def tearDownClass(cls):\n if not test_dir:\n cls.tmp_dir.cleanup()\n\n @classmethod\n def _build_fixture(cls, path, content):\n makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'w') as f:\n f.write(content)\n\n @classmethod\n def _build_fixtures(cls):\n lib32 = os.path.join(cls.test_dir, 'system', 'lib')\n lib64 = os.path.join(cls.test_dir, 'system', 'lib64')\n\n for lib_dir in (lib32, lib64):\n cls._build_fixture(os.path.join(lib_dir, 'libc.so.sym'),\n 'fclose\\nfopen\\nfread\\nfwrite\\n')\n cls._build_fixture(os.path.join(lib_dir, 'libm.so.sym'),\n 'cos\\nsin\\ntan\\n')\n\n def test_create_from_dir(self):\n g = GenericRefs.create_from_dir(self.test_dir)\n self.assertEqual(4, len(g.refs))\n\n self.assertIn('/system/lib/libc.so', g.refs)\n self.assertIn('/system/lib/libm.so', g.refs)\n self.assertIn('/system/lib64/libc.so', g.refs)\n self.assertIn('/system/lib64/libm.so', g.refs)\n\n self.assertEqual({'fclose', 'fopen', 'fread', 'fwrite'},\n g.refs['/system/lib/libc.so'])\n self.assertEqual({'fclose', 'fopen', 'fread', 'fwrite'},\n g.refs['/system/lib64/libc.so'])\n\n self.assertEqual({'cos', 'sin', 'tan'},\n g.refs['/system/lib/libm.so'])\n self.assertEqual({'cos', 'sin', 'tan'},\n g.refs['/system/lib64/libm.so'])\n\n\n def test_is_equivalent_lib(self):\n g = GenericRefs.create_from_dir(self.test_dir)\n\n class MockELF(object):\n def __init__(self, exported_symbols):\n self.exported_symbols = exported_symbols\n\n class MockLib(object):\n def __init__(self, path, exported_symbols):\n self.path = path\n self.elf = MockELF(exported_symbols)\n\n libc_sub = MockLib('/system/lib/libc.so', {'fclose', 'fopen', 'fread'})\n libc_sup = MockLib('/system/lib/libc.so',\n {'fclose', 'fopen', 'fread', 'fwrite', 'open'})\n libc_eq = MockLib('/system/lib/libc.so',\n {'fclose', 'fopen', 'fread', 'fwrite'})\n\n self.assertFalse(g.is_equivalent_lib(libc_sub))\n self.assertFalse(g.is_equivalent_lib(libc_sup))\n\n self.assertTrue(g.is_equivalent_lib(libc_eq))\n\n\ndef main():\n # Parse command line arguments.\n parser = argparse.ArgumentParser()\n parser.add_argument('--test-dir', help='directory for temporary files')\n args, unittest_args = parser.parse_known_args()\n\n # Convert command line options.\n global test_dir\n\n if args.test_dir:\n test_dir = args.test_dir\n\n # Run unit test.\n unittest.main(argv=[sys.argv[0]] + unittest_args)\n\nif __name__ == '__main__':\n main()\n","sub_path":"vndk/tools/definition-tool/tests/test_generic_refs.py","file_name":"test_generic_refs.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"435720167","text":" \nimport os\n \nimport mock\n \nfrom scalarizr import storage2\nfrom nose.tools import raises\n \n \nclass TestLvmVolume(object):\n \n @mock.patch.multiple('scalarizr.linux.coreutils',\n dd=mock.DEFAULT,\n losetup=mock.DEFAULT,\n losetup_all=mock.DEFAULT)\n def test_ensure_new(self, dd, losetup, losetup_all):\n losetup_all.return_value.__getitem__.return_value = '/dev/loop0'\n \n vol = storage2.volume(type='loop', size=1, zerofill=True)\n vol.ensure()\n \n assert vol.device == '/dev/loop0'\n assert vol.file.startswith('/mnt/loopdev')\n dd.assert_called_once_with(**{\n 'if': '/dev/zero',\n 'of': vol.file,\n 'bs': '1M',\n 'count': 1024})\n losetup.assert_called_with(vol.file, find=True)\n \n \n 
@raises(storage2.StorageError)\n    def test_ensure_new_not_enough_args(self):\n        vol = storage2.volume(type='loop')\n        vol.ensure()\n    \n    @mock.patch.multiple('scalarizr.linux.coreutils',\n    dd=mock.DEFAULT,\n    losetup=mock.DEFAULT,\n    losetup_all=mock.DEFAULT)\n    @mock.patch.object(os, 'statvfs')\n    def test_ensure_new_with_parametrized_size(self, statvfs,\n    losetup_all, losetup, dd):\n        statvfs.return_value=mock.Mock(\n    f_bsize=4096,\n    f_blocks=13092026,\n    f_bfree=10613528)\n        losetup_all.return_value.__getitem__.return_value = '/dev/loop0'\n    \n        vol = storage2.volume(type='loop', size='25%root')\n        vol.ensure()\n    \n        dd.assert_called_once_with(**{\n    'if': '/dev/zero',\n    'of': vol.file,\n    'bs': '1M',\n    'seek': 12784,\n    'count': 1})\n    \n    \n    @mock.patch.multiple('scalarizr.linux.coreutils',\n    losetup_all=mock.DEFAULT)\n    @mock.patch('scalarizr.linux.coreutils.losetup')\n    @mock.patch.object(os.path, 'exists')\n    @mock.patch.object(os, 'stat')\n    def test_ensure_existed(self, stat, exists, losetup, losetup_all):\n        stat.return_value = mock.Mock(st_size=1073741931)\n        exists.return_value=True\n        losetup_all.return_value.__getitem__.return_value = '/mnt/loopdev0'\n    \n        vol = storage2.volume(\n    type='loop',\n    device='/dev/loop0',\n    file='/mnt/loopdev0'\n    )\n        vol.ensure()\n    \n        losetup_all.assert_called_once_with()\n    \n    def test_restore(self):\n        pass\n    \n","sub_path":"tests/unit/scalarizrtests/storage2/volumes/test_loop.py","file_name":"test_loop.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"334079395","text":"\"\"\"\nRecursive fibonacci sequence\n\"\"\"\n\n#print(\"This program calculates number of ways to ascend a given number of stairs with a combination of 1, 2, or 3 steps\")\n#user_input = input(\"Please enter number of stairs: \")\n\nfibonacci_cache = {}\n\ndef fibonacci(n):\n\t# return a cached value when one exists so the recursion is actually memoized\n\tif n in fibonacci_cache:\n\t\treturn fibonacci_cache[n]\n\tvalue = 0\n\tif n == 1:\n\t\tvalue = 1\n\telif n == 2:\n\t\tvalue = 1\n\telif n > 2:\n\t\tvalue = fibonacci(n-1) + fibonacci(n-2)\n\n\t#cache the value and return it\n\tfibonacci_cache[n] = value\n\treturn value\n\n#for n in range (1,100):\n#\tprint(n, \":\", fibonacci(n))\n","sub_path":"fibonacci_recursive.py","file_name":"fibonacci_recursive.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"205213179","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport matplotlib.dates as mdates\r\ndef bytespdatenum(fmt,encoding='utf-8'):\r\n\tstrconverter=mdates.strpdate2num(fmt)\r\n\tdef bytesconverter(b):\r\n\t\ts=b.decode(encoding)\r\n\t\treturn strconverter(s)\r\n\treturn bytesconverter\r\ndef convert(li):\r\n    pi=[]\r\n    for x in li:\r\n        tes=x[0]+x[1]+x[2]\r\n        if tes=='Jan':\r\n            tes='01'\r\n        elif tes=='Feb':\r\n            tes='02'\r\n        elif tes=='Mar':\r\n            tes='03'\r\n        elif tes=='Apr':\r\n            tes='04'\r\n        elif tes=='May':\r\n            tes='05'\r\n        elif tes=='Jun':\r\n            tes='06'\r\n        elif tes=='Jul':\r\n            tes='07'\r\n        elif tes=='Aug':\r\n            tes='08'\r\n        elif tes=='Sep':\r\n            tes='09'\r\n        elif tes=='Oct':\r\n            tes='10'\r\n        elif tes=='Nov':\r\n            tes='11'\r\n        elif tes=='Dec':\r\n            tes='12'\r\n        x=tes+x[3:]\r\n        pi.append(x)\r\n    return pi \r\nfile1=open('BTC-profit.csv','r');\r\nli=file1.readlines()\r\nli=[x.split('\\n')[0] for x in li ]\r\nli=[(x.split(',')[0],x.split(',')[1]) for x in li]\r\ndate=[]\r\nprofit=[]\r\nfor x in li:\r\n\tdate.append(x[0])\r\n\tprofit.append(x[1])\r\n\r\nprofit=[int(x) for x in profit]\r\n#profit=[-x for x in 
profit]\r\n#date=convert(date)\r\ndate=np.loadtxt(date,unpack=True,converters={0:bytespdatenum('%Y%m%d')})\r\nplt.plot_date(date,profit,'-',label='Price')\r\n#plt.plot(date,profit)\r\nplt.xlabel('Year')\r\nplt.title('BTC-profit')\r\nplt.show()\r\nfile1.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ThemeBased/maxgrpah.py","file_name":"maxgrpah.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250520887","text":"import copy\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom network.base_net import MLP, RNN\nfrom network.predict_net import Predict_Network1, Predict_Network1_combine\nfrom network.QPLEX.dmaq_general import DMAQer\nfrom network.QPLEX.dmaq_qatten import DMAQ_QattenMixer\nfrom torch.optim import RMSprop\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n\n\nclass DMAQ_qattenLearner_SDQ_intrinsic:\n\n def __init__(self, args):\n self.n_actions = args.n_actions\n self.n_agents = args.n_agents\n self.state_shape = args.state_shape\n self.obs_shape = args.obs_shape\n input_shape = self.obs_shape\n\n if args.last_action:\n input_shape += self.n_actions\n if args.reuse_network:\n input_shape += self.n_agents\n\n self.args = args\n\n if args.QPLEX_mixer == \"dmaq\":\n self.eval_mix_net = DMAQer(args)\n self.target_mix_net = DMAQer(args)\n elif args.QPLEX_mixer == 'dmaq_qatten':\n self.eval_mix_net = DMAQ_QattenMixer(args)\n self.target_mix_net = DMAQ_QattenMixer(args)\n else:\n raise ValueError(\n \"Mixer {} not recognised.\".format(args.QPLEX_mixer))\n\n self.eval_rnn = RNN(input_shape, args)\n self.target_rnn = RNN(input_shape, args)\n\n self.eval_mlp = nn.ModuleList([MLP(args)\n for _ in range(args.n_agents)])\n self.target_mlp = nn.ModuleList(\n [MLP(args) for _ in range(args.n_agents)])\n\n self.eval_predict_withoutid = Predict_Network1(\n args.rnn_hidden_dim + args.obs_shape + args.n_actions, 128, args.obs_shape, False)\n self.target_predict_withoutid = Predict_Network1(\n args.rnn_hidden_dim + args.obs_shape + args.n_actions, 128, args.obs_shape, False)\n\n self.eval_predict_withid = Predict_Network1_combine(args.rnn_hidden_dim + args.obs_shape + args.n_actions + args.n_agents, 128,\n args.obs_shape, args.n_agents, False)\n self.target_predict_withid = Predict_Network1_combine(args.rnn_hidden_dim + args.obs_shape + args.n_actions + args.n_agents, 128,\n args.obs_shape, args.n_agents, False)\n\n if self.args.cuda:\n self.eval_rnn.to(torch.device(self.args.GPU))\n self.target_rnn.to(torch.device(self.args.GPU))\n self.eval_mix_net.to(torch.device(self.args.GPU))\n self.target_mix_net.to(torch.device(self.args.GPU))\n\n self.eval_mlp.to(torch.device(self.args.GPU))\n self.target_mlp.to(torch.device(self.args.GPU))\n\n self.eval_predict_withid.to(torch.device(self.args.GPU))\n self.target_predict_withid.to(torch.device(self.args.GPU))\n\n self.eval_predict_withoutid.to(torch.device(self.args.GPU))\n self.target_predict_withoutid.to(torch.device(self.args.GPU))\n\n self.target_rnn.load_state_dict(self.eval_rnn.state_dict())\n self.target_mix_net.load_state_dict(self.eval_mix_net.state_dict())\n self.target_mlp.load_state_dict(self.eval_mlp.state_dict())\n self.target_predict_withid.load_state_dict(\n self.eval_predict_withid.state_dict())\n self.target_predict_withoutid.load_state_dict(\n self.eval_predict_withoutid.state_dict())\n\n self.eval_parameters = list(self.eval_mix_net.parameters(\n 
)) + list(self.eval_rnn.parameters()) + list(self.eval_mlp.parameters())\n\n        if args.optimizer == \"RMS\":\n            self.optimizer = torch.optim.RMSprop(\n                self.eval_parameters, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)\n        self.model_dir = f'{args.model_dir}/{args.env}/seed_{args.seed}'\n\n        self.eval_hidden = None\n        self.target_hidden = None\n\n    def learn(self, batch, max_episode_len, train_step, t_env, epsilon=None):\n\n        episode_num = batch['o'].shape[0]\n        self.init_hidden(episode_num)\n        for key in batch.keys():\n            if key == 'u':\n                batch[key] = torch.tensor(batch[key], dtype=torch.long)\n            else:\n                batch[key] = torch.tensor(batch[key], dtype=torch.float32)\n        s, s_next, u, r, avail_u, avail_u_next, terminated = batch['s'], batch['s_next'], batch['u'], \\\n            batch['r'], batch['avail_u'], batch['avail_u_next'],\\\n            batch['terminated']\n        mask = 1 - batch[\"padded\"].float()\n        obs, obs_next = batch['o'], batch['o_next']\n\n        q_evals, q_targets, q_evals_local, q_evals_last, intrinsic_rewards = self.get_q_values(\n            batch, max_episode_len)\n        if t_env > self.args.start_anneal_time:\n            if self.args.anneal_type == 'linear':\n                intrinsic_rewards = max(1 - self.args.anneal_rate * (\n                    t_env - self.args.start_anneal_time) / 1000000, 0) * intrinsic_rewards\n            elif self.args.anneal_type == 'exp':\n                exp_scaling = (-1) * (1 / self.args.anneal_rate) / np.log(0.01)\n                TTT = (t_env - self.args.start_anneal_time) / 1000000\n                intrinsic_rewards = intrinsic_rewards * \\\n                    min(1, max(0.01, np.exp(-TTT / exp_scaling)))\n\n        mac_out = q_evals.clone().detach()\n\n        if self.args.cuda:\n            obs = obs.to(torch.device(self.args.GPU))\n            obs_next = obs_next.to(torch.device(self.args.GPU))\n            s = s.to(torch.device(self.args.GPU))\n            u = u.to(torch.device(self.args.GPU))\n            r = r.to(torch.device(self.args.GPU))\n            s_next = s_next.to(torch.device(self.args.GPU))\n            terminated = terminated.to(torch.device(self.args.GPU))\n            mask = mask.to(torch.device(self.args.GPU))\n\n        max_action_qvals, _ = q_evals.max(dim=3)\n        q_evals = torch.gather(q_evals, dim=3, index=u).squeeze(3)\n        curr_actions_onehot = torch.zeros(\n            u.squeeze(3).shape + (self.n_actions,))\n        if self.args.cuda:\n            curr_actions_onehot = curr_actions_onehot.to(\n                torch.device(self.args.GPU))\n\n        curr_actions_onehot = curr_actions_onehot.scatter_(3, u, 1)\n\n        with torch.no_grad():\n\n            q_targets[avail_u_next == 0.0] = -9999999\n\n            if self.args.double_q:\n                # Get actions that maximise live Q (for double q-learning)\n                mac_out[avail_u == 0] = -9999999\n                cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]\n                target_last_max_actions = q_evals_last.unsqueeze(\n                    1).max(dim=3, keepdim=True)[1]\n                double_max_actions = torch.cat(\n                    [cur_max_actions, target_last_max_actions], dim=1)\n                target_max_qvals = q_targets.max(dim=3)[0]\n                q_targets = torch.gather(\n                    q_targets, 3, double_max_actions).squeeze(3)\n\n                cur_max_actions_onehot = torch.zeros(\n                    double_max_actions.squeeze(3).shape + (self.n_actions,))\n                if self.args.cuda:\n                    cur_max_actions_onehot = cur_max_actions_onehot.to(\n                        torch.device(self.args.GPU))\n                cur_max_actions_onehot = cur_max_actions_onehot.scatter_(\n                    3, double_max_actions, 1)\n\n            else:\n                q_targets = q_targets.max(dim=3)[0]\n                target_max_qvals = q_targets.max(dim=3)[0]\n\n        if self.args.QPLEX_mixer == \"dmaq_qatten\":\n            ans_chosen, q_attend_regs, _ = self.eval_mix_net(\n                q_evals, s, obs, is_v=True)\n            ans_adv, _, _ = self.eval_mix_net(\n                q_evals, s, obs, actions=curr_actions_onehot, max_q_i=max_action_qvals, is_v=False)\n            chosen_action_qvals = ans_chosen + ans_adv\n        else:\n            ans_chosen = 
self.eval_mix_net(q_evals, s, is_v=True)\n ans_adv = self.eval_mix_net(\n q_evals, s, actions=curr_actions_onehot, max_q_i=max_action_qvals, is_v=False)\n chosen_action_qvals = ans_chosen + ans_adv\n\n with torch.no_grad():\n if self.args.double_q:\n if self.args.QPLEX_mixer == \"dmaq_qatten\":\n target_chosen, _, _ = self.target_mix_net(\n q_targets, s_next, obs_next, is_v=True)\n target_adv, _, _ = self.target_mix_net(\n q_targets, s_next, obs_next, actions=cur_max_actions_onehot, max_q_i=target_max_qvals, is_v=False)\n target_max_qvals = target_chosen + target_adv\n else:\n target_chosen = self.target_mix_net(\n q_targets, s_next, is_v=True)\n target_adv = self.target_mix_net(\n q_targets, s_next, actions=cur_max_actions_onehot, max_q_i=target_max_qvals, is_v=False)\n target_max_qvals = target_chosen + target_adv\n else:\n target_max_qvals = self.target_mix_net(\n target_max_qvals, s_next, is_v=True)\n\n # Calculate 1-step Q-Learning targets\n targets = r + self.args.beta * \\\n intrinsic_rewards.mean(\n dim=1) + self.args.gamma * (1 - terminated) * target_max_qvals\n\n # Td-error\n td_error = (chosen_action_qvals - targets.detach())\n\n # 0-out the targets that came from padded data\n masked_td_error = td_error * mask\n update_prior = (masked_td_error**2).squeeze().sum(dim=-1,\n keepdim=True) / mask.squeeze().sum(dim=-1, keepdim=True)\n\n # Normal L2 loss, take mean over actual data\n if self.args.QPLEX_mixer == \"dmaq_qatten\":\n loss = (masked_td_error**2).sum() / mask.sum() + q_attend_regs\n else:\n loss = (masked_td_error**2).sum() / mask.sum()\n\n norm_loss = F.l1_loss(q_evals_local, target=torch.zeros_like(\n q_evals_local), size_average=True)\n loss += self.args.norm_weight * norm_loss\n\n # Optimise\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n self.eval_parameters, self.args.grad_norm_clip)\n self.optimizer.step()\n\n if train_step > 0 and train_step % self.args.target_update_cycle == 0:\n self.target_rnn.load_state_dict(self.eval_rnn.state_dict())\n self.target_mix_net.load_state_dict(self.eval_mix_net.state_dict())\n self.target_mlp.load_state_dict(self.eval_mlp.state_dict())\n\n self.target_predict_withid.load_state_dict(\n self.eval_predict_withid.state_dict())\n self.target_predict_withoutid.load_state_dict(\n self.eval_predict_withoutid.state_dict())\n\n return update_prior.squeeze().detach()\n\n def _get_inputs_matrix(self, batch):\n obs, obs_next = batch['o'], batch['o_next']\n\n obs_clone = obs.clone()\n obs_next_clone = obs_next.clone()\n\n if self.args.last_action:\n u_onehot = batch['u_onehot']\n u_onehot_f = torch.zeros_like(u_onehot)\n u_onehot_f[:, 1:, :, :] = u_onehot[:, :-1, :, :]\n\n obs = torch.cat([obs, u_onehot_f], dim=-1)\n obs_next = torch.cat([obs_next, u_onehot], dim=-1)\n\n add_id = torch.eye(self.args.n_agents).type_as(obs).expand(\n [obs.shape[0], obs.shape[1], self.args.n_agents, self.args.n_agents])\n\n if self.args.reuse_network:\n obs = torch.cat([obs, add_id], dim=-1)\n obs_next = torch.cat([obs_next, add_id], dim=-1)\n\n return obs, obs_next, obs_clone, obs_next_clone, add_id\n\n def get_q_values(self, batch, max_episode_len):\n\n inputs, inputs_next, obs, obs_next, add_id = self._get_inputs_matrix(\n batch)\n inputs = torch.cat([inputs, inputs_next[:, -1].unsqueeze(1)], dim=1)\n inputs_shape = inputs.shape\n mask = 1 - batch[\"padded\"].float()\n\n if self.args.cuda:\n inputs = inputs.to(torch.device(self.args.GPU))\n inputs_next = inputs_next.to(torch.device(self.args.GPU))\n obs = 
obs.to(torch.device(self.args.GPU))\n obs_next = obs_next.to(torch.device(self.args.GPU))\n mask = mask.to(torch.device(self.args.GPU))\n\n self.eval_hidden = self.eval_hidden.to(torch.device(self.args.GPU))\n self.target_hidden = self.target_hidden.to(\n torch.device(self.args.GPU))\n\n u_onehot = batch['u_onehot']\n u_onehot = u_onehot.to(inputs.device).permute(0, 2, 1, 3)\n add_id = add_id.to(inputs.device).permute(0, 2, 1, 3)\n\n eval_h = self.eval_hidden.view(-1, self.args.rnn_hidden_dim)\n target_h = self.target_hidden.view(-1, self.args.rnn_hidden_dim)\n\n inputs = inputs.permute(0, 2, 1, 3)\n inputs_next = inputs_next.permute(0, 2, 1, 3)\n\n inputs = inputs.reshape(-1, inputs.shape[2], inputs.shape[3])\n inputs_next = inputs_next.reshape(-1,\n inputs_next.shape[2], inputs_next.shape[3])\n\n q_eval_global, out_eval_h = self.eval_rnn(inputs, eval_h)\n q_target_global, out_target_h = self.target_rnn(inputs_next, target_h)\n\n q_eval_global = q_eval_global.reshape(inputs_shape[0], inputs_shape[2], q_eval_global.shape[-2],\n q_eval_global.shape[-1]).permute(0, 2, 1, 3)\n\n out_eval_h = out_eval_h.reshape(\n inputs_shape[0], inputs_shape[2], out_eval_h.shape[-2], out_eval_h.shape[-1]).permute(0, 2, 1, 3)\n\n q_target_global = q_target_global.reshape(inputs_shape[0], inputs_shape[2], q_target_global.shape[-2],\n q_target_global.shape[-1]).permute(0, 2, 1, 3)\n out_target_h = out_target_h.reshape(inputs_shape[0], inputs_shape[2], out_target_h.shape[-2],\n out_target_h.shape[-1]).permute(0, 2, 1, 3)\n\n q_eval_local = torch.stack(\n [self.eval_mlp[id](out_eval_h[:, :, id].reshape(-1, out_eval_h.shape[-1]))\n for id in range(self.args.n_agents)],\n dim=1).reshape_as(q_eval_global)\n\n q_target_local = torch.stack(\n [self.target_mlp[id](out_target_h[:, :, id].reshape(-1, out_target_h.shape[-1]))\n for id in range(self.args.n_agents)],\n dim=1).reshape_as(q_target_global)\n\n q_eval = q_eval_global + q_eval_local\n q_target = q_target_global + q_target_local\n\n with torch.no_grad():\n mask = mask.unsqueeze(-2).expand(obs.shape[:-1] + mask.shape[-1:])\n mask = mask.permute(0, 2, 1, 3)\n mask = mask.reshape(-1, mask.shape[-2], mask.shape[-1])\n mask = mask.reshape(-1, mask.shape[-1])\n\n obs_intrinsic = obs.clone().permute(0, 2, 1, 3)\n obs_intrinsic = obs_intrinsic.reshape(\n -1, obs_intrinsic.shape[-2], obs_intrinsic.shape[-1])\n\n eval_h_intrinsic = out_eval_h.clone().permute(0, 2, 1, 3)\n eval_h_intrinsic = eval_h_intrinsic.reshape(\n -1, eval_h_intrinsic.shape[-2], eval_h_intrinsic.shape[-1])\n\n h_cat = torch.cat([self.eval_hidden.reshape(-1, self.eval_hidden.shape[-1]\n ).unsqueeze(1), eval_h_intrinsic[:, :-2]], dim=1)\n\n intrinsic_input_1 = torch.cat(\n [h_cat, obs_intrinsic, u_onehot.reshape(-1, u_onehot.shape[-2], u_onehot.shape[-1])], dim=-1)\n intrinsic_input_2 = torch.cat(\n [intrinsic_input_1, add_id.reshape(-1, add_id.shape[-2], add_id.shape[-1])], dim=-1)\n\n intrinsic_input_1 = intrinsic_input_1.reshape(\n -1, intrinsic_input_1.shape[-1])\n intrinsic_input_2 = intrinsic_input_2.reshape(\n -1, intrinsic_input_2.shape[-1])\n\n next_obs_intrinsic = obs_next.clone().permute(0, 2, 1, 3)\n next_obs_intrinsic = next_obs_intrinsic.reshape(\n -1, next_obs_intrinsic.shape[-2], next_obs_intrinsic.shape[-1])\n next_obs_intrinsic = next_obs_intrinsic.reshape(\n -1, next_obs_intrinsic.shape[-1])\n\n log_p_o = self.target_predict_withoutid.get_log_pi(\n intrinsic_input_1, next_obs_intrinsic)\n log_q_o = self.target_predict_withid.get_log_pi(\n intrinsic_input_2, next_obs_intrinsic, 
add_id.reshape([-1, add_id.shape[-1]]))\n\n mean_p = torch.softmax(q_eval[:, :-1], dim=-1).mean(dim=2)\n q_pi = torch.softmax(self.args.beta1 * q_eval[:, :-1], dim=-1)\n\n pi_diverge = torch.cat(\n [(q_pi[:, :, id] * torch.log(q_pi[:, :, id] / mean_p)\n ).sum(dim=-1, keepdim=True) for id in range(self.n_agents)],\n dim=-1).permute(0, 2, 1).unsqueeze(-1)\n\n intrinsic_rewards = self.args.beta1 * log_q_o - log_p_o\n intrinsic_rewards = intrinsic_rewards.reshape(\n -1, obs_intrinsic.shape[1], intrinsic_rewards.shape[-1])\n intrinsic_rewards = intrinsic_rewards.reshape(\n -1, obs.shape[2], obs_intrinsic.shape[1], intrinsic_rewards.shape[-1])\n intrinsic_rewards = intrinsic_rewards + self.args.beta2 * pi_diverge\n\n # update predict network\n add_id = add_id.reshape([-1, add_id.shape[-1]])\n for index in BatchSampler(SubsetRandomSampler(range(intrinsic_input_1.shape[0])), 256, False):\n self.eval_predict_withoutid.update(\n intrinsic_input_1[index], next_obs_intrinsic[index], mask[index])\n self.eval_predict_withid.update(\n intrinsic_input_2[index], next_obs_intrinsic[index], add_id[index], mask[index])\n\n return q_eval[:, :-1], q_target, q_eval_local[:, :-1], q_eval[:, -1], intrinsic_rewards.detach()\n\n def save_model(self, train_step):\n num = str(train_step // self.args.save_cycle)\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n\n torch.save(self.eval_mlp.state_dict(), self.model_dir +\n '/' + num + '_mlp_net_params.pkl')\n torch.save(self.eval_mix_net.state_dict(), self.model_dir +\n '/' + num + '_qplex_mix_net_params.pkl')\n torch.save(self.eval_rnn.state_dict(), self.model_dir +\n '/' + num + '_rnn_net_params.pkl')\n\n def load_model(self, step):\n self.eval_mlp.load_state_dict(torch.load(\n f\"{self.model_dir}/{step}_mlp_net_params.pkl\", map_location='cpu'))\n self.eval_mix_net.load_state_dict(torch.load(\n f\"{self.model_dir}/{step}_qplex_mix_net_params.pkl\", map_location='cpu'))\n self.eval_rnn.load_state_dict(torch.load(\n f\"{self.model_dir}/{step}_rnn_net_params.pkl\", map_location='cpu'))\n\n def init_hidden(self, episode_num):\n self.eval_hidden = torch.zeros(\n episode_num, self.n_agents, self.args.rnn_hidden_dim)\n self.target_hidden = torch.zeros(\n episode_num, self.n_agents, self.args.rnn_hidden_dim)\n if self.args.cuda:\n self.eval_hidden = self.eval_hidden.to(torch.device(self.args.GPU))\n self.target_hidden = self.target_hidden.to(\n torch.device(self.args.GPU))\n\n for i in range(episode_num):\n for j in range(self.n_agents):\n self.eval_hidden[i, j] = self.eval_rnn.init_hidden()\n self.target_hidden[i, j] = self.target_rnn.init_hidden()\n","sub_path":"CDS_GRF/policy/qplex_sdq_intrinsic.py","file_name":"qplex_sdq_intrinsic.py","file_ext":"py","file_size_in_byte":19687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400127948","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/hugo/developpement/python/libperso/beampy/themes/HipsterChic_theme.py\n# Compiled at: 2018-05-14 16:18:14\nTHEME = {}\nTHEME['document'] = {'format': 'html5', \n 'width': 800, \n 'height': 600}\nTHEME['text'] = {'size': 20, \n 'font': 'CMR', \n 'color': '#000000', \n 'align': '', \n 'x': 'center', \n 'y': 'auto', \n 'width': None, \n 'usetex': True, \n 'va': ''}\nTHEME['title'] = {'size': 28, \n 'font': 'CMR', \n 'color': 'DarkOrange', \n 'x': 'center', \n 'y': {'shift': 1.2, 'unit': 'cm'}, 
'reserved_y': '1.5cm', \n 'align': '', \n 'va': 'baseline'}\nTHEME['maketitle'] = {'title_color': THEME['title']['color'], \n 'author_size': THEME['text']['size'], \n 'background-color': 'WhiteSmoke', \n 'date_color': '#888888', \n 'subtitle_color': '#888888'}\nTHEME['link'] = {'fill': THEME['title']['color']}\nTHEME['itemize'] = {'x': 'center', \n 'y': 'auto', \n 'item_style': 'bullet', \n 'item_spacing': '+1cm', \n 'item_indent': '0cm', \n 'item_color': THEME['title']['color'], \n 'text_color': THEME['text']['color'], \n 'width': None}\nfrom beampy.modules.core import group\nfrom beampy.modules.text import text\nfrom beampy.document import document\nfrom beampy.modules.svg import hline, rectangle\n\ndef hipstertitle(titlein, author=None, subtitle=None, date=None):\n args = THEME['maketitle']\n rectangle(x=0, y=0, width=document._width, height=document._height, color=args['background-color'], edgecolor=None)\n with group(x=0.015, y='center', width=document._width - document._width * 0.015):\n t = text('{\\\\scshape %s}' % titlein, x=0, width=document._width * 0.9, y=0, color=args['title_color'], size=args['title_size'], align='left')\n hl = hline(y=t.bottom + '0.2cm', color=args['title_color'], linewidth='1.5pt')\n if author is not None:\n a = text(author, x=0, y=hl.bottom + 20, color=args['author_color'], size=args['author_size'], align='left', width=document._width * 0.45)\n if subtitle is not None:\n st = text('\\\\textit{%s}' % subtitle, x={'align': 'right', 'shift': 0.02, 'anchor': 'right'}, y=hl.bottom + {'anchor': 'top', 'shift': 20}, color=args['subtitle_color'], size=args['subtitle_size'], width=document._width * 0.45, align='left')\n if date is not None:\n text(date, x='center', y={'align': 'bottom', 'shift': 0.05}, color=args['date_color'], size=args['date_size'])\n return\n\n\nTHEME['maketitle']['template'] = hipstertitle","sub_path":"pycfiles/beampy_slideshow-0.5.5.post1-py2-none-any/HipsterChic_theme.py","file_name":"HipsterChic_theme.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"266046960","text":"from enum import Enum, auto\nimport logging\nimport asyncio\nimport struct\nimport msgpack\n\nlogger = logging.getLogger(__name__)\n\n\nclass MessageType(Enum):\n REQUEST_REGISTER = auto()\n REQUEST_PUBLISH = auto()\n REQUEST_FILE_LIST = auto()\n REQUEST_FILE_LOCATION = auto()\n REQUEST_CHUNK_REGISTER = auto()\n REPLY_REGISTER = auto()\n REPLY_FILE_LIST = auto()\n REPLY_PUBLISH = auto()\n REPLY_FILE_LOCATION = auto()\n PEER_REQUEST_CHUNK = auto()\n PEER_REPLY_CHUNK = auto()\n PEER_PING_PONG = auto()\n\n\ndef _message_log(message):\n log_message = {key: message[key] for key in message if key != 'data'}\n log_message['type'] = MessageType(message['type']).name\n return log_message\n\n\nasync def read_message(reader):\n assert isinstance(reader, asyncio.StreamReader)\n # receive length header -> msgpack load (dict)\n raw_msg_len = await reader.readexactly(4)\n msglen = struct.unpack('>I', raw_msg_len)[0]\n raw_msg = await reader.readexactly(msglen)\n\n msg = msgpack.loads(raw_msg)\n logger.debug('Message received {}'.format(_message_log(msg)))\n return msg\n\n\nasync def write_message(writer, message):\n assert isinstance(writer, asyncio.StreamWriter)\n logger.debug('Writing {}'.format(_message_log(message)))\n # use value of enum since Enum is not JSON serializable\n if isinstance(message['type'], MessageType):\n message['type'] = message['type'].value\n # msgpack (bytes) -> add 
length header (bytes)\n raw_msg = msgpack.dumps(message)\n raw_msg = struct.pack('>I', len(raw_msg)) + raw_msg\n writer.write(raw_msg)\n await writer.drain()\n","sub_path":"p2pfs/core/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499096445","text":"import requests\nclass news:\n def __init__(self):\n print(\"-\")\n def get_news():\n \"\"\"获取金山词霸每日一句,英文和翻译\"\"\"\n url = \"http://open.iciba.com/dsapi/\"\n r = requests.get(url)\n content = r.json()['content']\n note = r.json()['note']\n return content, note\n\n if __name__ == \"__main__\":\n print(get_news())","sub_path":"wxpy/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307707909","text":"import sys\nimport os.path\nimport math\n\nif len(sys.argv) < 2:\n print('Error a .pssm input file is required')\n quit()\nif not sys.argv[1].endswith('.pssm'):\n print('Error incorrect input')\n quit()\nif not os.path.isfile('training.txt'):\n print(\"Error please run Project_3.py before running this file\")\n quit()\n\n\n# Gets the attributes for the for the given protein\n# Returns the list of attributes\n# index - location of the protein in the sequence\n# length - the numbers of proteins in the sequence\n# pssm - the 2D list for the pssm\ndef getAttributes(index, length, pssm):\n nullSet = [-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]\n temp = []\n for j in range(index - 2, index + 3):\n if j < 0 or j >= length:\n temp.extend(nullSet)\n else:\n temp.extend(pssm[j])\n return temp\n\n\n# Calculates the P(x|Y) for each x and the find the product\n# aList - list that holds the attributes of a protein\n# mu - list of mu for the given Y where each element represents the 100 attributes (x)\n# var - list of variance for the given Y where each element represents the 100 attributes (x)\ndef calcP(aList, mu, var):\n temp = 1\n su = 0\n for i in range(len(aList)):\n temp *= (math.exp(-((aList[i] - mu[i]) ** 2 ) / (2 * var[i])) / math.sqrt(2 * math.pi * var[i])) \n return temp\n\noutput = \"\"\nwith open(\"training.txt\") as data, open(sys.argv[1]) as pssm:\n next(data)\n next(pssm)\n next(pssm)\n next(pssm)\n \n\n\n muH = list(map(float, next(data).split()))\n varH = list(map(float, next(data).split()))\n pH = float(next(data).rstrip('\\n'))\n \n next(data)\n muE = list(map(float, next(data).split()))\n varE = list(map(float, next(data).split()))\n pE = float(next(data).rstrip('\\n'))\n \n next(data)\n muC = list(map(float, next(data).split()))\n varC = list(map(float, next(data).split()))\n pC = float(next(data).rstrip('\\n'))\n line = []\n \n pLineSize = 0\n pLine = next(pssm).split()\n while len(pLine) > 0:\n line.append(list(map(int, pLine[2:22])))\n pLine = next(pssm).split()\n pLineSize += 1\n\n for i in range(pLineSize):\n testing = getAttributes(i, pLineSize, line)\n perH = pH * calcP(testing, muH, varH)\n perE = pE * calcP(testing, muE, varE)\n perC = pC * calcP(testing, muC, varC)\n\n maxPer = max(perH,perE,perC) \n if perH == maxPer:\n output += 'H'\n elif perE == maxPer:\n output += 'E'\n else:\n output += 'C' \n\n print(\"Predicted Output: \" + output)\n if(len(sys.argv) >= 3):\n with open(sys.argv[2]) as ss:\n next(ss)\n sLine = next(ss).rstrip('\\n')\n print(\"Actual Sequence: \" + sLine)\n if len(sLine) != len(output):\n print(\"Error\")\n quit()\n correct = 0\n for i 
in range(len(sLine)):\n if sLine[i] == output[i]:\n correct += 1\n print(correct / len(sLine))","sub_path":"Bio/Projects/5970_6970_SP_19_PROJECT_3/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398027554","text":"import pandas as pd\n\n\ndef load_raw_data(site_data_path):\n if not site_data_path.exists():\n raise ValueError(f\"Expected data at {site_data_path} but \" +\n \"the data was not found\")\n site_data = pd.read_csv(site_data_path)\n \n expected_columns = [\n \"TIMESTAMP_END\",\n \"FCH4\"\n ]\n for expected_column in expected_columns:\n if expected_column not in site_data.columns:\n raise ValueError(f\"CSV is missing column {expected_column}.\")\n\n try:\n pd.to_datetime(site_data[\"TIMESTAMP_END\"], format='%Y%m%d%H%M')\n except:\n raise ValueError(\"TIMESTAMP_END needs to be formatted as \" +\n \"`YYYYMMDDHHmm`\")\n\n return site_data\n","sub_path":"preprocess/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"304864406","text":"class Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n # ans = [0, 0]\n # for i in range(len(s)):\n # j = 0\n # while i - j - 1 >= 0 and i + j < len(s) and s[i-j-1] == s[i+j]:\n # j+=1\n # if 2*j >= ans[1] - ans[0]: ans = [i-j, i+j]\n # j = 1\n # while i - j >= 0 and i + j < len(s) and s[i-j] == s[i+j]:\n # j+=1\n # if 2*j-1 > ans[1] - ans[0]: ans = [i-j+1, i+j]\n # return s[ans[0]:ans[1]]\n\n # fix\n # start, maxl = 0, 1\n # i, l = 0, len(s)\n # while i < l:\n # if l-i <= maxl//2: break\n # j = k = i\n # while j < l-1 and s[j] == s[j+1]: j+=1\n # i = j + 1\n # while k >= 0 and j < l and s[j] == s[k]:\n # j+=1\n # k-=1\n # if j-k-1 > maxl: start, maxl = k+1, j-k-1\n # return s[start:start+maxl]\n\n # manacher\n\nif __name__ == '__main__':\n sol = Solution()\n a = \"abbacdeedc\"\n print(sol.longestPalindrome(a))\n ","sub_path":"Algorithms/5 Longest Palindromic Substring/Longest Palindromic Substring.py","file_name":"Longest Palindromic Substring.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261085078","text":"import numpy as np \nimport os\nimport sys\nroot_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root_path)\ndata_path = os.path.join(root_path, 'data')\n\n__all__ = ['SimpleRegData']\n\nclass SimpleRegData:\n def __init__(self, dataset='sunspots', train_test_split=0.5):\n np.random.seed(42)\n data = np.genfromtxt(os.path.join(data_path, dataset+'.csv'), delimiter=',', dtype=np.float64) \n self.X = data[:, 0][:, np.newaxis]\n self.y = data[:, 1].squeeze()\n self.N = self.X.shape[0]\n self.tr_idx = np.sort(np.random.choice(self.N, int(self.N*train_test_split), replace=False))\n\n def _samples(self, idx='train'):\n mask = np.zeros(self.X.size, dtype=bool)\n mask[self.tr_idx] = True\n if idx == 'test':\n mask = ~mask\n X = self.X[mask, :]\n y = self.y[mask]\n return X, y\n\n def train_samples(self):\n return self._samples('train')\n \n def test_samples(self):\n return self._samples('test')\n","sub_path":"data/simplereg.py","file_name":"simplereg.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"90981429","text":"\"\"\"\nmap(), filter(), lambda, and list comprehensions provide compact, elegant,\nand efficient ways to encode a few common idioms in programming. \nWe often encounter the following scanarios involving for-loops\n\"\"\"\n\n\"\"\"E.g 1 & 2 looping over a sequence and performing some calculation\"\"\"\n#E.g 1 suppose we want to build a list of the squares of the integers from 0 to 9\n#For loop way\nsquares = []\nfor x in range(10):\n\tsquares.append(x**2)\nprint (\"E.g 1:\", squares)\n\n#E.g 2 Suppose we want to build a list of the lengths of the names in a list:\nnames = ['Anne', 'Amy', 'Bob', 'David', 'Carrie', 'Barbara', 'Zach']\nlengths = []\nfor name in names:\n\tlengths.append(len(name))\nprint (\"E.g 2:\", lengths)\n\n\"\"\"E.g 3&4 Looping over nested sequences\"\"\"\n#E.g 3 For example, suppose we want a list of all possible pairs of drink and food from the lists ['water', 'tea', 'juice'] and ['ham', 'eggs', 'spam'], respectively:\npossible_choices = []\nfor drink in ['water', 'tea', 'juice']:\n\tfor food in ['ham', 'eggs', 'spam']:\n\t\tpossible_choices.append([drink,food])\nprint (\"E.g 3:\", possible_choices)\n\n#E.g 4 Suppose we want a list of coordinates on a rectangular grid:\ncoords = []\nfor x in range(5):\n\tfor y in range(3):\n\t\tcoordinate = (x,y)\n\t\tcoords.append(coordinate)\nprint (\"E.g 4:\", coords)\n\n\"\"\"E.g 5 & 6 filtering a sequence according to some criteria\"\"\"\n#E.g 5 suppose we want a list of the squares of the integers from 0 to 9 where the square is greater than 5 and less than 50:\nspecial_squares = []\nfor x in range(10):\n\tsquare = x**2\n\tif square > 5 and square < 50:\n\t\tspecial_squares.append(square)\nprint (\"E.g 5:\", special_squares) \n\n#E.g 6 Suppose we want to take a list of names and find only those starting with the letter B:\nnames = ['Anne', 'Amy', 'Bob', 'David', 'Carrie', 'Barbara', 'Zach']\nb_names = []\nfor name in names:\n\tif name.startswith('B'):\n\t\tb_names.append(name)\nprint(\"E.g 6:\", b_names)","sub_path":"Maps/ForLoopEg.py","file_name":"ForLoopEg.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"613643354","text":"import json\n\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom shop.models import Shop\n\nfrom ..factories import UserFactory\nfrom ..utils import get_token\n\n\nclass WhenUserCreateShop(TestCase):\n \"\"\"[Tests when the user tries to create a user]\n \n Arguments:\n TestCase {[type]} -- [Testcase param]\n \"\"\"\n\n expected_status_code = 200\n name = \"Dangote\"\n category = \"Automobile\"\n phone_number = \"08037452103\"\n\n def setUp(self):\n \"\"\"\n [Set up the data neeeded]\n \"\"\"\n self.user = UserFactory()\n token = get_token(user=self.user)\n self.auth = \"Token {}\".format(token)\n payload = {\n \"shopName\": self.name,\n \"shopCategory\": self.category,\n \"phoneNumber\": self.phone_number,\n }\n self.response = self.client.post(\n reverse(\"api:create_shop\"),\n data=json.dumps(payload),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=self.auth,\n )\n\n def test_status_code(self):\n \"\"\"\n [Test if the succes code match the one returned]\n \"\"\"\n assert self.response.status_code == self.expected_status_code\n\n def test_is_shop_saved(self):\n \"\"\"\n [Test if the shop has been created]\n \"\"\"\n if self.expected_status_code == 201:\n assert 
Shop.objects.filter(title=self.name).exists()\n","sub_path":"tests/commerce/test_user_can_create_shop.py","file_name":"test_user_can_create_shop.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552302605","text":"'''\nCreated on 2011/7/15\n\n@author: user\n'''\nimport wx\n\n\nID_STAT = 1\nID_TOOL = 2\n\nclass SimpleMenu(wx.Frame):\n def __init__(self, parent, id, title):\n wx.Frame.__init__(self, parent, id, title, size=(350, 250))\n # super(SimpleMenu, self).__init__(self, parent, id, title, size=(350, 250))\n self.count = 5\n \n menubar = wx.MenuBar(wx.MB_DOCKABLE)\n \n file = wx.Menu()\n quit = wx.MenuItem(file, 1, '&Quit\\tCtrl+Q')\n quit.SetBitmap(wx.Bitmap('../icons/exit.ico'))\n \n imp = wx.Menu()\n imp.Append(-1, 'Import newsfeed list...')\n imp.Append(-1, 'Import bookmarks...')\n imp.Append(-1, 'Import mail...')\n \n file.Append(-1, '&New')\n file.Append(-1, '&Open')\n file.Append(-1, '&Save')\n #file.Append(-1, 'Quit', 'Quit application')\n file.AppendSeparator()\n file.AppendMenu(-1, 'I&mport', imp)\n file.AppendItem(quit)\n \n view = wx.Menu()\n self.shst = view.Append(ID_STAT, 'Show statubar', 'Show Statusbar', kind=wx.ITEM_CHECK)\n self.shtl = view.Append(ID_TOOL, 'Show toolbar', 'Show Toolbar', kind=wx.ITEM_CHECK)\n view.Check(ID_STAT, True)\n view.Check(ID_TOOL, True)\n \n self.Bind(wx.EVT_MENU, self.ToggleStatusBar, id=ID_STAT)\n self.Bind(wx.EVT_MENU, self.ToggleToolBar, id=ID_TOOL)\n self.Bind(wx.EVT_MENU, self.OnQuit, id=1)\n self.Bind(wx.EVT_TOOL, self.OnUndo, id=wx.ID_UNDO)\n self.Bind(wx.EVT_TOOL, self.OnRedo, id=wx.ID_REDO)\n \n edit = wx.Menu()\n insr = wx.Menu()\n form = wx.Menu()\n tool = wx.Menu()\n help = wx.Menu()\n\n menubar.Append(file, '&File')\n menubar.Append(view, '&View')\n menubar.Append(edit, '&Edit')\n menubar.Append(insr, '&Insert')\n menubar.Append(form, '&Format')\n menubar.Append(tool, '&Tools')\n menubar.Append(help, '&Help')\n \n self.toolbar = self.CreateToolBar()\n self.toolbar.AddLabelTool(wx.ID_ANY, '', wx.Bitmap('../icons/delete.png'))\n self.toolbar.AddLabelTool(wx.ID_UNDO, '', wx.Bitmap('../icons/undo.png'))\n self.toolbar.AddLabelTool(wx.ID_REDO, '', wx.Bitmap('../icons/redo.png'))\n self.toolbar.EnableTool(wx.ID_REDO, False)\n self.toolbar.AddSeparator()\n self.toolbar.AddLabelTool(wx.ID_EXIT, '', wx.Bitmap('../icons/exit.ico'))\n self.toolbar.Realize()\n \n self.statusbar = self.CreateStatusBar()\n \n \n self.SetMenuBar(menubar)\n self.Centre()\n self.Show(True)\n \n def ToggleStatusBar(self, event):\n if self.shst.IsChecked():\n self.statusbar.Show()\n else:\n self.statusbar.Hide()\n \n def ToggleToolBar(self, event):\n if self.shtl.IsChecked():\n self.toolbar.Show()\n else:\n self.toolbar.Hide() \n \n def OnUndo(self, event):\n if self.count > 1 and self.count <= 5:\n self.count = self.count - 1\n if self.count == 1:\n self.toolbar.EnableTool(wx.ID_UNDO, False)\n if self.count == 4:\n self.toolbar.EnableTool(wx.ID_REDO, True)\n\n def OnRedo(self, event):\n if self.count < 5 and self.count >= 1:\n self.count = self.count + 1\n if self.count == 5:\n self.toolbar.EnableTool(wx.ID_REDO, False)\n if self.count == 2:\n self.toolbar.EnableTool(wx.ID_UNDO, True)\n \n def OnQuit(self, event):\n self.Close()\n\n def InitUI(self):\n panel = wx.Panel(self)\n panel.SetBackgroundColour('#4f5049')\n\n midPan = wx.Panel(panel)\n midPan.SetBackgroundColour('#ededed')\n\n vbox = wx.BoxSizer(wx.VERTICAL)\n #vbox.Add(midPan, 1, wx.EXPAND | wx.ALL, 20)\n 
vbox.Add(midPan, 1, wx.BOTTOM | wx.TOP | wx.EXPAND, border=20)\n panel.SetSizer(vbox)\n \n def InitUIBoxSizer(self):\n panel = wx.Panel(self)\n\n font = wx.SystemSettings_GetFont(wx.SYS_SYSTEM_FONT)\n font.SetPointSize(9)\n\n vbox = wx.BoxSizer(wx.VERTICAL)\n\n hbox1 = wx.BoxSizer(wx.HORIZONTAL)\n st1 = wx.StaticText(panel, label='Class Name')\n st1.SetFont(font)\n hbox1.Add(st1, flag=wx.RIGHT, border=8)\n tc = wx.TextCtrl(panel)\n hbox1.Add(tc, proportion=1)\n vbox.Add(hbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\n vbox.Add((-1, 10))\n\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\n st2 = wx.StaticText(panel, label='Matching Classes')\n st2.SetFont(font)\n hbox2.Add(st2)\n vbox.Add(hbox2, flag=wx.LEFT | wx.TOP, border=10)\n\n vbox.Add((-1, 10))\n\n hbox3 = wx.BoxSizer(wx.HORIZONTAL)\n tc2 = wx.TextCtrl(panel, style=wx.TE_MULTILINE)\n hbox3.Add(tc2, proportion=1, flag=wx.EXPAND)\n vbox.Add(hbox3, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, \n border=10)\n\n vbox.Add((-1, 25))\n\n hbox4 = wx.BoxSizer(wx.HORIZONTAL)\n cb1 = wx.CheckBox(panel, label='Case Sensitive')\n cb1.SetFont(font)\n hbox4.Add(cb1)\n cb2 = wx.CheckBox(panel, label='Nested Classes')\n cb2.SetFont(font)\n hbox4.Add(cb2, flag=wx.LEFT, border=10)\n cb3 = wx.CheckBox(panel, label='Non-Project classes')\n cb3.SetFont(font)\n hbox4.Add(cb3, flag=wx.LEFT, border=10)\n vbox.Add(hbox4, flag=wx.LEFT, border=10)\n\n vbox.Add((-1, 25))\n\n hbox5 = wx.BoxSizer(wx.HORIZONTAL)\n btn1 = wx.Button(panel, label='Ok', size=(70, 30))\n hbox5.Add(btn1)\n btn2 = wx.Button(panel, label='Close', size=(70, 30))\n hbox5.Add(btn2, flag=wx.LEFT|wx.BOTTOM, border=5)\n vbox.Add(hbox5, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)\n\n panel.SetSizer(vbox)\n \napp = wx.App()\nframe = SimpleMenu(None, -1, 'simple menu example')\nframe.InitUI()\n\napp.MainLoop()\n\n","sub_path":"python_study/python_study/src/modules/wx/example/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496932161","text":"'''\n References: \n https://blog.csdn.net/JineD/article/details/106622398,\n https://blog.csdn.net/mouday/article/details/81512870\n'''\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport sys\n# sys.path[0] = \".\"\nprint(sys.path)\nfrom font_set import WESTERN_CHAR, CHINESE_CHAR\nimport os\n\nDIR = './data/TTFfont/'\nOUT_DIR = './data/PNGfont/'\nOUT_CHINESE_DIR = './data/PNGfont_cn/'\nPNG_SIZE = 256\n\ndef is_contain_chinese(check_str):\n \"\"\"\n 判断字符串中是否包含中文\n :param check_str: {str} 需要检测的字符串\n :return: {bool} 包含返回True, 不包含返回False\n \"\"\"\n for ch in check_str:\n if u'\\u4e00' <= ch <= u'\\u9fff':\n return True\n return False\n\ndef draw_png(name, font_size = 128, extension='.ttf'):\n font = ImageFont.truetype(DIR + name + extension, font_size)\n max_width, max_height = (PNG_SIZE, PNG_SIZE)\n print('Start processing font {} to PNG file.'.format(name))\n os.makedirs(os.path.join(OUT_DIR, name), exist_ok=True)\n\n\n for char in WESTERN_CHAR:\n text_width, text_height = font.getsize(char)\n image = Image.new(mode='RGBA', size=(max_width, max_height))\n padding = ((max_width-text_width)//2, (max_height-text_height)//2)\n draw_table = ImageDraw.Draw(im=image)\n draw_table.text(xy=padding, text=char, fill='#000000', font=font)\n image.save(os.path.join(OUT_DIR, name) + '/' + str(ord(char)) + '.png', 'PNG') \n image.close()\n print(is_contain_chinese(name), name)\n if is_contain_chinese(name):\n 
os.makedirs(os.path.join(OUT_CHINESE_DIR, name), exist_ok=True)\n for char in CHINESE_CHAR:\n text_width, text_height = font.getsize(char)\n image = Image.new(mode='RGBA', size=(max_width, max_height))\n padding = ((max_width-text_width)//2, (max_height-text_height)//2)\n draw_table = ImageDraw.Draw(im=image)\n draw_table.text(xy=padding, text=char, fill='#000000', font=font)\n image.save(os.path.join(OUT_CHINESE_DIR, name) + '/' + str(ord(char)) + '.png', 'PNG') \n image.close()\n\n \nif __name__ == \"__main__\":\n for name in os.listdir(DIR):\n try:\n name = name.split('.')[0]\n draw_png(name, extension='.ttf')\n except Exception as e:\n print(name, ' ERR: ', e)\n continue","sub_path":"sourcecode/utils/ttfont_convert.py","file_name":"ttfont_convert.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116244125","text":"import json\r\n\r\ncompanies_data = []\r\n\r\nwith open('task07.txt') as input_data:\r\n companies_dict = {}\r\n profit_list = []\r\n\r\n for company_row in input_data:\r\n name, form, revenue, costs = company_row.split()\r\n\r\n profit = float(revenue) - float(costs)\r\n companies_dict[name] = profit\r\n\r\n if profit:\r\n profit_list.append(profit)\r\n\r\n companies_data.append(companies_dict)\r\n companies_data.append({\r\n \"average_profit\": round(sum(profit_list) / len(profit_list), 2)\r\n })\r\n\r\nwith open('task07.json', 'w') as output_data:\r\n json.dump(companies_data, output_data)","sub_path":"example07.py","file_name":"example07.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293593824","text":"from HelperFunctions import readInputFile\nfrom HelperFunctions import readExampleInput\nfrom HelperFunctions import convertToInt\nfrom collections import defaultdict\n\nnumberMapping = {1: ['c', 'f'], 2: ['a', 'c', 'd', 'e', 'g'], 3: ['a', 'c', 'd', 'f', 'g'], 4: ['b', 'c', 'd', 'f'], 5: ['a', 'b', 'd', 'f', 'g'], 6: ['a', 'b', 'd', 'e', 'f', 'g'], 7: ['a', 'c', 'f'], 8: ['a', 'b', 'c', 'd', 'e', 'f', 'g'], 9: ['a', 'b', 'c', 'd', 'f', 'g'], 0: ['a', 'b', 'c', 'e', 'f', 'g']}\nallPossibleDigitsPerNumber = {2: ['c', 'f'], 3: ['a', 'c', 'f'], 4: ['b', 'c', 'd', 'f'], 7: ['a', 'b', 'c', 'd', 'e', 'f', 'g'], 5: ['a', 'b', 'c', 'd', 'e', 'f', 'g'], 6: ['a', 'b', 'c', 'd', 'e', 'f', 'g']}\n\ndef do1(splitInput):\n\tuniqueDigits = 0\n\n\tfor line in splitInput:\n\t\tinput,output = line.split('|')\n\n\t\toutputDigits = output.split(' ')\n\n\t\tfor digit in outputDigits:\n\t\t\tif len(digit) in [2, 3, 4, 7]:\n\t\t\t\tuniqueDigits += 1\n\t\n\treturn uniqueDigits\n\ndef do2(splitInput):\n\tsumOutputValues = 0\n\n\tfor line in splitInput:\n\t\tinput,output = line.split('|')\n\n\t\tinputValues = input.split(' ')\n\t\toutputValues = output.split(' ')\n\n\t\tmapping = getMapping(inputValues)\n\t\toutputValue = calculateOutputValues(outputValues, mapping)\n\n\t\tsumOutputValues += outputValue\n\t\n\treturn sumOutputValues\n\ndef getMapping(inputValues):\n\tmapping = {'a': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'b': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'c': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'd': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'e': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'f': set(('a', 'b', 'c', 'd', 'e', 'f', 'g')), 'g': set(('a', 'b', 'c', 'd', 'e', 'f', 'g'))}\n\n\tinputPerLength = defaultdict(lambda: [])\n\n\tfor inputValue in inputValues[0:-1]:\n\t\tlength = 
len(inputValue)\n\t\tinputPerLength[length].append([x for x in inputValue])\n\t\tpossibleValues = set(allPossibleDigitsPerNumber[length])\n\t\tfor digit in inputValue:\n\t\t\tmapping[digit].intersection_update(possibleValues)\n\n\t# c & f (1)\n\tone1,one2 = [x for x in mapping.keys() if mapping[x] == {'c', 'f'}]\n\tsumOfOne = countOccurence(one1, inputValues)\t\n\t\n\tif sumOfOne == 8:\n\t\tmapping[one1] = {'c'}\n\t\tremoveFromMapping(one1, 'c', mapping)\n\t\tremoveFromMapping(one2, 'f', mapping)\n\telse:\n\t\tmapping[one1] = {'f'}\n\t\tremoveFromMapping(one1, 'f', mapping)\n\t\tremoveFromMapping(one2, 'c', mapping)\n\n\t# a (7)\n\tseven = [x for x in mapping.keys() if mapping[x] == {'a'}][0]\n\tremoveFromMapping(seven, 'a', mapping)\n\n\t# b & d (4)\n\tfour1,four2 = [x for x in mapping.keys() if mapping[x] == {'b', 'd'}]\n\tsumOfFour = countOccurence(four1, inputValues)\n\tif sumOfFour == 6:\n\t\tmapping[four1] = {'b'}\n\t\tremoveFromMapping(four1, 'b', mapping)\n\t\tremoveFromMapping(four2, 'd', mapping)\n\telse:\n\t\tmapping[four1] = {'d'}\n\t\tremoveFromMapping(four1, 'd', mapping)\n\t\tremoveFromMapping(four2, 'b', mapping)\n\n\t# e & g\n\tother1,other2 = [x for x in mapping.keys() if mapping[x] == {'e', 'g'}]\n\tsumOfOthers = countOccurence(other1, inputValues)\n\tif sumOfOthers == 4:\n\t\tmapping[other1] = {'e'}\n\t\tremoveFromMapping(other1, 'e', mapping)\n\telse:\n\t\tmapping[other1] = {'g'}\n\t\tremoveFromMapping(other1, 'g', mapping)\n\t\n\treturn mapping\n\ndef countOccurence(digit, inputValues):\n\tsumOfDigit = 0\n\tfor input in inputValues:\n\t\tif digit in input:\n\t\t\tsumOfDigit += 1\n\treturn sumOfDigit\n\ndef removeFromMapping(rightKey, digit, mapping):\n\tfor key in mapping.keys():\n\t\t\tif key != rightKey:\n\t\t\t\tif digit in mapping[key]:\n\t\t\t\t\tmapping[key].remove(digit)\n\ndef calculateOutputValues(outputValues, mapping):\n\tresult = ''\n\tfor outputValue in outputValues:\n\t\tif outputValue == '':\n\t\t\tcontinue\n\t\t\n\t\tresultStr = []\n\t\tfor digit in outputValue:\n\t\t\tresultStr.append(list(mapping[digit])[0])\n\t\tsortedResult = sorted(resultStr)\n\n\t\tresultDigit = str([x for x in numberMapping.keys() if sorted(numberMapping[x]) == sortedResult][0])\n\t\tresult += resultDigit\t\n\t\n\treturn int(result)\n\ndef do():\n\tstrInput = readInputFile(8)\n\tsplitInput = strInput.split('\\n')\n\n\tprint(do1(splitInput))\n\tprint(do2(splitInput))\n\n\tprint('done')\n\n\ndo()","sub_path":"2021/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185537212","text":"from objects.objects import Player\nimport logging\n\nfrom interface.gui import GameScreen\n\nfrom objects.map import *\nfrom objects.choicetext import ChoiceText\nfrom objects.menu import *\n\nfrom states.states import *\n\n\nclass Game():\n def __init__(self, is_debug, debug_room, farm):\n self.game_screen = GameScreen(40, 50, 40)\n self.debug = is_debug\n self.debug_room = debug_room\n self.farm = farm\n self.logger = Game.get_logger(self.debug)\n\n self.menu_text = ChoiceText(\"So ur with ur honey and yur making out wen the phone rigns. U anser it n the vioce is \\\"wut r u doing wit my daughter?\\\" U tell ur girl n she say \\\"my dad is ded\\\". 
THEN WHO WAS PHONE?\", [\"Me\", \"Ur dad\", \"Linus Torvald, PHD\"])\n self.menu = Menu(self.menu_text, 30, 40, 40)\n\n self.game_states_map = {\n \"Active\": (ActiveState(), self.game_screen.game_console_wrapper),\n \"Pause\": (PauseState(), self.game_screen.game_console_wrapper),\n \"Game_Over\": (GameOverState(), self.game_screen.game_console_wrapper),\n \"Menu\": (PopUpState(), self.menu.console_wrapper),\n }\n\n self.change_game_state(\"Active\")\n self.game_screen.current_console = self.game_screen.game_console_wrapper\n\n self.game_screen_width = self.game_screen.game_console_wrapper.dimensions.get_width()\n self.game_screen_height = self.game_screen.game_console_wrapper.dimensions.get_height()\n self.initialize_game_area(is_debug, debug_room, farm)\n\n def initialize_game_area(self, is_debug, debug_room, farm):\n if debug_room:\n self.current_map = MapBuilder(0, self.game_screen.current_console.console).make_map_debug(self.game_screen_width, self.game_screen_height)\n starting_position = (int(self.game_screen_width / 4) + 2, int(self.game_screen_height / 4) + 2)\n elif farm:\n self.current_map = MapBuilder(1, self.game_screen.current_console.console).make_map_farm(self.game_screen_width, self.game_screen_height)\n starting_position = (int(self.game_screen_width / 2) - 2, int(self.game_screen_height / 2))\n else:\n self.current_map = MapBuilder(1, self.game_screen.current_console.console).make_map(self.game_screen_width, self.game_screen_height)\n starting_position = self.current_map.get_free_space()\n\n self.player = Player('@', starting_position[0], starting_position[1], self.game_screen.game_console_wrapper.console)\n self.current_map.entity_list.insert(0, self.player)\n if is_debug:\n self.currentDrawMap = DebugDrawableMap(self.current_map, self.player, self.game_screen.game_console_wrapper.console)\n else:\n self.currentDrawMap = DrawableMap(self.current_map, self.player, self.game_screen.game_console_wrapper.console)\n\n def change_game_state(self, new_state):\n self.game_state, self.game_screen.current_console = self.game_states_map[new_state]\n\n @staticmethod\n def get_logger(debug):\n loggerElem = logging.getLogger('game.py')\n if debug:\n loggerElem.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n else:\n loggerElem.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n loggerElem.addHandler(ch)\n logging.basicConfig(filename='jurassic-mendel.log',level=logging.DEBUG)\n return loggerElem\n\n def run_game(self):\n while not libtcod.console_is_window_closed():\n self.game_screen.render_all(self)\n\n world_handler = self.game_state.handle_world(self)\n for event in world_handler:\n event.handle(event, self)\n\n def reset(self):\n self.initialize_game_area(self.debug, self.debug_room, self.farm)\n","sub_path":"states/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648553091","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport subprocess\nimport sys\nimport i3ipc\n\n\ndef send_notify(title, msg, msg_type):\n subprocess.run([\"notify-send\", \"-u\", msg_type, title, msg])\n\n\nif __name__ == \"__main__\":\n i3 = i3ipc.Connection()\n\n focused = i3.get_tree().find_focused()\n if len(focused.workspace().descendents()) == 0:\n # no children to move\n 
sys.exit(0)\n\n    spaces = i3.get_workspaces()\n    new_num = 1\n    space_nums = []\n    for space in spaces:\n        space_nums.append(space.num)\n    wrapped = False\n\n    while new_num in space_nums:\n        new_num += 1\n        if new_num > 10:\n            if wrapped:\n                send_notify(\"Workspace move failed\", \"No free workspace left\", \"normal\")\n                sys.exit(0)\n            new_num = 1\n            wrapped = True\n\n    i3.command('move window to workspace {}'.format(new_num))\n    send_notify(\"New workspace {}\".format(new_num), focused.name, \"normal\")\n","sub_path":"etc/i3/userscripts/window_to_new_workspace.py","file_name":"window_to_new_workspace.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529317621","text":"#!/usr/bin/env python3\nimport xml.etree.ElementTree as ET\nimport pystache as TPL\nfrom build_layout import *\nimport markdown\nimport yaml\nimport glob\nimport os\n\ndef load_prop(p):\n    if \"inherit\" in p:\n        path = \"manual/prop_%s.yaml\" % p.get(\"inherit\", \"\")\n        if os.path.exists(path):\n            p = {**yaml.load(open(path)), **p}\n\n    if not \"desc_short\" in p:\n        p[\"desc_short\"] = \"\"\n\n    if not \"name\" in p:\n        p[\"name\"] = p[\"prop\"]\n\n    return p\n\ndef build_elem_page(elem_file):\n    elem = yaml.load(open(elem_file))\n    elem[\"properties\"] = list(map(lambda p: load_prop(p), elem[\"properties\"]))\n    url = \"/documentation/elements/\" + elem[\"name\"]\n    print(\"> Building page: %s\" % url)\n    title = elem[\"name\"]\n    tpl = Path(\"extra/web/reference_detail.tpl.html\").read_text()\n    html = TPL.render(tpl, elem)\n    write_page(url, html, title=title)\n\ndef main():\n    elems = glob.glob(\"manual/elem_*.yaml\")\n\n    for elem in elems:\n        build_elem_page(elem)\n\nmain()\n","sub_path":"extra/web/build_api_reference.py","file_name":"build_api_reference.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"76253506","text":"import shlex\nimport timeit\nimport shellcommand\ntry:\n    from shlex import quote # python 3.3 and above\nexcept:\n    from pipes import quote # python 3.2 and earlier\n\n\ndef prepareRunSafely(context, commandline, outfile):\n    config = context.config\n    cmd = shellcommand.parse(commandline)\n\n    runsafely = \"%s/RunSafely.sh\" % config.test_suite_root\n    runsafely_prefix = [runsafely]\n    if cmd.workdir is not None:\n        runsafely_prefix += [\"-d\", cmd.workdir]\n    timeit = \"%s/tools/timeit\" % config.test_source_root\n    if config.remote_host:\n        timeit = \"%s/tools/timeit-target\" % config.test_source_root\n        runsafely_prefix += [\"-r\", config.remote_host]\n    if config.remote_user:\n        runsafely_prefix += [\"-l\", config.remote_user]\n    if config.remote_client:\n        runsafely_prefix += [\"-rc\", config.remote_client]\n    if config.remote_port:\n        runsafely_prefix += [\"-rp\", config.remote_port]\n    if config.run_under:\n        runsafely_prefix += [\"-u\", config.run_under]\n    if not config.traditional_output:\n        runsafely_prefix += [\"-n\"]\n        if cmd.stdout is not None:\n            runsafely_prefix += [\"-o\", cmd.stdout]\n        if cmd.stderr is not None:\n            runsafely_prefix += [\"-e\", cmd.stderr]\n    else:\n        if cmd.stdout is not None or cmd.stderr is not None:\n            raise Exception(\"Separate stdout/stderr redirection not \" +\n                            \"possible with traditional output\")\n    timeout = \"7200\"\n    if cmd.stdin is not None:\n        stdin = cmd.stdin\n    else:\n        stdin = \"/dev/null\"\n    runsafely_prefix += [\"-t\", timeit, timeout, stdin, outfile]\n\n    complete_command = runsafely_prefix + [cmd.executable] + 
cmd.arguments\n new_commandline = \" \".join(map(quote, complete_command))\n return new_commandline\n\n\ndef wrapScript(context, script, suffix):\n adjusted_script = []\n outfile = context.tmpBase + suffix\n # Set name of timefile so getTime() can use it\n context.timefiles = []\n i = 0\n for line in script:\n number = \"\"\n if len(script) > 1:\n number = \"-%s\" % (i,)\n i += 1\n outfile = context.tmpBase + number + suffix\n context.timefiles.append(outfile + \".time\")\n\n line = prepareRunSafely(context, line, outfile)\n adjusted_script.append(line)\n return adjusted_script\n\n\ndef getTime(context):\n time = 0.0\n for timefile in context.timefiles:\n time += timeit.getUserTime(timefile)\n return time\n","sub_path":"litsupport/runsafely.py","file_name":"runsafely.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"98791356","text":"# PhonopyImporter/CASTEP.py\n\n\n# ----------------\n# Module Docstring\n# ----------------\n\n\"\"\" Contains routines for working with the CASTEP code. \"\"\"\n\n\n# -------\n# Imports\n# -------\n\nimport numpy as np\n\n\n# ---------\n# Functions\n# ---------\n\ndef ReadPhonon(file_path):\n \"\"\"\n Parse the CASTEP .phonon file at file_path.\n\n Return value:\n A (params, structure_data, q_point_data) tuple containing the data read in from the .phonon file.\n * params: { num_atoms : int, num_bands : int, num_qpts : int, 'freq_units' : string, 'ir_units' : string, 'raman_units' : string}\n * structure_data: (v_latt, atom_pos, atom_types, atom_mass)\n * q_point_data: [(q_pt, q_wt, freqs, ir_ints, eigenvectors)]\n \"\"\"\n\n # Parameters to capture.\n\n params, structure_data, q_point_data = None, None, None\n\n # Read and parse input file.\n\n with open(file_path, 'r') as input_reader:\n # Read header.\n\n assert next(input_reader).strip() == \"BEGIN header\"\n\n # Read calculation parameters.\n\n params = { }\n\n capture = [\n (\"Number of ions\" , 'num_atoms' , int ),\n (\"Number of branches\" , 'num_bands' , int ),\n (\"Number of wavevectors\", 'num_qpts' , int ),\n (\"Frequencies in\" , 'freq_units' , None),\n (\"IR intensities in\" , 'ir_units' , None),\n (\"Raman activities in\" , 'raman_units', None)\n ]\n\n for starts_with, params_key, conv_func in capture:\n line = next(input_reader).strip()\n\n assert line.startswith(starts_with)\n\n param = line.replace(starts_with, '').strip()\n\n if conv_func is not None:\n param = conv_func(param)\n\n params[params_key] = param\n\n assert next(input_reader).strip() == \"Unit cell vectors (A)\"\n\n # Read lattice vectors.\n\n v_latt = [\n [float(item) for item in next(input_reader).strip().split()[:3]]\n for i in range(0, 3)\n ]\n\n assert next(input_reader).strip() == \"Fractional Co-ordinates\"\n\n # Read atom data.\n\n atom_data = []\n\n for i in range(0, params['num_atoms']):\n elements = next(input_reader).strip().split()\n\n assert len(elements) >= 5 and int(elements[0]) == i + 1\n\n atom_pos = [float(item) for item in elements[1:4]]\n atom_type = str(elements[4])\n atom_mass = float(elements[5])\n\n atom_data.append(\n (atom_pos, atom_type, atom_mass)\n )\n\n assert next(input_reader).strip() == \"END header\"\n\n # Read frequencies/eigenvectors for each calculated wavevector.\n\n q_point_data = []\n\n for i in range(0, params['num_qpts']):\n # Read wavevector coordinates and weight.\n\n elements = next(input_reader).strip().split()\n\n assert len(elements) >= 6 and elements[0] == \"q-pt=\" and 
int(elements[1]) == i + 1\n\n q = [float(item) for item in elements[2:5]]\n w = float(elements[5])\n\n # Read frequencies and spectroscopic activities.\n\n freqs, ir_ints = [], []\n\n for j in range(0, params['num_bands']):\n elements = next(input_reader).strip().split()\n\n assert len(elements) >= 3 and int(elements[0]) == j + 1\n\n freqs.append(\n float(elements[1])\n )\n\n ir_ints.append(\n float(elements[2])\n )\n\n # Read eigenvectors.\n\n assert next(input_reader).strip() == \"Phonon Eigenvectors\"\n\n headers = next(input_reader).strip().split()\n\n expected_headers = [\"Mode\", \"Ion\", \"X\", \"Y\", \"Z\"]\n\n for j, expected_header in enumerate(expected_headers):\n assert headers[j] == expected_header\n\n eigenvectors = []\n\n for j in range(0, params['num_bands']):\n eigenvector = []\n\n for k in range(0, params['num_atoms']):\n elements = next(input_reader).strip().split()\n\n assert len(elements) >= 8 and int(elements[0]) == j + 1 and int(elements[1]) == k + 1\n\n eigenvector.append(\n [float(elements[i]) + 1.0j * float(elements[i + 1]) for i in range(2, 8, 2)]\n )\n\n eigenvectors.append(eigenvector)\n\n q_point_data.append(\n (q, w, freqs, ir_ints, eigenvectors)\n )\n\n # Reformat data.\n\n v_latt = [np.array(v, dtype = np.float64) for v in v_latt]\n\n atom_pos = [\n np.array(pos, dtype = np.float64)\n for pos, _, _ in atom_data\n ]\n\n atom_types = [atom_type for _, atom_type, _ in atom_data]\n atom_mass = [atom_mass for _, _, atom_mass in atom_data]\n\n structure_data = (v_latt, atom_pos, atom_types, atom_mass)\n\n for i, (q, w, freqs, ir_ints, eigenvectors) in enumerate(q_point_data):\n q = np.array(q, dtype = np.float64)\n\n for j, eigenvector in enumerate(eigenvectors):\n eigenvectors[j] = np.array(eigenvector, dtype = np.complex128)\n\n q_point_data[i] = (q, w, freqs, ir_ints, eigenvectors)\n\n # Returm results\n\n return (params, structure_data, q_point_data)\n","sub_path":"Phonopy-Importer/PhonopyImporter/CASTEP.py","file_name":"CASTEP.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398482983","text":"'''\r\nCreated on Jul 31, 2018\r\n\r\n@author: Vladyslav_Breus\r\n'''\r\nimport collections\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport pickle\r\nfrom util import pat_config\r\nimport const\r\n\r\ndef read_data(db, col_name):\r\n records = db.get_entire_collection(col_name)\r\n texts = {}\r\n for value in records:\r\n texts[value[const.PATNO_KEY]] = tf.compat.as_str(pat_record_to_str(value))\r\n \r\n return texts\r\n\r\ndef pat_record_to_str(record):\r\n str_rec = ''\r\n for key, value in record.items():\r\n if isinstance(value, str) & (key is not const.PATNO_KEY): \r\n str_rec += value + ' '\r\n return str_rec\r\n\r\ndef normalize_text(texts, stops):\r\n # Lower case, remove digits and punctuation\r\n# texts = {k:v.lower().translate(str.maketrans('','',string.punctuation+'1234567890')) for k,v in texts.items()}\r\n\r\n # Remove stopwords\r\n\r\n texts = {k:' '.join([word for word in v.split() if word not in (stops)]) for k,v in texts.items()}\r\n # Trim extra whitespace\r\n# texts = {k:' '.join(v.split()) for k,v in texts.items()}\r\n return(texts)\r\n\r\ndef build_entire_dict(db, cols, vocabulary_size, logger, file_name, oft):\r\n cnter = collections.Counter()\r\n pat_dict = {}\r\n pat_id = 0\r\n \r\n for col in cols:\r\n records = db.get_entire_collection(col)\r\n if records is None:\r\n logger.debug('*** Collection '+str(col)+' is non exist')\r\n 
continue\r\n for rec in records:\r\n words = []\r\n pat_dict[pat_id] = {const.COL_KEY : col, const.PATNO_KEY : rec[const.PATNO_KEY]}\r\n pat_id += 1\r\n for key, value in rec.items():\r\n if isinstance(value, str) & (key != const.PATNO_KEY): \r\n words.extend(tf.compat.as_str(value).split())\r\n #cc = collections.Counter(words)\r\n cnter.update(words)\r\n #logger.debug('Collection: '+str(col)+' unique words total: '+str(len(cnter)))\r\n \r\n word_dict = {}\r\n count = [['UNK', -1]]\r\n\r\n total_words = len(cnter)\r\n\r\n logger.debug('Total patents: '+str(len(pat_dict)))\r\n logger.debug('Total words: '+str(total_words))\r\n \r\n lst = cnter.most_common()\r\n\r\n for i in range(0,10):\r\n lst2 = [(k,v) for (k,v) in lst if v>oft+i]\r\n logger.debug('Words found more than '+str(oft+i)+' times: '+str(len(lst2))+' or '+str((len(lst2)*100)//total_words)+'%')\r\n \r\n count.extend(cnter.most_common(vocabulary_size-1))\r\n\r\n for word,_ in count:\r\n word_dict[word] = len(word_dict)\r\n\r\n unk_count = total_words - len(word_dict) + 1\r\n# cfg = pat_config.PatConfig.getInstance()\r\n #word_dict_file = cfg.getModelDir() + cfg.getWordDictFile()\r\n with open(file_name, 'wb') as fileObj:\r\n pickle.dump(pat_dict, fileObj)\r\n pickle.dump(word_dict, fileObj)\r\n logger.debug('Saved to: '+file_name)\r\n \r\n return word_dict, unk_count\r\n\r\ndef read_entire_dict(logger):\r\n cfg = pat_config.PatConfig.getInstance()\r\n word_dict_file = cfg.getModelDir() + cfg.getWordDictFile()\r\n with open(word_dict_file, 'rb') as fileObj:\r\n pat_dict = pickle.load(fileObj, encoding='utf-8')\r\n word_dict = pickle.load(fileObj, encoding='utf-8')\r\n \r\n logger.debug('Word dict>> total words: '+str(\"{:,}\".format(len(word_dict)))+' total patents: '+str(\"{:,}\".format(len(pat_dict))))\r\n return pat_dict, word_dict\r\n\r\ndef encode_texts(texts, word_dict):\r\n data = []\r\n\r\n for _, patent in texts.items():\r\n pat_data = []\r\n for word in patent.split():\r\n idx = word_dict.get(word, 0)\r\n pat_data.append(idx)\r\n data.append(pat_data)\r\n \r\n return data\r\n\r\ndef build_dictionary(texts, vocabulary_size):\r\n # Turn sentences (list of strings) into lists of words\r\n split_sentences = [v.split() for v in texts.values()]\r\n words = [x for sublist in split_sentences for x in sublist]\r\n \r\n # Initialize list of [word, word_count] for each word, starting with unknown\r\n count = [[const.UNKNOWN_KEY, -1]]\r\n \r\n # Now add most frequent words, limited to the N-most frequent (N=vocabulary size)\r\n count.extend(collections.Counter(words).most_common(vocabulary_size-1))\r\n \r\n # Now create the dictionary\r\n word_dict = {}\r\n # For each word, that we want in the dictionary, add it, then make it\r\n # the value of the prior dictionary length\r\n for word,_ in count:\r\n word_dict[word] = len(word_dict)\r\n\r\n unk_count = 0\r\n data = []\r\n patnums = {}\r\n i = 0\r\n \r\n for key, patent in texts.items():\r\n pat_data = []\r\n patnums[i] = key\r\n i+=1\r\n \r\n for word in patent.split():\r\n idx = word_dict.get(word, 0)\r\n if idx == 0:\r\n unk_count += 1\r\n pat_data.append(idx)\r\n data.append(pat_data)\r\n\r\n# count[0][1] = unk_count\r\n\r\n return word_dict, data, patnums, unk_count\r\n\r\ndef generate_batch_data(sentences, batch_size, window_size, method='doc2vec'):\r\n # Fill up data batch\r\n batch_data = []\r\n label_data = []\r\n while len(batch_data) < batch_size:\r\n # select random sentence to start\r\n rand_sentence_ix = int(np.random.choice(len(sentences), size=1))\r\n rand_sentence = 
sentences[rand_sentence_ix]\r\n # Generate consecutive windows to look at\r\n window_sequences = [rand_sentence[max((ix-window_size),0):(ix+window_size+1)] for ix, x in enumerate(rand_sentence)]\r\n # Denote which element of each window is the center word of interest\r\n label_indices = [ix if ix> pc3.out &\"))\n else:\n \tprint(\"The program is alive!\")\n time.sleep(1)","sub_path":"Server/connect/check_run.py","file_name":"check_run.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"116448465","text":"#! /usr/bin/env python2\n#coding:utf-8\n__metaclass__ = type\n\n\nimport os, sys\n\n#terst\nclass Addr():\n ADDR_PATH = '/etc/sysconfig/network-scripts'\n def get_device(self):\n tl_device = []\n for a, b, files in os.walk(self.ADDR_PATH):\n pass\n for file in files:\n if 'ifcfg' in file and file != 'ifcfg-lo':\n tl_device.append(file.split('-')[1])\n return tl_device\n\n def reload_device(self, tl_device):\n for device in tl_device:\n stop_cmd = 'ifdown ' + device\n start_cmd = 'ifup ' + device\n os.system(stop_cmd)\n os.system(start_cmd)\n return\n\n def get_uuid(self, device):\n file = self.ADDR_PATH + '/ifcfg-' + device\n for line in open(file, 'r').readlines():\n if 'UUID' in line:\n line = line.split('=')[1]\n line = line.strip('\"')\n line = line.strip('\\n')\n line = line.strip('\"')\n uuid = line.strip('\\n')\n return uuid\n\n def get_gateway(self, ipaddr):\n if '192.168.1' in ipaddr:\n gateway = '192.168.1.1'\n else:\n gateway = '.'.join(ipaddr.split('.')[0:3]) + '.2'\n return gateway\n \n def get_dns(self, ipaddr):\n if '192.168.1' in ipaddr:\n dns = '8.8.8.8'\n else:\n dns = '.'.join(ipaddr.split('.')[0:3]) + '.12'\n return dns\n \n\n def analyse_addr_config_files(self):\n tl_device = self.get_device()\n self.reload_device(tl_device)\n td_devices = {}\n for device in tl_device:\n cmd = 'ifconfig -v ' + device\n cmd_ifconfig = os.popen(cmd).readlines()\n for line in cmd_ifconfig:\n if 'inet' in line and 'inet6' not in line:\n ipaddr = ''.join([x for x in line.split('netmask')[0].split('inet')[1] if x != ' '])\n netmask = ''.join([x for x in line.split('netmask')[1].split('broadcast')[0] if x != ' '])\n td_devices[device] = {'IPADDR': ipaddr, 'NETMASK': netmask}\n if 'ether' in line:\n hwaddr = ''.join([x for x in line.split('ether')[1].split('txqueuelen')[0] if x != ' '])\n td_devices[device]['HWADDR'] = hwaddr\n uuid = self.get_uuid(device)\n td_devices[device]['UUID'] = uuid\n td_devices[device]['TYPE'] = 'Ethernet'\n td_devices[device]['BOOTPROTO'] = 'static'\n td_devices[device]['NAME'] = device\n td_devices[device]['DEVICE'] = device\n td_devices[device]['ONBOOT'] = 'yes'\n td_devices[device]['NM_CONTROLLED'] = 'yes'\n td_devices[device]['GATEWAY'] = self.get_gateway(td_devices[device]['IPADDR'])\n td_devices[device]['DNS1'] = self.get_dns(td_devices[device]['IPADDR'])\n return td_devices\n \n def create_addr_dict(self):\n td_devices = self.analyse_addr_config_files()\n for device in td_devices:\n file = self.ADDR_PATH + '/ifcfg-' + device\n bak_file = open('/tmp/ifcfg-' + device, 'w')\n for line in open(file, 'r').readlines():\n bak_file.write(line)\n bak_file.close()\n after_file = open(file, 'w')\n td_device = td_devices[device]\n for colum in td_device:\n after_file.write(colum + '=' + td_device[colum] + '\\n')\n after_file.close()\n\n\nif '__main__' == __name__:\n a = Addr()\n 
a.create_addr_dict()\n\n","sub_path":"python/addr.py","file_name":"addr.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528525985","text":"from util.common_imports import *\n\n\nclass Solution:\n    def numSubseq(self, nums: List[int], target: int) -> int:\n        nums.sort()\n        n = len(nums)\n        i, j = 0, n - 1\n        res = 0\n        sum = [0] * n\n\n        M = 10**9 + 7\n        for i in range(n):\n            while nums[i] + nums[j] > target and j > i:\n                j -= 1\n            if j == i:\n                if nums[i] + nums[j] <= target:\n                    res += 1\n                return res % M\n\n            num_between = j - i - 1\n            if i == 0:\n                prePow=1\n                sum[0] = 2 # special {num[i]}\n            for k in range(1, num_between + 1):\n                prePow <<= 1 # !2^n, calling pow() here would exceed the time limit\n                sum[k] = (sum[k - 1] + prePow) % M\n            res += sum[num_between]\n\n        return res % (1000000000 + 7)\n\n\nprint(Solution().numSubseq(nums=[2, 3, 3, 4, 6, 7], target=12))\nprint(Solution().numSubseq(nums=[5, 2, 4, 1, 7, 6, 8], target=16))\nprint(Solution().numSubseq(nums=[3, 5, 6, 7], target=9))\n\n# Runtime: 3976 ms, beats 100.00% of Python3 submissions\n# Memory usage: 22.5 MB, beats 100.00% of Python3 submissions\n","sub_path":"weekly-contest/195/3_quickPow.py","file_name":"3_quickPow.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"163509465","text":"from typing import Any\nfrom typing import Optional\n\n\nclass Test:\n    @staticmethod\n    def assert_equals(\n        executed_value: Any, expected_value: Any, error_message: Optional[str] = None\n    ):\n        \"\"\"Asserts that the executed value is equal to the expected value.\n\n        Check if the executed value is equal to the expected value.\n\n        Args:\n            executed_value(Any): The value that was executed.\n            expected_value(Any): The value that was expected.\n            error_message(Optional[str]): The error message that should be\n                displayed if assertion fails.\n        \"\"\"\n        assert executed_value == expected_value, error_message\n","sub_path":"examples/python/testframework.py","file_name":"testframework.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"93586384","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom common.helpers import time_helper\r\nfrom common.items import Profile, Post, MergedPost, PostView\r\nfrom twitter.helpers import gen_id\r\nfrom twitter.helpers import gen_url\r\nfrom twitter.object_types import ObjectType, DiscussionType\r\n\r\n\r\ndef profile_from_user(user):\r\n    user_id = user['id']\r\n    profile = Profile()\r\n    profile['_id'] = gen_id.profile(user_id)\r\n    profile['sm_id'] = user_id\r\n    profile['source'] = gen_id.SOURCE\r\n    profile['object_type'] = ObjectType.USER\r\n    profile['href'] = gen_url.profile(user['screen_name'])\r\n    profile['name'] = user['name']\r\n    profile['fetch_time'] = time_helper.utc_now()\r\n    profile['attrs'] = user\r\n    return profile\r\n\r\n\r\ndef post_from_tweet(tweet):\r\n    tweet_id = tweet['id']\r\n    user_id = tweet['user']['id']\r\n\r\n    post = Post()\r\n    post['_id'] = gen_id.post(user_id=user_id, post_id=tweet_id)\r\n    post['sm_id'] = tweet_id\r\n    post['source'] = gen_id.SOURCE\r\n    post['owner_id'] = gen_id.profile(user_id)\r\n    post['author_id'] = gen_id.profile(user_id)\r\n    post['href'] = gen_url.post(tweet['user']['screen_name'], tweet_id)\r\n    post['discussion_type'] = DiscussionType.TWEETS\r\n\r\n    retweeted = tweet.get('retweeted_status')\r\n    replied = tweet.get('replied_status')\r\n    # reply_tweet_id = 
tweet.get('in_reply_to_status_id')\r\n    # reply_user_id = tweet.get('in_reply_to_user_id')\r\n\r\n    if retweeted:\r\n        post['object_type'] = ObjectType.RETWEET\r\n        post['original_topic_id'] = retweeted['id']\r\n        post['is_repost'] = True\r\n        post['href'] = gen_url.post(retweeted['user']['screen_name'], retweeted['id'])\r\n    elif replied:\r\n        post['object_type'] = ObjectType.REPLY\r\n        post['parent_post_id'] = gen_id.post(user_id=replied['user']['id'], post_id=replied['id'])\r\n        post['owner_id'] = gen_id.profile(user_id=replied['user']['id'])\r\n    else:\r\n        post['object_type'] = ObjectType.TWEET\r\n\r\n    # naive_post_date = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')\r\n    # post['post_date'] = time_helper.localize(naive_date=naive_post_date, tz=time_helper.TZ_UTC)\r\n    post['post_date'] = time_helper.to_utc_date(tweet['created_at'], dayfirst=True, yearfirst=False)\r\n    post['fetch_time'] = time_helper.utc_now()\r\n    post['attrs'] = tweet\r\n\r\n    return post\r\n\r\n\r\ndef merge_post_and_profile(profile, post):\r\n    \"\"\"\r\n    :type profile: common.items.Profile\r\n    :type post: common.items.Post\r\n    \"\"\"\r\n    result = MergedPost()\r\n    result['_id'] = post['_id']\r\n    result['source'] = post['source']\r\n    result['discussion_type'] = post['discussion_type']\r\n    result['object_type'] = post['object_type']\r\n    result['fetch_time'] = post['fetch_time']\r\n    result['profile'] = dict(profile)\r\n    result['post'] = dict(post)\r\n    return result\r\n\r\n\r\ndef post_view_from_merged_post(merged_post):\r\n    # assert isinstance(merged_post, dict)\r\n\r\n    # discussion_type = merged_post['discussion_type']\r\n    object_type = merged_post['object_type']\r\n\r\n    profile = merged_post['profile']\r\n    post = merged_post['post']\r\n\r\n    if object_type == ObjectType.TWEET:\r\n        return _post_view_from_tweet(post, profile)\r\n\r\n    if object_type == ObjectType.RETWEET:\r\n        return _post_view_from_retweet(post, profile)\r\n\r\n    if object_type == ObjectType.REPLY:\r\n        return _post_view_from_reply(post, profile)\r\n\r\n\r\ndef _post_view_from_tweet(post, profile):\r\n    post_view = PostView()\r\n    post_view['id'] = post['_id']\r\n    post_view['source'] = post['source']\r\n    post_view['profile_id'] = profile['_id']\r\n    post_view['sm_profile_id'] = profile['sm_id']\r\n    post_view['profile_name'] = profile['name']\r\n\r\n    post_view['profile_href'] = profile['href']\r\n    post_view['post_href'] = post['href']\r\n    post_view['sm_post_id'] = post['sm_id']\r\n    post_view['parent_post_id'] = post.get('parent_post_id', '')\r\n\r\n    post_view['post_date'] = post['post_date']\r\n    post_view['fetch_time'] = post['fetch_time']\r\n    post_view['post_body'] = post['attrs']['text']\r\n\r\n    post_view['discussion_type'] = post['discussion_type']\r\n    post_view['object_type'] = post['object_type']\r\n\r\n    followers_count = profile['attrs'].get('followers_count', 0)\r\n    friends_count = profile['attrs'].get('friends_count', 0)\r\n    reach = followers_count + friends_count\r\n\r\n    post_view['reach'] = reach\r\n    post_view['reach_followers'] = followers_count\r\n    post_view['reach_friends'] = friends_count\r\n\r\n    post_view['profile_reach'] = reach\r\n    post_view['profile_reach_followers'] = followers_count\r\n    post_view['profile_reach_friends'] = friends_count\r\n\r\n    favorite_count = post['attrs'].get('favorite_count', 0)\r\n    retweet_count = post['attrs'].get('retweet_count', 0)\r\n    # replies_count = ???\r\n    engagement = favorite_count + retweet_count\r\n\r\n    post_view['engagement_likes'] = favorite_count\r\n    post_view['engagement_reposts'] = 
retweet_count\r\n    post_view['engagement'] = engagement\r\n\r\n    post_view['is_comment'] = False\r\n\r\n    return post_view\r\n\r\n\r\ndef _post_view_from_retweet(post, profile):\r\n    post_view = PostView()\r\n    post_view['id'] = post['_id']\r\n    post_view['source'] = post['source']\r\n    post_view['profile_id'] = profile['_id']\r\n    post_view['sm_profile_id'] = profile['sm_id']\r\n    post_view['profile_name'] = profile['name']\r\n\r\n    post_view['profile_href'] = profile['href']\r\n    post_view['post_href'] = post['href']\r\n    post_view['sm_post_id'] = post['sm_id']\r\n    post_view['parent_post_id'] = post.get('parent_post_id', '')\r\n\r\n    post_view['post_date'] = post['post_date']\r\n    post_view['fetch_time'] = post['fetch_time']\r\n    post_view['post_body'] = post['attrs']['text']\r\n\r\n    post_view['discussion_type'] = post['discussion_type']\r\n    post_view['object_type'] = post['object_type']\r\n\r\n    post_view['sm_original_post_id'] = post['original_topic_id']\r\n    post_view['is_repost'] = True\r\n\r\n    followers_count = profile['attrs'].get('followers_count', 0)\r\n    friends_count = profile['attrs'].get('friends_count', 0)\r\n    reach = followers_count + friends_count\r\n\r\n    post_view['reach'] = reach\r\n    post_view['reach_followers'] = followers_count\r\n    post_view['reach_friends'] = friends_count\r\n\r\n    post_view['profile_reach'] = reach\r\n    post_view['profile_reach_followers'] = followers_count\r\n    post_view['profile_reach_friends'] = friends_count\r\n\r\n    retweeted = post['attrs']['retweeted_status']\r\n    favorite_count = retweeted.get('favorite_count', 0)\r\n    retweet_count = retweeted.get('retweet_count', 0)\r\n    # replies_count = ???\r\n    engagement = favorite_count + retweet_count\r\n\r\n    post_view['engagement_likes'] = favorite_count\r\n    post_view['engagement_reposts'] = retweet_count\r\n    post_view['engagement'] = engagement\r\n\r\n    post_view['is_comment'] = False\r\n\r\n    return post_view\r\n\r\n\r\ndef _post_view_from_reply(post, profile):\r\n    post_view = PostView()\r\n    post_view['id'] = post['_id']\r\n    post_view['source'] = post['source']\r\n    post_view['profile_id'] = profile['_id']\r\n    post_view['sm_profile_id'] = profile['sm_id']\r\n    post_view['profile_name'] = profile['name']\r\n\r\n    post_view['profile_href'] = profile['href']\r\n    post_view['post_href'] = post['href']\r\n    post_view['sm_post_id'] = post['sm_id']\r\n    post_view['parent_post_id'] = post.get('parent_post_id', '')\r\n\r\n    post_view['post_date'] = post['post_date']\r\n    post_view['fetch_time'] = post['fetch_time']\r\n    post_view['post_body'] = post['attrs']['text']\r\n\r\n    post_view['discussion_type'] = post['discussion_type']\r\n    post_view['object_type'] = post['object_type']\r\n\r\n    followers_count = profile['attrs'].get('followers_count', 0)\r\n    friends_count = profile['attrs'].get('friends_count', 0)\r\n    reach = followers_count + friends_count\r\n\r\n    replied_user = post['attrs']['replied_status']['user']\r\n    replied_followers_count = replied_user.get('followers_count', 0)\r\n    replied_friends_count = replied_user.get('friends_count', 0)\r\n    replied_reach = replied_followers_count + replied_friends_count\r\n\r\n    post_view['reach'] = replied_reach\r\n    post_view['reach_followers'] = replied_followers_count\r\n    post_view['reach_friends'] = replied_friends_count\r\n\r\n    post_view['profile_reach'] = reach\r\n    post_view['profile_reach_followers'] = followers_count\r\n    post_view['profile_reach_friends'] = friends_count\r\n\r\n    favorite_count = post['attrs'].get('favorite_count', 0)\r\n    retweet_count = post['attrs'].get('retweet_count', 0)\r\n    # 
replies_count = ???\r\n engagement = favorite_count + retweet_count\r\n\r\n post_view['engagement_likes'] = favorite_count\r\n post_view['engagement_reposts'] = retweet_count\r\n post_view['engagement'] = engagement\r\n\r\n post_view['is_comment'] = True\r\n\r\n return post_view\r\n","sub_path":"twitter/item_helper.py","file_name":"item_helper.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"519200153","text":"# Calc and plot time complexity of the sort, lower, max python functions\r\nfrom matplotlib import pyplot\r\nfrom time import perf_counter\r\nimport random\r\nimport re\r\n\r\n# sort an array in ascending Order\r\ndef sort_arr(arr):\r\n return arr.sort()\r\n\r\n\r\n\r\n# get the max value from an array of integers\r\ndef max_arr(arr):\r\n return max(arr)\r\n\r\n\r\n# transform letters of a string to lower case\r\ndef lower_case(word):\r\n return \" \".join(re.findall(r\"[a-zA-Z0-9]+\", word))\r\n\r\ndef plotTime(f, minArg, maxArg):\r\n \"\"\"\r\n Run timer and plot time complexity\r\n \"\"\"\r\n len_input = []\r\n t = []\r\n for i in range(minArg, maxArg):\r\n if f == lower_case:\r\n # string of capital As of length i\r\n l_input = \"Aaaaa\" + \" \"*i + \"AAasama\" + \" \"*i\r\n else:\r\n # randomize an array of length i with values from 0 to 100\r\n l_input = random.sample(range(100), i)\r\n # time start point\r\n start = perf_counter()\r\n # calling the function\r\n f(l_input)\r\n # time end point\r\n end = perf_counter()\r\n # appending the len of the input and time it took to run\r\n len_input.append(i)\r\n t.append((end - start) / 1000)\r\n return len_input, t\r\n\r\n\r\ndef size_million(f):\r\n \"\"\"\r\n estimating the time if the input length is 1,000,000\r\n \"\"\"\r\n if f == lower_case:\r\n l_input = \"A\" * 1000000\r\n else:\r\n l_input = random.sample(range(1000000), 1000000)\r\n start = perf_counter()\r\n f(l_input)\r\n end = perf_counter()\r\n return end - start\r\n\r\n\r\ndef main():\r\n # task 2; plot the time it takes for a given function to run with an input length of len 1: 100\r\n print('Analyzing Algorithms...')\r\n # calling the plotTime function and give it a function name, min, max values\r\n len_input, t = plotTime(sort_arr, 1, 100)\r\n # plot the output\r\n pyplot.plot(len_input, t, 'o')\r\n # title of the plot\r\n pyplot.title(\"Time Complexity of sort fun\")\r\n # naming x axis\r\n pyplot.xlabel(\"List length\")\r\n # naming y axis\r\n pyplot.ylabel(\"Time in (ms)\")\r\n pyplot.show()\r\n\r\n len_input, t = plotTime(max_arr, 1, 100)\r\n pyplot.plot(len_input, t, 'o')\r\n pyplot.title(\"Time Complexity of max fun\")\r\n pyplot.xlabel(\"List length\")\r\n pyplot.ylabel(\"Time in (ms)\")\r\n pyplot.show()\r\n\r\n len_input, t = plotTime(lower_case, 1, 100)\r\n pyplot.plot(len_input, t, 'o')\r\n pyplot.title(\"Time Complexity of Lower_case fun\")\r\n pyplot.xlabel(\"List length\")\r\n pyplot.ylabel(\"Time in (ms)\")\r\n pyplot.show()\r\n\r\n # task 3; estimating the time it will take to execute input of length million\r\n print(\"Time to sort an array of length million= {} sec\".format(size_million(sort_arr)))\r\n print(\"Time to find max number of an array of length million= {} sec\".format(size_million(max_arr)))\r\n print(\"Time to lower a string length million= {} sec\".format(size_million(lower_case)))\r\n\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","sub_path":"implementation_1/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"424192023","text":"\"\"\"\n\n@author: eocampo\n\n\nModification History\ndate-author-description\n\n\"\"\"\n__version__ = '20181113'\n\nimport logging\nimport sys\nimport os\n\nfrom logging.handlers import TimedRotatingFileHandler\n\n\nclass _AppLogger(object):\n \"\"\"docstring for _AppLogger.\"\"\"\n\n def __init__(self):\n super(_AppLogger, self).__init__()\n\n self.formatter = None\n self._set_cfg_logger()\n\n\n\n def _set_cfg_logger(self):\n\n self.formatter = logging.Formatter('%(asctime)s [%(levelname)-8s] %(module)s.%(funcName)s -- %(message)s',\n \"%d.%m.%Y_%H:%M:%S\")\n\n\n def get_console_handler(self):\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(self.formatter)\n return console_handler\n\n def get_file_handler(self, logFile):\n file_handler = TimedRotatingFileHandler(logFile)\n file_handler.setFormatter(self.formatter)\n return file_handler\n\n def get_logger(self, ldir, lname):\n\n log_cons = 'FALSE'\n\n if 'LOG_NAME' in os.environ.keys(): lname = os.environ['LOG_NAME']\n\n if 'LOG_DIR' in os.environ.keys(): ldir = os.environ['LOG_DIR' ]\n\n if 'LOG_CONS' in os.environ.keys(): log_cons = os.environ['LOG_CONS']\n\n if 'LOG_LEVEL' in os.environ.keys(): ll = eval(f\"logging.{os.environ['LOG_LEVEL']}\")\n else : ll = logging.INFO\n\n logFile = os.path.join(ldir, lname)\n logger = logging.getLogger(lname)\n\n logger.setLevel(ll)\n\n if log_cons == 'TRUE':\n logger.addHandler(self.get_console_handler())\n\n logger.addHandler(self.get_file_handler(logFile))\n\n # with this pattern, it's rarely necessary to propagate the error up to parent\n logger.propagate = False\n print(f'In get_logger, logfile = {logFile}')\n return logger\n","sub_path":"lib/common/app_log.py","file_name":"app_log.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175632971","text":"import sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n\nclass MainWindow(QMainWindow):\n \n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Stock Management Test\")\n self.resize(600, 400)\n self.stockManagement()\n\n def stockManagement(self):\n #Title\n self.title = QLabel(\"Stock Management\")\n self.title.setAlignment(Qt.AlignCenter)\n self.title_font = QFont()\n self.title_font.setPointSize(18)\n self.title_font.setBold(True)\n self.title.setFont(self.title_font)\n\n #Image\n self.image = QLabel()\n self.image_pixmap = QPixmap(\".\\images\\dance.gif\")\n self.scaled_image = self.image_pixmap.scaled(180, 180, Qt.IgnoreAspectRatio, Qt.FastTransformation)\n self.image.setPixmap(self.scaled_image)\n \n \n #Product Name\n self.product_name = QLineEdit(\"Pedigree Chum\")\n self.product_name.setReadOnly(True)\n self.product_name.setFixedHeight(50)\n self.product_name.setAlignment(Qt.AlignRight)\n self.product_font = QFont()\n self.product_font.setPointSize(12)\n self.product_name.setFont(self.product_font)\n self.product_name.setAlignment(Qt.AlignCenter)\n\n\n #Location 1\n self.location1_label = QLineEdit(\"Stock in Location 1:\")\n self.location1_label.setAlignment(Qt.AlignCenter)\n self.location1_label.setReadOnly(True)\n self.location1 = QSpinBox()\n self.location1.setRange(0, 100)\n self.location1.setMinimumWidth(100)\n\n #Location 2\n 
self.location2_label = QLineEdit(\"Stock in Location 2:\")\n self.location2_label.setAlignment(Qt.AlignCenter)\n self.location2_label.setReadOnly(True)\n self.location2 = QSpinBox()\n self.location2.setRange(0, 100)\n self.location2.setMinimumWidth(100)\n\n #Stock Required\n self.stock_required_label = QLineEdit(\"Stock Required\")\n self.stock_required_label.setAlignment(Qt.AlignCenter)\n self.stock_required_label.setReadOnly(True)\n self.stock_required = QLineEdit(\"12\")\n self.stock_required.setAlignment(Qt.AlignCenter)\n self.stock_required.setReadOnly(True)\n\n #Done\n self.done_button = QPushButton(\"Done\")\n\n\n # Adding Layouts \n self.image_layout = QHBoxLayout()\n self.image_widget = QWidget()\n self.image_layout.addWidget(self.image)\n self.image_layout.addWidget(self.product_name)\n self.image_widget.setLayout(self.image_layout)\n\n self.location1_layout = QHBoxLayout()\n self.location1_widget = QWidget()\n self.location1_layout.addWidget(self.location1_label)\n self.location1_layout.addWidget(self.location1)\n self.location1_widget.setLayout(self.location1_layout)\n\n self.location2_layout = QHBoxLayout()\n self.location2_widget = QWidget()\n self.location2_layout.addWidget(self.location2_label)\n self.location2_layout.addWidget(self.location2)\n self.location2_widget.setLayout(self.location2_layout)\n\n self.stock_layout = QHBoxLayout()\n self.stock_widget = QWidget()\n self.stock_layout.addWidget(self.stock_required_label)\n self.stock_layout.addWidget(self.stock_required)\n self.stock_widget.setLayout(self.stock_layout)\n\n #Group Box\n self.group_box_layout = QVBoxLayout()\n self.group_box = QGroupBox(\"Current Stock\")\n self.group_box_layout.addWidget(self.location1_widget)\n self.group_box_layout.addWidget(self.location2_widget)\n self.group_box.setLayout(self.group_box_layout)\n\n\n\n self.main_layout = QVBoxLayout()\n self.main_widget = QWidget()\n self.main_layout.addWidget(self.title)\n self.main_layout.addWidget(self.image_widget)\n self.main_layout.addWidget(self.group_box)\n self.main_layout.addWidget(self.stock_widget)\n self.main_layout.addWidget(self.done_button)\n self.main_widget.setLayout(self.main_layout)\n self.setCentralWidget(self.main_widget)\n \n \n\n \n \n \ndef main():\n app = QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n main_window.raise_()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"Implementation/Gui/tests/Stock Management Test.py","file_name":"Stock Management Test.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650714891","text":"# Image Processing Assignment 1\n\n# Sharon O'Malley C16469614 DT228 2019\n# I will be using image processing techniques to identify and enhance the shark in the image.\n# I will be using image processing techniques such as:\n# changing the colour space, thresholding, masking, Kernels and cropping\n\n\n# import the necessary packages:\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as image\nfrom matplotlib import colors\nimport easygui\n\n######### Reading Images #########\n\n# Opening an image from a file:\n#I = cv2.imread(\"Shark.png\")\nI =cv2.imread(\"Shark 1.png\")\n\n######### Colourspaces #########\n\n# Converting to different colour spaces:\nRGB = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\nYUV = cv2.cvtColor(I, cv2.COLOR_BGR2YUV)\n\n# Seperating the channels in the colour spaces:\nY, U, V = 
cv2.split(YUV)\nR, G, B = cv2.split(RGB)\n\n#### combining colour channels ###\nRU = cv2.merge([R, U, U])\nnew = cv2.cvtColor(RU, cv2.COLOR_BGR2GRAY)\n\n#### Adaptive Threshold to seperate the shark from the background and create a mask of the image ####\nB= cv2.adaptiveThreshold(new, maxValue = 255, adaptiveMethod = cv2.ADAPTIVE_THRESH_GAUSSIAN_C, thresholdType = cv2.THRESH_BINARY_INV, blockSize = 941, C = 2);\n\n#### Morphology ####\nshape = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))\nNewMask = cv2.erode(B, shape)\ni = 0\nwhile i < 4:\n NewMask = cv2.morphologyEx(NewMask,cv2.MORPH_OPEN,shape)\n NewMask = cv2.erode(NewMask, shape)\n i += 1\n\n#### Applying the original shark over the mask and changing background of the image to white####\nsel = cv2.bitwise_and(I, I, mask = NewMask)\nsel[np.where((sel==[0,0,0]).all(axis=2))] = [255, 255, 255]\n\ncv2.imshow(\"sel\", sel)\ncv2.waitKey(0)\n\n####Finding X, Y, Height and Width of shark in the image####\nlight_blue = np.array([4, 3,4]) \ndark_blue = np.array([210, 255, 255])\nmaskroi = cv2.inRange(sel, light_blue, dark_blue)\n\n## Using more morphology to rid pixels outside of the shark for better identification shark\nmaskroi = cv2.erode(maskroi, shape)\nwhile i < 9:\n maskroi = cv2.erode(maskroi, shape)\n maskroi = cv2.morphologyEx(maskroi,cv2.MORPH_OPEN,shape)\n i += 1\n\ncv2.imshow(\"maskroi\", maskroi)\ncv2.waitKey(0)\n\n## Calculates and returns point x, y, height and width of the shark in the mask\n(x,y,w,h) = cv2.boundingRect(maskroi)\n\n#### Cropping the image to Shark co-ordinates ####\nx=x-25\ny=y-25\nw=x+w+25\nh=y+h+25\n\nif x<=0:\n\tx=25\n\nif y<=0:\n\ty=25\n\n#Final Image\nC = sel[y:h, x:w]\n\nprint(x, y, w, h)\ncv2.imshow(\"C\", C)\ncv2.waitKey(0)\n\n#### Showing the evolution of my images on MatPlotLib####\n\nI = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\nsel = cv2.cvtColor(sel, cv2.COLOR_BGR2RGB)\nFinal = cv2.cvtColor(C, cv2.COLOR_BGR2RGB)\nImages = [I, RU, B, NewMask, sel, Final]\nTitles = [\"Original Image\", \"Colour Space\", \"Threshold\", \"Morphology\", \"Final Image\",\"Cropped\"]\n\nfor x in range(6):\n\tplt.subplot(2, 3, x+1)\n\tif Titles[x] == \"Threshold\" or Titles[x] == \"Morphology\":\n\t\tplt.imshow(Images[x], cmap='gray')\n\telse:\n\t\tplt.imshow(Images[x])\n\tplt.axis(\"off\")\n\tplt.title(Titles[x])\n\n\nplt.show() \n\n#Displaying Final Image\ncv2.imshow(\"Displaying Final Image\", C)\nkey = cv2.waitKey(0)\n\n####Results####\n#I attempted using equalisiation on the image before thresholding the image however I did not get the desired results.\n#So I decided to remove equalisation. The below code is the code I was using for it.\n\n#H = cv2.equalizeHist(new)\n#values1 = H.ravel()\n\n#I then tried to use kernels to blur more of the pixels that were not part of the shark. 
However, I also did not get the desired results here.\n#The code I was attempting to do this with was\n\n#kernel = np.ones((5,5),np.float32)/25\n#F = cv2.filter2D(sel,ddepth=-1, kernel = kernel)\n\n#As I was trying to crop the picture accourding to the sharks location, I found the boundingrect function.\n#https://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=boundingrect\n \n","sub_path":"Image Test/Image_Processing_Test/Reference Files/Random/SHAR.py","file_name":"SHAR.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552427080","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom statsmodels.tools.eval_measures import mse\nfrom statsmodels.tsa.ar_model import AR\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\n\n\ndef load_df(filename='data.csv'):\n df = pd.read_csv(filename)\n df['Date'] = pd.to_datetime(df['Date'])\n return df\n\n\ndef group_df(df, filename='data_grouped.csv'):\n \"\"\"\n Create and save new pd.DataFrame from given frame\n with thefts per day, sorted by 'Date' column\n \"\"\"\n\n df['Date'] = df['Date'].dt.date\n df = df[df['Primary Type'] == 'THEFT']\n\n new_df = pd.DataFrame(columns=['Date', 'Theft count'])\n new_df['Date'] = df['Date'].unique()\n new_df['Theft count'] = [len(df[df['Date'] == date]) for date in new_df['Date']]\n new_df = new_df.sort_values('Date')\n new_df.to_csv(filename, index=False)\n\n\ndef show_dynamics(df):\n \"\"\" Show thefts frequency dynamics \"\"\"\n\n df['Theft count'].plot()\n plt.title('Thefts frequency dynamics')\n plt.xlabel('Time')\n plt.ylabel('Frequency')\n plt.show()\n\n\ndef compare(df, forward=10):\n \"\"\"\n Predict next %forward% time series values\n by specified methods and compare results by MSE metric\n \"\"\"\n\n comparison = {}\n train, test = df[:-forward], df[-forward:]\n test_print = [round(value, 2) for value in list(test)]\n\n model = AR(train).fit()\n comparison['AR'] = model.predict(len(train), len(train) + forward - 1)\n\n model = ARMA(train, order=(0, 1)).fit(disp=False)\n comparison['MA'] = model.predict(len(train), len(train) + forward - 1)\n\n model = ExponentialSmoothing(train).fit()\n comparison['ES'] = model.predict(len(train), len(train) + forward - 1)\n\n for key in comparison:\n prediction_print = [round(value, 2) for value in list(comparison[key])]\n print('Method: {0}\\n\\nTrue:\\n{1}\\n\\nPrediction:\\n{2}\\n'.format(key, test_print, prediction_print))\n comparison[key] = mse(test, comparison[key])\n\n print('MSE scores:', comparison)\n\n\nif __name__ == '__main__':\n data = load_df(filename='data_grouped.csv')\n # show_dynamics(data)\n\n compare(data['Theft count'])\n","sub_path":"forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650599647","text":"import csv,requests,json\nfrom pprint import pprint\ntoken = \"224402655546228\"\nbaseURL = \"https://superheroapi.com/api/\" + token +\"/search/\"\n\nresponse = requests.get(baseURL + \"superman\")\n\n\n\n\nfirst_line = True\nallheroes = []\nkeys = []\nskiphero = 220\n\nwith open('heroes_information.csv', newline='') as csvfile:\n heroes = csv.reader(csvfile, delimiter=',')\n for herocount, row in enumerate(heroes, -1):\n if first_line is True:\n keys = row\n first_line = False\n else:\n if herocount < 
skiphero:\n continue\n hero = {}\n print(\"current at hero number\", herocount)\n for i,key in enumerate(keys[1:], 1):\n hero[key] = row[i]\n if key == \"name\":\n name = row[i]\n print(\"Getting img link for \", name)\n response = requests.get(baseURL + name)\n object = response.json()\n print(object)\n if object[\"response\"] != \"error\":\n url = object[\"results\"][0][\"image\"][\"url\"]\n hero[\"imgURL\"] = url\n\n # deal with api here\n\n #\n allheroes.append(hero)\n with open(\"heroes220.json\",\"w\") as f:\n json.dump(allheroes, f, indent=2)\n first_hero = False\n # with open('heroes_'+str(herocount)+'.json', 'w') as f:\n # json.dump(allheroes, f, indent=2)\n # first_hero = False\n","sub_path":"my-work/superhero_python/getImg.py","file_name":"getImg.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"329709828","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2016 Continuum Analytics, Inc.\n#\n# May be copied and distributed freely only as part of an Anaconda or\n# Miniconda installation.\n# -----------------------------------------------------------------------------\n\"\"\"\nModule in charge of the configuration settings.\n\nIt uses a modified version of Python's configuration parser.\n\"\"\"\n\n# Standard library imports\nimport os\n\n# Local imports\nfrom anaconda_navigator import __version__\nfrom anaconda_navigator.config.base import get_conf_path, get_home_dir\nfrom anaconda_navigator.config.main import CONF\n\n# FLAGS\nDEV = 'dev' in __version__\nTEST_CI = os.environ.get('TEST_CI', False)\n\n\nHOME_PATH = get_home_dir()\nCONF_PATH = get_conf_path()\nCONTENT_PATH = os.path.join(CONF_PATH, 'content')\nCONTENT_JSON_PATH = os.path.join(CONTENT_PATH, 'content.json')\nIMAGE_ICON_SIZE = (256, 256)\nIMAGE_DATA_PATH = os.path.join(CONF_PATH, 'images')\nDEFAULT_PROJECT_PATH = os.path.join(CONF_PATH, 'default_project')\nCHANNELS_PATH = os.path.join(CONF_PATH, 'channels')\nDEVTOOLS_PATH = os.path.join(CONF_PATH, 'devtools')\nPROJECT_YAML_FILE = 'project.yaml'\nPROJECT_ENVS_FOLDER = 'envs'\nDEFAULT_ENV = 'default'\n\nVALID_DEV_TOOLS = ['notebook', 'qtconsole', 'spyder']\nLOG_FOLDER = os.path.join(CONF_PATH, 'logs')\nLOG_FILENAME = 'navigator.log'\n\n\ndef get_projects_path():\n \"\"\"Return the default project and application projects path.\"\"\"\n paths = CONF.get('main', 'projects_path', [])\n\n if os.path.isdir(DEFAULT_PROJECT_PATH):\n paths.append(DEFAULT_PROJECT_PATH)\n\n if os.path.isdir(DEVTOOLS_PATH):\n for d in os.listdir(DEVTOOLS_PATH):\n p = os.sep.join([DEVTOOLS_PATH, d])\n paths.append(p)\n\n return paths\n\n\n# --- Local testing\n# -----------------------------------------------------------------------------\ndef test(): # pragma : no cover\n \"\"\"Local main testing.\"\"\"\n print(get_projects_path())\n\n\nif __name__ == \"__main__\": # pragma : no cover\n test()\n","sub_path":"site-packages/anaconda_navigator/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541374352","text":"\"\"\"\n Module name: starter\n Description: Script to trigger the data flow and start the ui server.\n\"\"\"\n\nimport os\nimport sys\n\nimport psutil\nimport time\nimport subprocess\nfrom configparser import SafeConfigParser\n\n__directory = 
os.path.dirname(os.path.abspath(__file__))\nsys.path.append(__directory + \"/..\")\nsys.path.append(__directory + \"/../..\")\n\nfrom pybcoin.DataCollector.controller_collector import ControllerCollector # noqa\nfrom pybcoin.SentimentAnalyzer.sentiment_scorer import SentimentAnalyzer # noqa\nfrom pybcoin.ModelForecast.btc_model import BtcModelPrediction # noqa\n\n\ndef start_data_collection(config):\n try:\n controller = ControllerCollector(config)\n controller.data_collection_pipeline()\n except Exception as e:\n # self.logger.error(e)\n print(e)\n\n\ndef start_sentiment_analyzer(config):\n try:\n analyzer = SentimentAnalyzer(config)\n analyzer.sentiment_scorer(keyword='tweets')\n analyzer.sentiment_scorer(keyword='reddit_comments')\n except Exception as e:\n # self.logger.error(e)\n print(e)\n\n\ndef start_forecast(config):\n try:\n model = BtcModelPrediction(config)\n model.final_prediction()\n except Exception as e:\n # self.logger.error(e)\n print(e)\n\n\ndef kill(pid):\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n\n\nif __name__ == \"__main__\":\n\n config_file = './pybcoin/config/config.ini'\n config = SafeConfigParser()\n config.read(config_file)\n dayinsecs = 86400\n delay = 10\n spawn_command = 'python pybcoin/home.py'\n while True:\n print('Collecting data...')\n start_data_collection(config_file)\n\n print('Starting sentiment analyzer...')\n start_sentiment_analyzer(config)\n\n print('Predicting the next 24hr movement...')\n start_forecast(config)\n\n p = subprocess.Popen(spawn_command, shell=True)\n time.sleep(dayinsecs)\n kill(p.pid)\n time.sleep(delay)\n","sub_path":"pybcoin/starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142011467","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 10 09:15:10 2018\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nnode1 = tf.constant(3.0, tf.float32)\r\nnode2 = tf.constant(4.0)\r\nprint(node1, node2)\r\n\r\nsess = tf.Session()\r\nprint(sess.run([node1, node2]))\r\n\r\n\r\na = tf.placeholder(tf.float32)\r\nb = tf.placeholder(tf.float32)\r\nadder_node = a + b\r\n\r\nprint(a)\r\nsess = tf.Session()\r\nprint(sess.run(adder_node, {a:[1,3], b:[2,4]}))\r\n\r\nprint(sess.run(adder_node, {a:[4,5], b:[3,4]}))\r\n\r\nprint(sess.run(adder_node, {a:[4,5,7], b:[3,4,2]}))\r\n\r\nW = tf.Variable([.3], tf.float32)\r\nb = tf.Variable([-.3], tf.float32)\r\nx = tf.placeholder(tf.float32)\r\ny = tf.placeholder(tf.float32)\r\nlinear_model = W*x + b\r\nsquared_deltas = tf.square(linear_model-y)\r\nloss = tf.reduce_mean(squared_deltas)\r\noptimizer = tf.train.GradientDescentOptimizer(0.01)\r\ntrain = optimizer.minimize(loss)\r\n\r\n\r\ninit = tf.global_variables_initializer()\r\nsess = tf.Session()\r\nsess.run(init)\r\nfor i in range(100):\r\n print(sess.run(train, {x:[1,2,3,4], y:[0, -1, -2, -3]}))\r\n \r\nprint(sess.run([W, b]))\r\n \r\n\r\n\r\n","sub_path":"Modules/Module-2/code/tensorflow_example.py","file_name":"tensorflow_example.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"391060448","text":"import FWCore.ParameterSet.Config as cms\n\nfrom HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *\n\nakCs4PFJetAnalyzer = inclusiveJetAnalyzer.clone(\n jetTag = cms.InputTag(\"slimmedJets\"),\n genjetTag = 
'slimmedGenJets',\n    rParam = 0.4,\n    fillGenJets = True,\n    isMC = True,\n    genParticles = cms.untracked.InputTag(\"prunedGenParticles\"),\n    eventInfoTag = cms.InputTag(\"generator\"),\n    jetName = cms.untracked.string(\"akCs4PF\"),\n    bTagJetName = cms.untracked.string(\"akCs4PF\"),\n    genPtMin = cms.untracked.double(5),\n    hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),\n    )\n\n","sub_path":"CMSSW_11_2_3/src/HeavyIonsAnalysis/JetAnalysis/python/akCs4PFJetSequence_pponPbPb_mc_cff.py","file_name":"akCs4PFJetSequence_pponPbPb_mc_cff.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"108539281","text":"# Exploded pie chart\nfrom pylab import *\n\n# make a square figure and axes\nfigure(1, figsize=(6, 6))\nax = axes(([0.1, 0.1, 0.8, 0.8]))\n\n# the slices will be ordered and plotted counter-clockwise\nlabels = ('Spring', 'Summer', 'Autumn', 'Winter')\nprint(labels)\n\n# fraction are either x/sum(x) or x if sum(x) <= 1\nx = [20, 10, 15, 12]\n\n# explode must be len(x) sequence or None\nexplode = (0.1, 0.1, 0.1, 0.1)\n\npie(x, explode=explode, labels=labels, autopct='%.2f%%', startangle=67)\n\nrcParams['font.sans-serif']=['SimHei'] # needed to display the Chinese characters in the title correctly\ntitle(u'Rainy days by season(数据是瞎编的)')\nsavefig('test310.png')\nshow()","sub_path":"chapter03/10_ExplodedPieChart.py","file_name":"10_ExplodedPieChart.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"572472339","text":"from typing import TypeVar, Callable, Tuple, List, Dict, Optional\n\nfrom graph.Graph import Graph\nfrom helpers.Utils import unique_id_generator\n\nN = TypeVar('N')\nND = TypeVar('ND')\nE = TypeVar('E')\nED = TypeVar('ED')\n\nGET_EDGE_WEIGHT_FUNC_TYPE =\\\n    Callable[\n        [\n            E  # edge ID\n        ],\n        float  # edge weight\n    ]\n\n\n# MARKDOWN\ndef find_max_path(\n        graph: Graph[N, ND, E, ED],\n        current_node: N,\n        end_node: N,\n        cache: Dict[N, Optional[Tuple[List[E], float]]],\n        get_edge_weight_func: GET_EDGE_WEIGHT_FUNC_TYPE\n) -> Optional[Tuple[List[E], float]]:\n    if current_node == end_node:\n        return [], 0.0\n    alternatives = []\n    for edge_id in graph.get_outputs(current_node):\n        edge_weight = get_edge_weight_func(edge_id)\n        child_n = graph.get_edge_to(edge_id)\n        if child_n in cache:\n            res = cache[child_n]\n        else:\n            res = find_max_path(\n                graph,\n                child_n,\n                end_node,\n                cache,\n                get_edge_weight_func\n            )\n            cache[child_n] = res\n        if res is None:\n            continue\n        path, weight = res\n        path = [edge_id] + path\n        weight = edge_weight + weight\n        res = path, weight\n        alternatives.append(res)\n    if len(alternatives) == 0:\n        return None  # no path to end, so return None\n    else:\n        return max(alternatives, key=lambda x: x[1])  # choose path to end with max weight\n# MARKDOWN\n\n\ndef graph_to_graphviz(\n        graph: Graph[N, ND, E, ED],\n        get_edge_weight_func: GET_EDGE_WEIGHT_FUNC_TYPE\n) -> str:\n    dot_subgraph = 'digraph {\\n'\n    dot_subgraph += '  node [shape=plaintext]\\n'\n    for node_id in graph.get_nodes():\n        dot_subgraph += f'  \"{node_id}\"\\n'\n    for edge_id in graph.get_edges():\n        from_id, to_id, data = graph.get_edge(edge_id)\n        weight = get_edge_weight_func(data)\n        dot_subgraph += f'  \"{from_id}\" -> \"{to_id}\" [label=\"{weight}\"]\\n'\n    dot_subgraph += '}'\n    return dot_subgraph\n\n\ndef main():\n    print(\"
\", end=\"\\n\\n\")\n print(\"`{bm-disable-all}`\", end=\"\\n\\n\")\n try:\n edges = [tuple(v for v in s.split()) for s in input().split(\",\")]\n nodes = {n1 for n1, _, _ in edges} | {n2 for _, n2, _ in edges}\n graph = Graph()\n for n in nodes:\n graph.insert_node(n)\n edge_id_gen_func = unique_id_generator('E')\n for n1, n2, weight in edges:\n graph.insert_edge(edge_id_gen_func(), n1, n2, float(weight))\n from_node = input()\n to_node = input()\n print(f'Given the following graph...', end=\"\\n\\n\")\n print(f'````{{dot}}\\n{graph_to_graphviz(graph, lambda e: str(e))}\\n````', end='\\n\\n')\n path, weight = find_max_path(\n graph,\n from_node,\n to_node,\n {},\n lambda edge_id: graph.get_edge_data(edge_id)\n )\n path_as_nodes = [graph.get_edge_from(path[0])] + [graph.get_edge_to(e) for e in path]\n print(f'... the path with the max weight between {from_node} and {to_node} ...', end='\\n\\n')\n print(f' * Maximum path = {\" -> \".join(path_as_nodes)}', end='\\n')\n print(f' * Maximum weight = {weight}', end='\\n')\n finally:\n print(\"
\", end=\"\\n\\n\")\n print(\"`{bm-enable-all}`\", end=\"\\n\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs/data/learn/Bioinformatics/output/ch5_code/src/find_max_path/FindMaxPath_DPCache.py","file_name":"FindMaxPath_DPCache.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"20902755","text":"import re\n\nfrom django import forms\n\n\nFIELD_TYPES = [\n (forms.URLField, \"url\"),\n (forms.EmailField, \"email\"),\n (forms.IntegerField, \"digits\"),\n (forms.DecimalField, \"number\"),\n (forms.FloatField, \"number\"),\n]\n\n\nFIELD_ATTRS = [\n (\"min_length\", \"minlength\"),\n (\"max_length\", \"maxlength\"),\n (\"min_value\", \"min\"),\n (\"max_value\", \"max\"),\n]\n\n\ndef update_widget_attrs(field):\n attrs = field.widget.attrs\n if field.required:\n attrs[\"required\"] = \"required\"\n\n error_message = field.error_messages.get('required', None)\n if error_message:\n attrs[\"parsley-error-message\"] = error_message\n\n if isinstance(field, forms.RegexField):\n attrs.update({\"parsley-regexp\": field.regex.pattern})\n\n error_message = field.error_messages.get('invalid', None)\n if error_message:\n attrs[\"parsley-error-message\"] = error_message\n\n if field.regex.flags & re.IGNORECASE:\n attrs.update({\"data-regexp-flag\": \"i\"})\n if isinstance(field, forms.MultiValueField):\n for subfield in field.fields:\n update_widget_attrs(subfield)\n\n # Set data-* attributes for parsley based on Django field attributes\n for attr, data_attr, in FIELD_ATTRS:\n if getattr(field, attr, None):\n attrs[\"parsley-{0}\".format(data_attr)] = getattr(field, attr)\n\n error_message = field.error_messages.get(attr, None)\n if error_message:\n attrs[\"parsley-{0}-message\".format(data_attr)] = error_message\n\n # Set data-type attribute based on Django field instance type\n for klass, field_type in FIELD_TYPES:\n if isinstance(field, klass):\n attrs[\"parsley-type\"] = field_type\n\n error_message = field.error_messages.get('invalid', None)\n if error_message:\n attrs[\"parsley-type-{0}-message\".format(field_type)] = error_message\n\n\ndef parsleyfy(klass):\n \"A decorator to add data-* attributes to your form.fields\"\n old_init = klass.__init__\n\n def new_init(self, *args, **kwargs):\n old_init(self, *args, **kwargs)\n for _, field in self.fields.items():\n update_widget_attrs(field)\n extras = getattr(getattr(self, 'Meta', None), 'parsley_extras', {})\n for field_name, data in extras.items():\n for key, value in data.items():\n if field_name not in self.fields:\n continue\n attrs = self.fields[field_name].widget.attrs\n if key == 'equalto':\n # Use HTML id for data-equalto\n value = '#' + self[value].id_for_label\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n attrs['parsley-%s' % key] = value\n klass.__init__ = new_init\n\n try:\n klass.Media.js += (\"parsley/js/parsley-standalone.min.js\",)\n except AttributeError:\n class Media:\n js = (\n \"parsley/js/parsley-standalone.min.js\",\n )\n klass.Media = Media\n\n return klass\n","sub_path":"parsley/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262806768","text":"import copy\nfrom ast import literal_eval as create_tuple\nfrom importlib import import_module\nfrom systemapps.menu.models import Menu, MenuItem\nfrom django import template\nfrom django.core.exceptions 
import ImproperlyConfigured\nfrom django.core.cache import cache\n\nregister = template.Library()\n\n\n@register.tag(name='menu')\ndef build_menu(parser, token):\n try:\n tag_name, menu_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"%r tag requires a single argument\" % token.contents.split()[0])\n return MenuObject(menu_name)\n\n\nclass MenuObject(template.Node):\n def __init__(self, menu_name):\n self.menu_name = menu_name\n self.request = None\n\n def get_menuitems(self):\n from django.conf import settings\n cache_time = getattr(settings, 'MENU_CACHE_TIME', 1800)\n debug = getattr(settings, 'DEBUG', False)\n cache_key = 'menu-items/{menu_name}'.format(menu_name=self.menu_name)\n\n menuitems = []\n if cache_time >= 0 and not debug:\n menuitems = cache.get(cache_key, [])\n if not menuitems:\n menuitems = MenuItem.objects.filter(menu__name=self.menu_name)\n if cache_time >= 0 and not debug:\n cache.set(cache_key, menuitems, cache_time)\n\n return menuitems\n\n def get_callable(self, func_or_path):\n # Receives a dotted path or a callable, Returns a callable or None\n if callable(func_or_path):\n return func_or_path\n module_name = '.'.join(func_or_path.split('.')[:-1])\n function_name = func_or_path.split('.')[-1]\n _module = import_module(module_name)\n func = getattr(_module, function_name)\n return func\n\n def is_validated(self, menuitem):\n try:\n validators = create_tuple(menuitem.validators)\n except (ValueError, SyntaxError):\n validators = None\n\n if not validators:\n return True\n if not isinstance(validators, (list, tuple)):\n raise ImproperlyConfigured(\"validators must be a list\")\n\n result_validations = []\n for validator in validators:\n if isinstance(validator, tuple):\n if len(validator) <= 1:\n raise ImproperlyConfigured(\"You are passing a tuple validator without args %s\" % str(validator))\n func = self.get_callable(validator[0])\n # Using a python slice to get all items after the first to build function args\n args = validator[1:]\n # Pass the request as first arg by default\n result_validations.append(func(self.request, *args))\n else:\n func = self.get_callable(validator)\n result_validations.append(func(self.request)) # pragma: no cover\n return all(result_validations)\n\n def validate_menu(self):\n # A generator thet return validated menuitem only.\n for menuitem in self.get_menuitems():\n if self.is_validated(menuitem):\n yield copy.copy(menuitem)\n\n def generate_menu(self):\n visible_menu = []\n for item in self.validate_menu():\n visible_menu.append(item)\n return visible_menu\n\n def render(self, context):\n self.request = context['request']\n context['nodes'] = self.generate_menu()\n return ''\n","sub_path":"systemapps/menu/templatetags/menu_tags.py","file_name":"menu_tags.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571885398","text":"#! 
/usr/bin/python\n\n'''\nCreated on 2013-05-20\n\n@author: crblackw\n'''\n\nWRITE_TO_FILE = 0\nSHOW_PLOT= 1\n\nimport datetime as dt\nimport time\nimport pymysql as mdb\nimport numpy as np\nimport matplotlib as mpl\nif WRITE_TO_FILE:\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\ndef main():\n\n data = []\n\n # Post to database\n con = mdb.connect(host='192.168.1.143', db='monitor', user='crblackw')\n \n with con:\n cur = con.cursor()\n #cur.execute(\"SELECT temp_actual FROM sensor1 GROUP BY HOUR(datetime) LIMIT 0, 30\")\n for x in range(1,12):\n cur.execute(\"SELECT temp_actual FROM sensor1 WHERE MONTH(datetime) = %i\" %(x))\n data.append(np.array(cur.fetchall())) \n con.close()\n \n plt.boxplot(data)\n labels = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', \n 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')\n plt.xticks(range(1,13),labels, rotation=15)\n plt.show()\n \n '''\n \n d = dt.date.today() - dt.timedelta(days=1)\n t_start = dt.time(0,0,0,0)\n t_end = dt.time(23,59,59,999999)\n d_start = dt.datetime.combine(d, t_start)\n d_end = dt.datetime.combine(d, t_end)\n \n \n \n x = data[:,0]\n y_temp = data[:,1]\n y_photo = data[:,2]\n \n x_sec = []\n \n for i in x:\n i = time.mktime(i.timetuple())\n x_sec.append(i)\n \n tck_temp = interpolate.splrep(x_sec,y_temp,s=6)\n y_temp_spline = interpolate.splev(x_sec,tck_temp,der=0)\n \n tck_photo = interpolate.splrep(x_sec,y_photo,s=0)\n y_photo_spline = interpolate.splev(x_sec,tck_photo,der=0)\n \n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n ax.set_title('Recorded Home Data for ' + d.strftime('%Y-%m-%d'))\n ax.set_xlim(d_start, d_end)\n ax.set_xlabel('Time')\n ax.set_ylabel(r'Temperature [$^\\circ$C]')\n ax.plot(x,y_temp_spline, c='tomato',label='Temperature Spline')\n ax.scatter(x,y_temp,c='black',s=1,label='Raw Temp Data')\n \n ax2 = ax.twinx()\n ax2.set_ylabel(\"Photo [unitless]\")\n ax2.set_xlim(d_start, d_end)\n \n ax2.plot(x,y_photo_spline, c='blue', label='Photo Spline')\n #ax2.scatter(x,y_photo,c='black',s=1,label='Raw Photo Data')\n \n ax.legend(loc=2)\n ax2.legend(loc=1)\n \n fig.set_size_inches(18.5,10.5)\n \n plt.draw()\n \n if WRITE_TO_FILE:\n plt.savefig('/home/crblackw/public_html/graphs/' + d.strftime('%Y-%m-%d') + '.png',dpi=100)\n \n if SHOW_PLOT:\n plt.show()\n \n '''\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Remote/graph_year_py3.py","file_name":"graph_year_py3.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"302605336","text":"import numpy as np\nimport time\nimport random\nfrom copy import deepcopy\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn import MSELoss, BCELoss\nfrom sklearn.metrics import accuracy_score\nfrom matplotlib import pyplot as plt\n\nfrom IncrementalLearning import utils\n\nclass TPCP():\n def __init__(self,memory=2000,device='cuda',params=None,plot=False):\n self.memory = memory\n self.device = device\n self.params = params\n self.plot = plot\n self.nets = []\n self.discriminator = None\n\n def __FCClassifier__(self,data,net,task,discrimination=False,print_=True):\n if print_:\n print(f'\\n ### FC Layer ###')\n if discrimination:\n print(' # FC Layer Discriminating ')\n else:\n print(' # FC Layer Predicting ')\n net.eval()\n \n running_corrects = 0.0\n label_list, predictions = [], []\n\n with torch.no_grad():\n loader = DataLoader(data, batch_size=512, shuffle=False, num_workers=4, drop_last=False)\n for images, 
labels in loader:\n images = images.to(self.device)\n \n if discrimination:\n labels = torch.tensor([ int(label/10) for label in labels ])\n else:\n labels = torch.tensor([ label-(task*10) for label in labels ])\n\n labels = labels.to(self.device)\n\n outputs = torch.sigmoid(net(images))\n # Get predictions\n _, preds = torch.max(outputs.data, 1)\n # Update Corrects\n running_corrects += torch.sum(preds == labels.data).data.item()\n \n for prediction,label in zip(preds,labels):\n predictions.append(np.array(prediction.cpu()))\n label_list.append(np.array(label.cpu()))\n\n # Calculate Accuracy\n accuracy = running_corrects / len(data)\n \n if print_:\n print(f' # FC Layer Accuracy: {accuracy}')\n\n return accuracy, predictions, label_list\n\n def __trainTask__(self,data,net,n_classes):\n print('Training task')\n BATCH_SIZE = self.params['BATCH_SIZE']\n MOMENTUM = self.params['MOMENTUM']\n WEIGHT_DECAY = self.params['WEIGHT_DECAY']\n EPOCHS = self.params['EPOCHS']\n LR = self.params['LR']\n milestones = set([ int(7/10*EPOCHS), int(9/10*EPOCHS) ])\n\n # Define Loss\n criterion = MSELoss() \n # Define Dataloader\n loader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)\n\n net.fc = nn.Linear(64,10)\n net = net.to(self.device)\n optimizer = torch.optim.SGD(net.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)\n \n for epoch in range(EPOCHS):\n \n # LR step down policy\n if epoch+1 in milestones:\n for g in optimizer.param_groups:\n g['lr'] = g['lr']/5\n \n # Set module in training mode\n net.train() \n\n running_loss = 0.0\n for images, labels in loader:\n # Data augmentation\n images = images.to(self.device)\n images = torch.stack([ utils.augmentation(image) for image in images ])\n # Get One Hot Encoding for the labels\n labels = torch.tensor([ label-(n_classes-10) for label in labels ])\n labels = utils.getOneHot(labels,10)\n labels = labels.to(self.device)\n\n # Zero-ing the gradients\n optimizer.zero_grad()\n # Forward pass to the network\n outputs = torch.sigmoid(net(images))\n\n # Compute Losses\n tot_loss = criterion(outputs,labels)\n \n # Update Running Loss \n running_loss += tot_loss.item() * images.size(0)\n\n tot_loss.backward() \n optimizer.step() \n\n # Train loss of current epoch\n train_loss = running_loss / len(data)\n print('\\r # Epoch: {}/{}, LR = {}, Train loss = {}'.format(epoch+1, EPOCHS, optimizer.param_groups[0]['lr'], round(train_loss,5)),end='')\n print()\n\n self.nets.append(deepcopy(net))\n\n return net\n\n def __trainDiscriminator__(self,net,exemplars,n_tasks,test_data):\n print('Training discriminator')\n BATCH_SIZE = self.params['BATCH_SIZE']\n MOMENTUM = self.params['MOMENTUM']\n WEIGHT_DECAY = self.params['WEIGHT_DECAY']\n EPOCHS = self.params['EPOCHS2']\n LR = self.params['LR2']\n milestones = set([ int(7/10*EPOCHS), int(9/10*EPOCHS) ])\n\n data = self.__formatExemplars__(exemplars)\n\n # Define Loss\n criterion = MSELoss() \n # Define Dataloader\n loader = DataLoader(data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)\n\n net.fc = nn.Linear(64,n_tasks)\n\n net = net.to(self.device)\n optimizer = torch.optim.SGD(net.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)\n\n for epoch in range(EPOCHS):\n \n # LR step down policy\n if epoch+1 in milestones:\n for g in optimizer.param_groups:\n g['lr'] = g['lr']/5\n \n # Set module in training mode\n net.train() \n\n running_loss = 0.0\n for images, labels in loader:\n # Data augmentation\n images = images.to(self.device)\n 
images = torch.stack([ utils.augmentation(image) for image in images ])\n # Get One Hot Encoding for the labels\n labels = torch.tensor([ int(label/10) for label in labels ])\n labels = utils.getOneHot(labels,n_tasks)\n labels = labels.to(self.device)\n\n # Zero-ing the gradients\n optimizer.zero_grad()\n # Forward pass to the network\n outputs = torch.sigmoid(net(images))\n\n # Compute Losses\n tot_loss = criterion(outputs,labels)\n \n # Update Running Loss \n running_loss += tot_loss.item() * images.size(0)\n\n tot_loss.backward() \n optimizer.step() \n\n # Train loss of current epoch\n train_loss = running_loss / len(data)\n acc, _, _ = self.__FCClassifier__(test_data,net,n_tasks,True,False)\n\n print('\\r # Epoch: {}/{}, LR = {}, Train loss = {}, Test accuracy = {}'.format(epoch+1, EPOCHS, optimizer.param_groups[0]['lr'], round(train_loss,5), round(acc,5)),end='')\n print()\n\n return net\n\n def __randomExemplarSet__(self,data,n_classes):\n print('\\n ### Construct Random Exemplar Set ###')\n if n_classes != 10:\n m = int(self.memory/(n_classes-10))\n else:\n m = int(self.memory/(n_classes))\n print(f' # Exemplars per class: {m}')\n\n # Initialize lists of images and exemplars for each class\n class_map = utils.fillClassMap(data,n_classes)\n exemplars = dict.fromkeys(np.arange(n_classes-10,n_classes))\n for label in exemplars:\n exemplars[label] = []\n\n for label in class_map:\n indexes = random.sample(range(len(class_map[label])),m) \n for idx in indexes:\n exemplars[label].append(class_map[label][idx])\n\n return exemplars\n \n def __reduceExemplarSet__(self,exemplars,n_classes):\n print('\\n ### Reduce Exemplar Set ###')\n m = int(self.memory/n_classes)\n print(f' # Exemplars per class: {m}')\n for key in exemplars:\n exemplars[key] = exemplars[key][:m]\n \n return exemplars\n \n # dict to list\n def __formatExemplars__(self,exemplars):\n new_exemplars = []\n for key in exemplars:\n for item in exemplars[key]:\n new_exemplars.append([item[0],item[1]])\n\n return new_exemplars\n\n def run(self,train_batches,test_batches,net,net2):\n t0 = time.time()\n exemplars = {}\n accuracy_per_batch = []\n \n for idx, batch in enumerate(train_batches):\n print(f'\\n##### BATCH {idx+1} #####')\n n_classes = (idx+1)*10\n \n new_exemplars = self.__randomExemplarSet__(batch,n_classes)\n exemplars.update(new_exemplars)\n utils.printTime(t0)\n\n if idx != 0:\n self.discriminator = self.__trainDiscriminator__(self.discriminator,exemplars,idx+1,test_batches[idx])\n _, tasks, _ = self.__FCClassifier__(test_batches[idx],self.discriminator,n_classes,True)\n \n # Update Representation\n #net = self.__trainTask__(batch,net,n_classes)\n utils.printTime(t0)\n\n if idx == 0:\n self.discriminator = self.__trainTask__(batch,net,n_classes)\n \n # Classifier\n '''\n if idx != 0:\n dictionary = dict.fromkeys([i for i in range(idx+1)])\n\n for key in dictionary:\n dictionary[key] = []\n\n for item, task in zip(test_batches[idx], tasks):\n print(task)\n dictionary[int(task)].append(item)\n\n tot_acc = 0.0\n\n for task in dictionary:\n\n acc, _, _ = self.__FCClassifier__(dictionary[task],self.nets[task],task,False)\n tot_acc += acc\n\n print(f\"Total Accuracy: {tot_acc/(idx+1)}\")\n '''\n \n # Exemplars managing\n exemplars = self.__reduceExemplarSet__(exemplars,n_classes)\n utils.printTime(t0)\n\n return accuracy_per_batch\n\n","sub_path":"TPCP.py","file_name":"TPCP.py","file_ext":"py","file_size_in_byte":9372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"377770323","text":"# coding=utf-8\r\nfrom gfirefly.server.globalobject import webserviceHandle\r\nfrom flask import request\r\nfrom define import *\r\nimport user\r\nimport json\r\nimport jsonmanager\r\nfrom glb import strLen\r\n\r\n@webserviceHandle('/test',methods=['GET', 'POST'])\r\ndef test():\r\n '''测试案例\r\n '''\r\n if request.method == 'POST':\r\n return request.files['id']\r\n else:\r\n print(request.args)\r\n return \"hello,test.\"\r\n\r\n#用户注册\r\n#用户名,昵称,密码\r\n@webserviceHandle('/userregister',methods=['GET'])\r\ndef userregister():\r\n '''用户注册\r\n '''\r\n if request.args.has_key('name') and request.args.has_key('nickname') and request.args.has_key('pw'):\r\n name = request.args['name']\r\n if strLen(name) < 6:\r\n rt = dict(code=ZERO_USERNAMELEN_ERROR)\r\n return json.dumps(rt)\r\n nickname = request.args['nickname']\r\n if strLen(nickname) < 6:\r\n rt = dict(code=ZERO_USERNICKNAMELEN_ERROR)\r\n return json.dumps(rt)\r\n pw = request.args['pw']\r\n if strLen(pw) < 8:\r\n rt = dict(code=ZERO_USERNICKNAMELEN_ERROR)\r\n return json.dumps(rt)\r\n return user.createuser(name,nickname,pw)\r\n else:\r\n rt = dict(code=ZERO_DATA_ERROR)\r\n return json.dumps(rt)\r\n\r\n#用户登录\r\n#用户名,密码,token\r\n#用户名和密码可以正常登陆或者用户名和正确的token也可以登陆\r\n@webserviceHandle('/userlogin',methods=['GET'])\r\ndef userlogin():\r\n '''用户登陆\r\n '''\r\n if request.args.has_key('name'):\r\n name = request.args['name']\r\n pw = ''\r\n token = ''\r\n if request.args.has_key('pw'):\r\n pw = request.args['pw']\r\n if request.args.has_key('token'):\r\n token = request.args['token']\r\n return user.login(name,pw,token)\r\n else:\r\n rt = dict(code=ZERO_DATA_ERROR)\r\n return json.dumps(rt)\r\n\r\n#获取表格数据\r\n@webserviceHandle('/table',methods=['GET'])\r\ndef table():\r\n '''获取表格数据\r\n '''\r\n if request.args.has_key('name'):\r\n name = request.args['name']\r\n return jsonmanager.loadJsonFile(name)\r\n else:\r\n rt = dict(code=ZERO_FILENAME_ERROR)\r\n return json.dumps(rt)\r\n\r\n\r\n","sub_path":"app/webtest.py","file_name":"webtest.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"446515905","text":"from matplotlib import pyplot as pl\nfrom matplotlib import style\nfrom matplotlib.ticker import MultipleLocator\nimport matplotlib.patheffects as path_effects\nfrom K2fov import plot\n\nfrom fieldplot import annotate_target\n\nCAMPAIGN = 16\n\nstyle.use('gray.mplstyle')\np = plot.K2FootprintPlot(figsize=(11, 11))\n#p.plot_ecliptic()\np.plot_campaign(5, annotate_channels=False, facecolor='#aaaaaa', lw=0)\np.plot_campaign(CAMPAIGN, annotate_channels=False, facecolor='white', lw=1)\n\npl.annotate('C5', xy=(127, 24), xycoords='data', ha='center',\n xytext=(0, 40), textcoords='offset points',\n size=30, zorder=99999, color='#aaaaaa',\n arrowprops=dict(arrowstyle=\"simple\",\n fc=\"#aaaaaa\", ec=\"none\",\n connectionstyle=\"arc3,rad=0.0\"),\n )\n\nannotate_target(127.578771, +22.235908, \"K2-34\")\n\n\ntext = pl.text(240.2, -23.2, 'Upper Sco', style='italic',\n zorder=999, fontsize=22, va='center', ha='center')\ntext.set_path_effects([path_effects.Stroke(linewidth=4, foreground='white'),\n path_effects.Normal()])\n\n#pl.scatter(132.8250, +11.8000, zorder=899, marker='o', lw=2, s=800,\n# facecolor='black', alpha=0.5, edgecolor='None', label='M67')\n#text = pl.text(132.8250 - 0.5, +11.8000, 'M67', zorder=999,\n# fontsize=22, va='center', color='black')\n#text.set_path_effects([path_effects.Stroke(linewidth=4, 
foreground='white'),\n# path_effects.Normal()])\n\n#pl.scatter(130.1000, +19.6667, zorder=899, marker='o', lw=2, s=3500,\n# facecolor='black', alpha=0.5, edgecolor='None', label='Beehive')\n#text = pl.text(130.1000 - 0.8, +19.6667, 'M44 (Beehive)', zorder=999,\n# fontsize=22, va='center', color='black')\n#text.set_path_effects([path_effects.Stroke(linewidth=4, foreground='white'),\n# path_effects.Normal()])\n\n# Plot the Beehive cluster\nimport pandas as pd\ndf = pd.read_csv('catalogs/beehive.csv')\nfor member in df.iterrows():\n annotate_target(member[1].RA_d, member[1].DEC_d, \"\", size=10, color='#c0392b')\ntext = pl.text(130., 20.5, 'M44 (Beehive)', style='italic', color='black',\n zorder=999, fontsize=30, va='center', ha='center')\ntext.set_path_effects([path_effects.Stroke(linewidth=4, foreground='white'),\n path_effects.Normal()])\n\n# Plot the M67 cluster\ndf = pd.read_csv('catalogs/m67.csv')\nfor member in df.iterrows():\n annotate_target(member[1].RA_d, member[1].DEC_d, \"\", size=10, color='#c0392b')\ntext = pl.text(132.8250 - 0.6, +11.8000, 'M67', style='italic', color='black',\n zorder=999, fontsize=30, va='center')\ntext.set_path_effects([path_effects.Stroke(linewidth=4, foreground='white'),\n path_effects.Normal()])\n\n# Earth\nra = [136.64098, 137.61928, 138.59537, 139.56929, 140.54105,\n 141.51069, 142.47824, 143.44371, 144.40715, 159.56868]\ndec = [16.84930, 16.56961, 16.28518, 15.99608, 15.70240,\n 15.40423, 15.10166, 14.79476, 14.48364, 9.00822]\npl.plot(ra, dec, lw=5, zorder=500, c='#2980b9', ls='dashed')\ntext = pl.text(139, 15.8, 'Earth', zorder=999, style='italic',\n fontsize=24, va='center', color='#2980b9')\ntext.set_path_effects([path_effects.Stroke(linewidth=4, foreground='white'),\n path_effects.Normal()])\n\n\n\npl.suptitle('K2 Campaign {}'.format(CAMPAIGN), fontsize=44)\npl.xlim([142, 125.5])\npl.ylim([11.3, 27])\np.ax.xaxis.set_major_locator(MultipleLocator(2))\np.ax.yaxis.set_major_locator(MultipleLocator(2))\npl.tight_layout()\nfor extension in ['png', 'eps']:\n pl.savefig('k2-c{}-field.{}'.format(CAMPAIGN, extension), dpi=100)\npl.close()\n","sub_path":"plot-c16.py","file_name":"plot-c16.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"417582705","text":"# Definition for a binary tree node.\nimport json\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def delNodes(self, root, to_delete):\n \"\"\"\n :type root: TreeNode\n :type to_delete: List[int]\n :rtype: List[TreeNode]\n \"\"\"\n result = []\n to_delete_dic = set(to_delete)\n if root and root.val not in to_delete_dic:\n result.append(root)\n self._del(root, to_delete_dic, result)\n return result\n\n def _del(self, node, dic, result):\n if not node:\n return None\n node.left = self._del(node.left, dic, result)\n node.right = self._del(node.right, dic, result)\n if node.val in dic:\n if node.left:\n result.append(node.left)\n if node.right:\n result.append(node.right)\n return None\n return node\n\ndef stringToTreeNode(input):\n input = input.strip()\n input = input[1:-1]\n if not input:\n return None\n\n inputValues = [s.strip() for s in input.split(',')]\n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n 
node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n\n if index >= len(inputValues):\n break\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root\n\ndef stringToIntegerList(input):\n return json.loads(input)\n\ndef treeNodeToString(root):\n if not root:\n return \"[]\"\n output = \"\"\n queue = [root]\n current = 0\n while current != len(queue):\n node = queue[current]\n current = current + 1\n\n if not node:\n output += \"null, \"\n continue\n\n output += str(node.val) + \", \"\n queue.append(node.left)\n queue.append(node.right)\n return \"[\" + output[:-2] + \"]\"\n\ndef treeNodeArrayToString(treeNodeArray):\n serializedTreeNodes = []\n for treeNode in treeNodeArray:\n serializedTreeNode = treeNodeToString(treeNode)\n serializedTreeNodes.append(serializedTreeNode)\n return \"[{}]\".format(', '.join(serializedTreeNodes))\n\nroot = stringToTreeNode(\"[1,2,3,4,5,6,7]\")\nres = Solution().delNodes(root, [3, 5])\nprint(res)","sub_path":"src2/delete-nodes-and-return-forest/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7280194","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef fib(n): #Returns the list with the Fibonacci series up to n.\n \"\"\" Returns the list with the Fibonacci series up to n. \"\"\"\n retval=[]\n a, b = 0, 1\n while a= self._failure_threshold:\n self._state = STATE_OPEN\n self._opened = datetime.utcnow()\n logger.debug(\"call_failed-Open circuit \" + str(self.name)+ \" : \"+self._uuid)\n\n @property\n def state(self):\n if self._state == STATE_OPEN and self.open_remaining <= 0:\n return STATE_HALF_OPEN\n return self._state\n\n @property\n def open_until(self):\n \"\"\"\n The datetime, when the circuit breaker will try to recover\n :return: datetime\n \"\"\"\n return self._opened + timedelta(seconds=self._recovery_timeout)\n\n @property\n def open_remaining(self):\n \"\"\"\n Number of seconds remaining, the circuit breaker stays in OPEN state\n :return: int\n \"\"\"\n return (self.open_until - datetime.utcnow()).total_seconds()\n\n @property\n def failure_count(self):\n return self._failure_count\n\n @property\n def closed(self):\n return self.state == STATE_CLOSED\n\n @property\n def opened(self):\n return self.state == STATE_OPEN\n\n @property\n def name(self):\n return self._name\n\n def __str__(self, *args, **kwargs):\n return self._name\n\n\nclass CircuitBreakerError(ApiError):\n def __init__(self, circuit_breaker, *args, **kwargs):\n \"\"\"\n :param circuit_breaker:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n super(CircuitBreakerError, self).__init__(\"msg\",*args, **kwargs)\n self._circuit_breaker = circuit_breaker\n\n def __str__(self, *args, **kwargs):\n return 'Circuit \"%s\" OPEN until %s (%d failures, %d sec remaining)' % (\n self._circuit_breaker.name,\n self._circuit_breaker.open_until,\n self._circuit_breaker.failure_count,\n round(self._circuit_breaker.open_remaining)\n )\n\n\nclass CircuitBreakerMonitor(AbsBaseClass):\n circuit_breakers = {}\n\n @classmethod\n def register(cls, circuit_breaker):\n if circuit_breaker.name not in cls.circuit_breakers:\n sem.acquire()\n cls.circuit_breakers[circuit_breaker.name] = circuit_breaker\n sem.release()\n\n @classmethod\n def all_closed(cls):\n # type: () -> bool\n return len(list(cls.get_open())) == 0\n\n @classmethod\n def 
get_circuits(cls):\n        # type: () -> Iterable[CircuitBreaker]\n        return cls.circuit_breakers.values()\n\n    @classmethod\n    def get(cls, name):\n        # type: (AnyStr) -> CircuitBreaker\n        return cls.circuit_breakers.get(name)\n\n    @classmethod\n    def get_open(cls):\n        # type: () -> Iterable[CircuitBreaker]\n        for circuit in cls.get_circuits():\n            if circuit.opened:\n                yield circuit\n\n    @classmethod\n    def get_closed(cls):\n        # type: () -> Iterable[CircuitBreaker]\n        for circuit in cls.get_circuits():\n            if circuit.closed:\n                yield circuit\n\n\ndef circuit(failure_threshold=None,\n            recovery_timeout=None,\n            expected_exception=None,\n            name=None,\n            cls=CircuitBreaker):\n\n    # if the decorator is used without parameters, the\n    # wrapped function is provided as first argument\n    if callable(failure_threshold):\n        return cls().decorate(failure_threshold)\n    else:\n        return cls(\n            failure_threshold=failure_threshold,\n            recovery_timeout=recovery_timeout,\n            expected_exception=expected_exception,\n            name=name)\n","sub_path":"halo_flask/circuitbreaker.py","file_name":"circuitbreaker.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"270352993","text":"import pandas as pd\r\n\r\n\r\ndef clean_file(r_file, i_w_file):\r\n    cleaned_data = pd.read_csv(r_file)\r\n    # Fills in blank cells with interpolated values\r\n    cleaned_data = cleaned_data.interpolate(method='linear').round(decimals=1)\r\n    # Save interpolated data to new file\r\n    cleaned_data.to_csv(path_or_buf=i_w_file, index=False)\r\n\r\n    print(\"\\nData successfully interpolated!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    original_file = input(\"Enter file path: \")\r\n    read_file = original_file.replace('\\\\', '/')\r\n    read_file = read_file.replace('\"', '')\r\n\r\n    end_name_index = read_file.find('.csv')\r\n    interpolated_write_file = read_file[:end_name_index] + '_interpolated' + read_file[end_name_index:]\r\n\r\n    print(\"\\nInput file name: \", original_file)\r\n    print(\"Interpolated file name: \", interpolated_write_file.replace(\"/\", \"\\\\\"))\r\n\r\n    clean_file(read_file, interpolated_write_file)\r\n","sub_path":"Code/Data Analysis/interpolate_data.py","file_name":"interpolate_data.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"554352980","text":"import time\nfrom datetime import datetime as dt\n\ndef block_during_hours(websites: list, start_hour: int, end_hour):\n    hosts_path = r\"/etc/hosts\"\n    hosts_temp = \"hosts\"\n    redirect = \"127.0.0.1\"\n    while True:\n        if dt(dt.now().year, dt.now().month, dt.now().day, start_hour) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, end_hour):\n            with open(hosts_path, \"r+\") as file:\n                content = file.read()\n                for website in websites:\n                    if website in content:\n                        pass\n                    else:\n                        file.write(redirect+\" \"+website+\"\\n\")\n        else:\n            with open(hosts_path, \"r+\") as file:\n                content = file.readlines()\n                file.seek(0)\n                for line in content:\n                    if not any(website in line for website in websites):\n                        file.write(line)\n                file.truncate()\n        time.sleep(5)\n\nif __name__ == \"__main__\":\n    block_during_hours([\"facebook.com\", \"instagram.com\"], 9, 14)\n","sub_path":"website_blocker.py","file_name":"website_blocker.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"27166353","text":"import sys\n\ninput = sys.stdin.readline\n\nN = 
int(input())\nsoldiers = list(map(int, input().split()))\nsoldiers.reverse()\n\n#dp[i] : maximum length of a subsequence that has array[i] as its last element\ndp = [1]*(N+1)\n\n#LIS algorithm\nfor i in range(1, N):\n for j in range(0, i):\n if soldiers[j] < soldiers[i]:\n dp[i] = max(dp[i], dp[j]+1)\n\nprint(N-max(dp))","sub_path":"dp/8#4_병사배치하기.py","file_name":"8#4_병사배치하기.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"41914113","text":"#!/usr/bin/env python3\n\n###################\n# [Exercise 08-2] #\n###################\n\n\n# 1. Write code based on a while-loop that finds the least common multiple of 6 and 45. For reference, the smallest of the values that are divisible by both 6 and 45 is the 'least common multiple'. So you can start from 45 and increase the value by 1 until you find the first value that is divisible by both 6 and 45. \n\"\"\"\n >> lcm = 0 # declare the variable lcm, and declare more variables if needed\n >> # build the code that finds the least common multiple and stores it in lcm\n ....\n >> lcm # print the value stored in the variable lcm. (90 must be printed for a correct answer.)\n 90\n\n\"\"\"\n\ndef ex082_1():\n lcm = 0\n while True:\n lcm += 1\n if lcm % 6 == 0 and lcm % 45 == 0:\n break\n print(lcm)\n\nex082_1()\n\n\n\n\n\n\n\n# 2. Write code based on a while-loop that finds the greatest common divisor of 42 and 120. For reference, the largest of the values that can divide both 42 and 120 is the 'greatest common divisor'. So you can start from 42 and decrease the value by 1 until you find a value that divides both 42 and 120.\n\"\"\"\n>> gcm = 0 # declare the variable gcm, and declare more variables if needed\n>> # build the code that finds the greatest common divisor and stores it in gcm\n....\n>> gcm # print the value stored in the variable gcm. (6 must be printed for a correct answer.)\n6\n\n\"\"\"\n\ndef ex082_2():\n gcm = 42\n while True:\n gcm -= 1\n if 120 % gcm == 0 and 42 % gcm == 0:\n break\n print(gcm)\n\nex082_2()\n \n","sub_path":"EX_08-2.py","file_name":"EX_08-2.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"528086190","text":"# Copyright (c) 2012-2016 The University of Edinburgh\n# Copyright (c) 2017 Michael Koutroumpas\n# Copyright (c) 2018 Geo Smart Decisions\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n# * Neither the name of the copyright holders nor the names of its contributors may be used to\n# endorse or promote products derived from this software without specific prior\n# written permission.\n# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT\n# SHALL EDINA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\n\"\"\" Datacube processing functions\n\"\"\"\nfrom backend import logtool\nfrom backend.helper import isdate\n\nlog = logtool.getLogger(\"db\", \"datacube_processes\")\n# Make this configurable once the user can dynamically generate one's own datasets\n\n# Accelerated LS8 ingested from S3 -- R,G,B,NIR\nLS8 = {\n 'env' : None,\n 'product' : 'ls8_rgb_ndvi'\n}\n\n# Raw BOA S2 L2A from disk -- all bands, 2 years, less coverage\nL2A = {\n 'env' : 'l2a_safe',\n 'product' : 's2a_sen2cor_granule'\n}\n\n# Raw TOA S2 L1C from disk -- all bands, all years, full coverage\nL1C = {\n 'env' : 'l1c_safe',\n 'product' : 's2a_level1c_granule'\n # 'product' : 's2b_level1c_granule'\n}\n\nDATASET = L2A\n\ndef execute(params, fp=None):\n \"\"\"Handles REST parameters and dispatches the right function\n :param Dictionary params: request parameters\n :param fp: optional file object to save plots or other bulky files\n :return: raw HTTP response (json or image/*)\n \"\"\"\n if not ('selection' in params) or not ('type' in params):\n return error(\"Both selection [line|rectangle] and type\"\n \" [ndvi_transect|ndvi_time_series|time_series|...] need to be defined.\")\n if params['selection'] == 'line':\n res = line(params, fp)\n elif params['selection'] == 'rectangle':\n res = rectangle(params, fp)\n else:\n res = error(\"Supported selections: line, rectangle\")\n return res\n\n\ndef line(params, fp=None):\n \"\"\"Handles line-based algorithms e.g. ndvi_transect and dispatches\n the right function.\n :param Dictionary params: dictionary with xmin,xmax,ymin,ymax,type\n :param fp: optional file object to save plots or other bulky files\n :return: raw HTTP response (json or image/*)\n\n NOTE: line can potentially be any multipoint LINESTRING but we\n start with xmin,xmax,ymin,ymax\n \"\"\"\n from datacube.utils import geometry\n\n if not ('xmin' in params) or not ('xmax' in params) \\\n or not ('ymin' in params) or not ('ymax' in params):\n return error(\"Line requires xmin,xmax,ymin,ymax\")\n xmin = float(params[\"xmin\"])\n xmax = float(params[\"xmax\"])\n ymin = float(params[\"ymin\"])\n ymax = float(params[\"ymax\"])\n # NOTE: you can use a line with multiple points here\n line = geometry.line([(xmin, ymin), (xmax, ymax)], 'EPSG:4326')\n if(line.type != 'LineString'):\n return error(\"ndvi_transect: line not LINESTRING\")\n\n query = {\n 'geopolygon': line\n }\n if ('time_begin' in params) and ('time_end' in params):\n if (not isdate(params['time_begin'])) or (not isdate(params['time_end'])):\n return error(\"Invalid time specified\")\n query['time'] = (params['time_begin'], params['time_end'])\n if params['type'] == 'ndvi_transect':\n if not fp:\n return error(\"ndvi_transect needs a pre-allocated file\")\n return ndvi_transect(query, fp)\n else:\n return error(\"Supported line-processing types: ndvi_transect\")\n\n\ndef rectangle(params, fp=None):\n \"\"\"Handles rectangle-based algorithms e.g. 
ndvi_time_series and dispatches\n the right function.\n :param Dictionary params: request parameters\n :param fp: optional file object to save plots or other bulky files\n :return: raw HTTP response (json or image/*)\n \"\"\"\n if not ('xmin' in params) or not ('xmax' in params) \\\n or not ('ymin' in params) or not ('ymax' in params):\n return error(\"Rectangle requires xmin,xmax,ymin,ymax\")\n xmin = float(params[\"xmin\"])\n xmax = float(params[\"xmax\"])\n ymin = float(params[\"ymin\"])\n ymax = float(params[\"ymax\"])\n query = {\n 'x' : (xmin, xmax),\n 'y' : (ymin, ymax)\n }\n if ('time_begin' in params) and ('time_end' in params):\n if (not isdate(params['time_begin'])) or (not isdate(params['time_end'])):\n return error(\"Invalid time specified\")\n query['time'] = (params['time_begin'], params['time_end'])\n \n if not fp:\n return error(\"A pre-allocated file is currently mandatory for all operations\")\n\n if params['type'] == 'ndvi_time_series':\n return ndvi_time_series(query, fp, std_dev=False)\n elif params['type'] == 'ndvi_std_dev':\n return ndvi_time_series(query, fp, std_dev=True)\n elif params['type'] == 'time_series':\n query[\"measurements\"] = ['B04_10m', 'B03_10m', 'B02_10m']\n return time_series(query, fp)\n elif params['type'] == 'colour_infrared':\n query[\"measurements\"] = ['B08_10m', 'B04_10m', 'B03_10m']\n return time_series(query, fp)\n elif params['type'] == 'colour_urban':\n query[\"measurements\"] = ['B12_20m', 'B11_20m', 'B04_20m']\n return time_series(query, fp)\n #elif params['type'] == 'swir':\n # query[\"measurements\"] = ['B12_20m', 'B8A_20m', 'B04_20m']\n # return time_series(query, fp)\n # Crops:bright green, non-crop vegetation: darker green, bare earth: shades of pink\n #elif params['type'] == 'agriculture':\n # query[\"measurements\"] = ['B11_20m', 'B8A_20m', 'B02_20m']\n # return time_series(query, fp)\n # elif params['type'] == 'healthy_vegetation':\n # query[\"measurements\"] = ['B8A_20m', 'B11_20m', 'B02_20m']\n # return time_series(query, fp)\n # elif params['type'] == 'atmo_penetration':\n # query[\"measurements\"] = ['B12_20m', 'B11_20m', 'B8A_20m']\n # return time_series(query, fp)\n # elif params['type'] == 'land_water':\n # query[\"measurements\"] = ['B8A_20m', 'B11_20m', 'B04_20m']\n # return time_series(query, fp)\n # elif params['type'] == 'scene_classifier':\n # return l2a_scene_classifier(query, fp)\n else:\n return error(\"Please use a supported rectangle-processing type e.g. 
ndvi_time_series, colour_infrared etc.\")\n\ndef time_series(query, fp):\n \"\"\"Returns multiple images with R,G,B values mapped to measurements parameter\n :param dict query: x (or longitude), y (or latitude), time\n :param fp: optional file object to save plots or other bulky files\n :return: raw HTTP response (json or image/*)\n \"\"\"\n # keep those imports here to avoid breaking the rest of the file when these\n # libraries do not exist\n import matplotlib\n # pyplot will try to plot on an X11 Display without this:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import datacube\n from datacube.storage.masking import mask_invalid_data\n \n\n if 'granule' in DATASET['product']:\n query['resolution'] = (-0.000135, 0.000135)\n query['output_crs'] = 'EPSG:4326'\n \n dc = datacube.Datacube(env=DATASET['env'], app=\"ndvi_time_series\")\n\n data = dc.load(DATASET['product'], **query)\n data = mask_invalid_data(data)\n rgb = data.to_array(dim='color')\n fake_saturation = 4000\n rgb = rgb.transpose(*(rgb.dims[1:]+rgb.dims[:1])) # make 'color' the last dimension\n rgb = rgb.where((rgb <= fake_saturation).all(dim='color')) # mask out pixels where any band is 'saturated'\n rgb /= fake_saturation # scale to [0, 1] range for imshow\n try:\n rgb.plot.imshow(x=data.crs.dimensions[1], y=data.crs.dimensions[0],col='time', col_wrap=5)\n except Exception as err:\n return error(\"Plotting failed: {}\".format(err))\n ############################\n # save to supplied file object:\n plt.savefig(fp, dpi=150, format='jpg')\n plt.gcf().clear() # clear figure instead of combining new images with old\n size = fp.tell()\n return {'error': 0, 'mimetype': 'image/jpg', 'size': size}\n\n\ndef l2a_scene_classifier(query, fp):\n \"\"\"Return a plot of the scene classification according to SCL band in Sentinel2 L2A\n :param dict query: x (or longitude), y (or latitude), time\n :param fp: optional file object to save plots or other bulky files\n :return: raw HTTP response (json or image/*)\n \"\"\"\n # keep those imports here to avoid breaking the rest of the file when these\n # libraries do not exist\n import matplotlib\n # pyplot will try to plot on an X11 Display without this:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import datacube\n from datacube.storage.masking import mask_invalid_data\n \n\n if 'granule' in DATASET['product']:\n query['resolution'] = (-0.000135, 0.000135)\n query['output_crs'] = 'EPSG:4326'\n \n dc = datacube.Datacube(env=DATASET['env'], app=\"S2_scene_classifier\")\n\n data = dc.load(DATASET['product'], measurements=['SCL_20m'], **query)\n scl = data.SCL_20m\n colors=[\"xkcd:charcoal\", \"#3498db\", \"xkcd:green\", \"xkcd:white\", \"xkcd:dark green\", \"xkcd:white\", \"xkcd:deep blue\", \"xkcd:dark brown\",\"xkcd:white\",\"xkcd:white\"]\n \n try:\n scl.plot(col='time', col_wrap=3, levels=[0,1,2,3,4,5,6,7,8,9,10], colors = colors)\n except Exception as err:\n return error(\"Plotting failed: {}\".format(err))\n ############################\n # save to supplied file object:\n plt.savefig(fp, dpi=150, format='jpg')\n plt.gcf().clear() # clear figure instead of combining new images with old\n size = fp.tell()\n return {'error': 0, 'mimetype': 'image/jpg', 'size': size}\n\n\ndef ndvi_transect(query, fp):\n \"\"\"Return ndvi_transect as image\n :param dict query: x (or longitude), y (or latitude), time\n :param File fp: file pointer to save resulting plot\n :return: raw HTTP response (json on success or image on error)\n\n NOTE: line can 
potentially be any multipoint LINESTRING but we\n start with xmin,xmax,ymin,ymax\n \"\"\"\n # keep those imports here to avoid breaking the rest of the file when these\n # libraries do not exist\n import matplotlib\n # pyplot will try to plot on an X11 Display without this:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import numpy as np\n import xarray\n import datacube\n\n line = query['geopolygon']\n \n query.update({\n 'resolution': (-0.00027, 0.00027), # ADJUST THIS FOR QUALITY (in WGS84 degrees) -- can run out of memory ;)\n 'output_crs': 'EPSG:4326'\n })\n \n dc = datacube.Datacube(env=DATASET['env'], app='ndvi-transect')\n nired = dc.load(product='safe_10m', measurements=['B04_10m', 'B08_10m'],\n group_by='solar_day', **query)\n # Return error message if we find no data instead of crashing\n if (len(nired.data_vars) == 0):\n return error(\"Didn't find any index/ingested data in selected area at {}\".format(DATASET))\n #### interpolation -- calculate all values along the line according to resolution\n try:\n resolution = abs(nired.affine.a)\n line = line.to_crs(nired.crs)\n dist = np.arange(0, line.length, resolution)\n points = [line.interpolate(d).coords[0] for d in dist]\n indexers = {\n nired.crs.dimensions[0]: [p[1] for p in points],\n nired.crs.dimensions[1]: [p[0] for p in points]\n }\n trans = nired.sel_points(xarray.DataArray(dist, name='distance', dims=['distance']),\n method='nearest',\n tolerance=None,\n **indexers)\n ####\n nir = trans.B08_10m.where(trans.B08_10m != trans.B08_10m.attrs['nodata'])\n red = trans.B04_10m.where(trans.B04_10m != trans.B04_10m.attrs['nodata'])\n ndvi = ((nir - red) / (nir + red))\n # For transect just choose cloud-free images for now\n #good_data = datacube.storage.masking.make_mask(trans.quality, cloud=0)\n # drop N/A values\n #ndvi_cloud_free = ndvi.where(good_data).dropna('time', how='all')\n #ndvi_cloud_free.plot()\n #ndvi.plot()\n # reverse Y,X and use custom cmap:\n ndvi.plot(x='distance', y='time', cmap='RdYlGn')\n except Exception as err:\n return error(\"Plotting failed: {}\".format(err))\n ############################\n # save to supplied file object:\n plt.savefig(fp, dpi=150, format='jpg')\n plt.gcf().clear() # clear figure instead of combining new images with old\n size = fp.tell()\n return {'error': 0, 'mimetype': 'image/jpg', 'size': size}\n\n\ndef ndvi_time_series(query, fp, std_dev=False):\n \"\"\"Return ndvi time series as a big image of smaller images\n :param dict query: x (or longitude), y (or latitude), time\n :param File fp: file pointer to save resulting plot\n :param std_dev Bool: If True then plot ndvi std deviation over period\n :return: raw HTTP response (json on success or image on error)\n \"\"\"\n # keep those imports here to avoid breaking the rest of the file when these\n # libraries do not exist\n import matplotlib\n # pyplot will try to plot on an X11 Display without this:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import datacube\n \n if 'granule' in DATASET['product']:\n query['resolution'] = (-0.000135, 0.000135)\n query['output_crs'] = 'EPSG:4326'\n \n dc = datacube.Datacube(env=DATASET['env'], app=\"ndvi_time_series\")\n nired = dc.load(product=DATASET['product'], measurements=['B04_20m', 'B8A_20m', 'SCL_20m'], group_by='solar_day', **query)\n nir = nired.B8A_20m.where(nired.B8A_20m != nired.B8A_20m.attrs['nodata'])\n red = nired.B04_20m.where(nired.B04_20m != nired.B04_20m.attrs['nodata'])\n ndvi = ((nir - red) / (nir + red))\n\n # Return error message if we find no data instead 
of crashing\n if (len(nired.data_vars) == 0):\n return error(\"Didn't find any index/ingested data in selected area at {}\".format(DATASET))\n try:\n cloud = datacube.storage.masking.make_mask(nired.SCL_20m, sca=\"snow\")\n ndvi_cloud_free = ndvi.where(~cloud).dropna('time', how='all')\n if ( std_dev ):\n ndvi_cloud_free.std(dim='time').plot()\n else:\n ndvi_cloud_free.plot(col='time', col_wrap=3, cmap='RdYlGn')\n except Exception as err:\n return error(\"Plotting failed: {}\".format(err))\n ############################\n # save to supplied file object:\n plt.savefig(fp, dpi=150, format='jpg')\n plt.gcf().clear() # clear figure instead of combining new images with old\n size = fp.tell()\n return {'error': 0, 'mimetype': 'image/jpg', 'size': size}\n\n\ndef error(message):\n return {\"error\": 1, \"msg\": message}\n\n\ndef success(message):\n return {\"error\": 0, \"msg\": message}\n\n################### MAIN #######################\n\n\nif __name__ == \"__main__\":\n import sys\n # test error json\n print(logtool.pp(execute({'selection': 'line', 'type': 'ndvi_transect'})))\n\n ## create params\n params = {\n 'xmin' : -4.037846435,\n 'ymin' : 52.51647802,\n 'xmax' : -3.970512503,\n 'ymax' : 52.56521060,\n 'time_begin' : '2018-2-1',\n 'time_end' : '2018-6-1',\n 'product' : 'sentinel'\n }\n # Agriculture\n with open('agriculture.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'agriculture'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n # Scene classifier\n with open('scene_classifier.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'scene_classifier'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n # false-color-infrared\n with open('colour_infrared.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'colour_infrared'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n # NDVI TIME SERIES\n with open('test_ndvi_time_series.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'ndvi_time_series'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n #sys.exit(0)\n # NDVI STD DEV\n with open('test_ndvi_std_dev.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'ndvi_std_dev'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n # TIME SERIES\n with open('test_time_series.jpg', 'wb+') as out:\n params['selection'] = 'rectangle'\n params['type'] = 'time_series'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n # NDVI TRANSECT\n with open('test_ndvi_transect.jpg', 'wb+') as out:\n params['selection'] = 'line'\n params['type'] = 'ndvi_transect'\n img = execute(params, fp=out)\n print(\"Saved {}\".format(str(img)))\n sys.exit(0)\n","sub_path":"src/backend/db/datacube_processes.py","file_name":"datacube_processes.py","file_ext":"py","file_size_in_byte":18062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"388793270","text":"#validating python version\nimport sys\nif sys.version_info[0] < 3:\n print(\"Error: You must use Python 3, try running $ python3 app.py or updating the python interpreter\")\n\n#their exercise code starts here\nimport json\nfrom DataStructures import Queue\n\n# the queue has to be declared globally (outside any other function)\n# that way all methods have access to it\nqueue = Queue(mode=\"FIFO\")\n\ndef 
show_main_menu():\n print('''\nWhat would you like to do (type a number and press Enter)?\n - Type 1: For adding someone to the Queue.\n - Type 2: For removing someone from the Queue.\n - Type 3: For printing the current Queue state.\n - Type 4: To export the queue to the queue.json file.\n - Type 5: To import the queue from the queue.json file.\n - Type 6: To quit\n ''')\n response = input()\n return response\n\n#1\ndef enqueue():\n print('\\nWho would you like to add to the queue?')\n person = input()\n queue.enqueue( person )\n next_in_line = queue.size() - 1 if queue._mode == 'FIFO' else 0\n amt = 'is one person' if next_in_line == 1 else f'are {next_in_line} people'\n print(f'{person} added to queue. There {amt} before them.')\n\n#2\ndef dequeue():\n person = queue.dequeue()\n print(f'{person} has been removed from the queue.')\n\n#3\ndef print_queue():\n # you must print on the console the entire queue list\n print(\"Printing the entire list...\")\n print(queue.get_queue())\n\n#4\ndef export_queue():\n print('Exporting queue to JSON file...')\n jsonfile = open('queue.json' , 'w')\n json.dump(queue.get_queue() , jsonfile)\n jsonfile.close()\n print('JSON file has been created successfully')\n\n#5\ndef import_queue():\n print('Importing queue from JSON file')\n jsonfile = open('queue.json' , 'r')\n global queue\n queue = Queue (mode='FIFO', current_queue = json.load(jsonfile))\n jsonfile.close()\n print_queue()\n\n\ndef start():\n \n print(\"\\nHello, this is the Command Line Interface for a Queue Management application.\")\n while True:\n \n option = show_main_menu()\n \n try: #converting the user input into an integer\n option = int(option)\n except ValueError:\n print(\"Invalid option \"+str(option))\n continue\n\n # add your options here using conditionals (if)\n if option == 1:\n enqueue()\n elif option == 2:\n dequeue()\n elif option == 3:\n print_queue()\n elif option == 4:\n export_queue()\n elif option == 5:\n import_queue()\n elif option == 6:\n print(\"Bye bye!\")\n return None\n else:\n print(\"Invalid option \"+str(option))\n\n \nstart()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522376582","text":"\nimport time\n\nimport pygame\nfrom pygame import Rect\n\nfrom tilegamelib.frame import Frame\nfrom tilegamelib.screen import Screen\nfrom tilegamelib.tile_factory import TileFactory\nfrom tilegamelib.tiled_map import TiledMap\n\n\nclass BarDisplay:\n\n def __init__(self, frame, game, value, tile_name, vertical=False):\n self.frame = frame\n self.map = TiledMap(game, frame=frame)\n self.value = value\n self.tile_name = tile_name\n self.vertical = vertical\n self.redraw()\n\n def redraw(self):\n if self.vertical:\n self.map.set_map('\\n'.join([self.tile_name] * self.value))\n else:\n self.map.set_map(self.tile_name * self.value)\n self.frame.clear()\n self.map.draw()\n\n def increase(self):\n self.value += 1\n self.redraw()\n\n def decrease(self):\n if self.value > 0:\n self.value -= 1\n self.redraw()\n\n\nif __name__ == '__main__':\n screen = Screen()\n tile_factory = TileFactory()\n frame = Frame(screen, Rect(96, 64, 640, 32))\n bananas = BarDisplay(frame, tile_factory, 0, 'b', False)\n frame = Frame(screen, Rect(64, 64, 32, 320))\n cherries = BarDisplay(frame, tile_factory, 10, 'c', True)\n for i in range(15):\n pygame.display.update()\n time.sleep(0.1)\n # screen.clear()\n bananas.increase()\n cherries.decrease()\n 
pygame.display.update()\n for i in range(15):\n pygame.display.update()\n time.sleep(0.1)\n screen.clear()\n bananas.decrease()\n cherries.increase()\n pygame.display.update()\n time.sleep(2)\n","sub_path":"tilegamelib/bar_display.py","file_name":"bar_display.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502518351","text":"\"\"\"Definitions of common job functions\"\"\"\n\nfrom pymatgen.core.composition import Element\n\nfrom rxn_network.core.composition import Composition\nfrom rxn_network.utils.funcs import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef run_enumerators(enumerators, entries):\n rxn_set = None\n for enumerator in enumerators:\n logger.info(f\"Running {enumerator.__class__.__name__}\")\n rxns = enumerator.enumerate(entries)\n\n logger.info(f\"Adding {len(rxns)} reactions to reaction set\")\n\n if rxn_set is None:\n rxn_set = rxns\n else:\n rxn_set = rxn_set.add_rxn_set(rxns)\n\n logger.info(\"Completed reaction enumeration. Filtering duplicates...\")\n rxn_set = rxn_set.filter_duplicates()\n return rxn_set\n\n\ndef get_added_elem_data(entries, targets):\n added_elems = entries.chemsys - {\n str(e) for target in targets for e in Composition(target).elements\n }\n added_chemsys = \"-\".join(sorted(list(added_elems)))\n added_elements = [Element(e) for e in added_elems]\n\n return added_elements, added_chemsys\n","sub_path":"src/rxn_network/jobs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"476968435","text":"import os\nimport os.path as op\nimport datetime\nfrom flask import request, render_template, redirect, url_for, flash\nfrom .models import Universities, Colleges, Programs, User, Posts, Image, Video, Widgets\nfrom app import app, db, login_manager\nfrom flask_admin import Admin, form\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_login import login_user, logout_user,\\\n current_user, login_required\nimport re\nfrom .forms import LoginForm, CKTextAreaField\nfrom jinja2 import evalcontextfilter, Markup, escape\nfrom sqlalchemy.event import listens_for\nfrom flask_admin.contrib.fileadmin import FileAdmin\nfrom flask.ext.admin.menu import MenuLink\nfrom sqlalchemy import or_, and_, desc\n\n_paragraph_re = re.compile(r'(?:\\r\\n|\\r|\\n){2,}')\n\n@login_manager.user_loader\ndef user_loader(user_id):\n return User.query.filter_by(username=user_id).first()\n\n\n@app.template_filter()\n@evalcontextfilter\ndef nl2br(eval_ctx, value):\n result = u'\\n\\n'.join(u'
<p>%s</p>' % p.replace('\\n', '<br>
\\n') \\\n for p in _paragraph_re.split(escape(value)))\n if eval_ctx.autoescape:\n result = Markup(result)\n return result\n\n\nPOSTS_PER_PAGE = 20\n\n\n# Create directory for file fields to use\nfile_path = op.join(op.dirname(__file__), 'static')\ntry:\n os.mkdir(file_path)\nexcept OSError:\n pass\n\nclass MyModelView(ModelView):\n def __init__(self, model, session, name=None, category=None, endpoint=None, url=None, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n super(MyModelView, self).__init__(model, session, name=name, category=category, endpoint=endpoint, url=url)\n\n def is_accessible(self):\n return current_user.is_authenticated\n\n column_list=['id', 'uni_name', 'city', 'province']\n\nclass UniModelView(MyModelView):\n column_searchable_list = ['uni_name']\n\nclass ProgAdmin(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n column_display_pk = True\n column_hide_backrefs = False\n column_list=['id', 'title', 'university.uni_name', 'degree']\n column_sortable_list=['id', 'title', 'university.uni_name', 'degree']\n column_searchable_list = ['title', 'university.uni_name']\n form_columns=['university','degree','title','city','district','province','institute_type','eligibility','complation_requirement','scope','fees','scholarship','duration','credit_hours','teaching_system','seats','shift','session','regular_private','department','faculty','course_outline','campus','hec_ranking','hostel_facility','contact','location','affiliation','lectures_notes','reference_sites','web','alumni']\n form_choices = {'degree': [ ('Bachelor', 'Bachelor'),\n ('Master', 'Master'),\n ('MS', 'MS'),\n ('M.Phil', 'M.Phil'),\n ('MS/M.Phil', 'MS/M.Phil'),\n ('Phd', 'Phd'),\n ('Diploma', 'Diploma')],\n 'scholarship':[('Available','Available'),('Not available','Not available')],\n 'teaching_system':[('Semester System', 'Semester System'),\n ('Annual system', 'Annual system'),\n ('Distance education system', 'Distance education system')],\n 'shift':[('morning','morning'),('evening','evening'),('afternoon','afternoon')],\n 'institute_type':[('university','university'),('college','college')]\n }\n\n \nclass PostsView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n\n column_list=['id', 'title', 'post_date', 'post_type']\n column_filters = ['id', 'title', 'post_date', 'post_type']\n column_default_sort=('post_date', True)\n column_searchable_list = ['title', 'post_type', 'program.title' ,'university.uni_name']\n form_overrides = dict(text=CKTextAreaField)\n form_args = dict(\n post_date=dict(default=datetime.datetime.now())\n )\n create_template = 'edit.html'\n edit_template = 'edit.html'\n\n form_choices = {'post_type': [ ('articles', 'Article'),\n ('past-papers', 'Past Paper'),\n ('date-sheets', 'Date Sheet'),\n ('syllabus', 'Syllabus'),\n ('news', 'News'),\n ('notes', 'Notes'),\n ('results', 'Results'),\n ('scholarships', 'Scholarships'),\n ('career-counselling', 'Career Counselling'),\n ('success-stories', 'Success Stories'),\n ('css','CSS'),\n ('my-teachers', 'My Teachers')\n ]\n }\n form_extra_fields = {\n 'featured_image': form.ImageUploadField('Featured Image',\n base_path=file_path),\n 'pdf_attachment': form.FileUploadField('PDF Attachment',\n base_path=file_path,\n allowed_extensions=[\"pdf\"]),\n }\n\n\nclass ImageView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n\n def _list_thumbnail(view, context, model, name):\n if not model.path:\n return ''\n img_thumb = 
form.thumbgen_filename(model.path).replace('_thumb', '')\n imgpath = url_for('static', filename=img_thumb)\n return Markup('<img src=\"%s\">' % imgpath)\n def _list_path(view, context, model, name):\n if not model.path:\n return ''\n imgpath = url_for('static', filename=model.path)\n return Markup('<a href=\"{0}\">{0}</a>'.format(imgpath, imgpath))\n column_formatters = {\n 'img': _list_thumbnail,\n 'path': _list_path\n }\n column_list=['name', 'path', 'img']\n form_columns=['name', 'path']\n # Alternative way to contribute field is to override it completely.\n # In this case, Flask-Admin won't attempt to merge various parameters for the field.\n form_extra_fields = {\n 'path': form.ImageUploadField('Image',\n base_path=file_path)\n }\n@listens_for(Image, 'after_delete')\ndef del_image(mapper, connection, target):\n if target.path:\n # Delete image\n try:\n os.remove(op.join(file_path, target.path))\n except OSError:\n pass\n\n # Delete thumbnail\n try:\n os.remove(op.join(file_path,\n form.thumbgen_filename(target.path)))\n except OSError:\n pass\n\nclass MyFileAdmin(FileAdmin):\n def is_accessible(self):\n return current_user.is_authenticated\n\nclass UserModelView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n\n column_list=['id', 'username', 'role']\n form_columns=['username', 'password', 'role']\n\n def after_model_change(self, form, model, is_created):\n password = form.password.data\n print(password)\n model.set_password(password)\n db.session.commit()\n # if is_created: # create the table just once\n # model.set_password(password)\n # db.session.commit()\n # #\n\nclass WidgetsView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n\n form_choices = {'homepage': [('yes', 'Yes - Show on Home Page'),\n ('no', 'No - Dont show on homepage') ],\n 'allpages': [('yes', 'Yes - Show on All Page'),\n ('no', 'No - Show only on specified pages') ],\n 'categories': [ ('all', 'All Categories'),\n ('articles', 'Article'),\n ('past-papers', 'Past Paper'),\n ('date-sheets', 'Date Sheet'),\n ('syllabus', 'Syllabus'),\n ('news', 'News'),\n ('notes', 'Notes'),\n ('results', 'Results'),\n ('scholarships', 'Scholarships'),\n ('career-counselling', 'Career Counselling'),\n ('success-stories', 'Success Stories'),\n ('my-teachers', 'My Teachers'),\n ('videos', 'Videos')\n ]\n }\n\n create_template = 'edit.html'\n edit_template = 'edit.html'\n\nclass VideosView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated\n\n form_extra_fields = {\n 'img': form.ImageUploadField('Featured Image',\n base_path=file_path)\n }\n\n\nadmin = Admin(app, template_mode='bootstrap3')\nadmin.add_view(UserModelView(User, db.session))\nadmin.add_view(UniModelView(Universities, db.session))\nadmin.add_view(MyModelView(Colleges, db.session))\nadmin.add_view(ProgAdmin(Programs, db.session))\nadmin.add_view(PostsView(Posts, db.session))\nadmin.add_view(ImageView(Image, db.session))\nadmin.add_view(VideosView(Video, db.session))\nadmin.add_view(WidgetsView(Widgets, db.session))\nadmin.add_link(MenuLink(name='Site', category='', url=\"/\"))\nadmin.add_link(MenuLink(name='Logout', category='', url=\"/logout\"))\n# admin.add_view(MyFileAdmin(file_path, '/static/', name='Static Files'))\n\nCITIES = ['Abbottabad', 'Bagh', 'Bahawalpur', 'Bannu', 'Bhimber', 'Charsadda', 'D.I.Khan', 'Dera Ghazi Khan', 'Dir', 'Faisalabad', 'Gilgit', 'Gujranwala', 'Gujrat', 'Haripur', 'Hyderabad', 'Islamabad', 'Jamshoro', 'Karachi', 'Karak', 'Khairpur', 'Khuzdar', 'Kohat', 'Kotli', 'Lahore', 'Larkana', 'Lasbela', 'Loralai', 'Malakand', 'Manshera', 'Mardan', 'Mirpur', 'Multan', 'Muzaffarabad', 'Nawabshah', 'Nerain Sharif', 'Nowshera', 'Peshawar', 'Quetta', 'Rahim Yar Khan', 'Rawalakot', 'Rawalpindi', 'Sakrand', 'Sargodha', 'Sialkot', 'Sukkur', 'Swabi', 'Swat', 'Tandojam', 'Taxila', 'Topi', 
'Turbat', 'Wah']\nPROVINCES = [\"Islamabad\", \"Khyber Pakhtunkhwa\", \"Punjab\", \"Sindh\", \"Balochistan\", \"Azad Jammu and Kashmir\", \"Gilgit-Baltistan\"]\n\n@app.context_processor\ndef cities_provinces():\n return {'cities': CITIES, 'provinces': PROVINCES}\n\n\ndef widget_maker(placement, **kwargs):\n category = kwargs.get('category', '*')\n\n if placement=='homepage':\n widget = Widgets.query.filter(Widgets.homepage=='yes').all()\n\n if placement=='posts-list':\n widget = Widgets.query.filter((Widgets.allpages=='yes')\\\n | (Widgets.categories==category)\\\n | (Widgets.categories=='all')\\\n ).all()\n\n return widget\n\napp.jinja_env.globals.update(widget_maker=widget_maker)\n\n\n#ROUTES\n\n@app.route('/setup')\ndef setup():\n admin = User.query.filter_by(username=\"admin\").first()\n if not admin:\n admin = User(\"admin\", \"123karwan\")\n db.session.add(admin)\n db.session.commit()\n return \"ready\"\n else:\n return \"already ready\"\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n flash('Logged in successfully.')\n\n return redirect('/admin')\n else:\n flash('Unsuccessful.', 'warning')\n\n return render_template('login.html', form=form)\n\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/login\")\n\n\n@app.route('/')\ndef index():\n types = {'news':'', 'articles':'', 'past-papers':'',\n 'date-sheets':'', 'syllabus':'', 'notes':'',\n 'results':'', 'scholarships':'', 'career-counselling':'',\n 'success-stories':'', 'my-teachers':''\n }\n for type in types:\n types[type] = Posts.query.filter_by(post_type=type).order_by(desc(Posts.post_date)).limit(4).all()\n\n videos = Video.query.order_by(desc(Video.id)).limit(4).all()\n return render_template('index2.html', types=types, videos=videos)\n\n\n@app.route('/<type>')\n@app.route('/<type>/<int:page>')\ndef posts(type, page=1):\n posts = Posts.query.filter_by(post_type=type).order_by(desc(Posts.post_date)).paginate(page, POSTS_PER_PAGE, False)\n return render_template('posts.html', pcont=posts, posts=posts.items, type=type)\n\n@app.route('/videos')\ndef videos():\n videos = Video.query.order_by(desc(Video.id)).all()\n return render_template('videos.html', videos=videos)\n\n@app.route('/search/')\ndef search():\n query = request.args.get('s')\n s = \"%{0}%\".format(query)\n province = request.args.get('province', '*')\n city = request.args.get('city')\n program = request.args.get('p')\n \n\n if program is not None:\n program = \"%{0}%\".format(program)\n programs = Programs.query.filter(Programs.title.ilike(program)).\\\n all()\n return render_template('programs-list.html', programs=programs)\n else:\n if city and province and query:\n universities = Universities.query.filter(and_(Universities.uni_name.ilike(s),\\\n Universities.city==city,\\\n Universities.province==province,\\\n )).all()\n elif city and query:\n universities = Universities.query.filter(and_(Universities.uni_name.ilike(s),\\\n Universities.city==city,\\\n )).all()\n elif province and query:\n universities = Universities.query.filter(and_(Universities.uni_name.ilike(s),\\\n Universities.province==province,\\\n )).all()\n elif city and province:\n universities = Universities.query.filter(Universities.province==province).filter(Universities.city==city).all()\n elif city:\n universities = Universities.query.filter(Universities.city==city).all()\n elif province:\n universities = Universities.query.filter(Universities.province==province).all()\n elif 
query:\n universities = Universities.query.filter(Universities.uni_name.ilike(s)).all()\n else:\n universities = Universities.query.all()\n\n return render_template('universities.html', universities=universities)\n\n\n@app.route('/universities')\ndef universities():\n universities = Universities.query.all()\n\n if request.args.get('city') is not None:\n universities = Universities.query.filter_by(city=request.args.get('city')).all()\n if request.args.get('province') is not None:\n universities = Universities.query.filter_by(province=request.args.get('province')).all()\n if request.args.get('pub_pri') is not None:\n universities = Universities.query.filter_by(pub_pri=request.args.get('pub_pri')).all()\n return render_template('universities.html', universities=universities)\n\n\n@app.route('/programs/<uni_id>')\ndef programs(uni_id):\n university = Universities.query.get(uni_id)\n programs = Programs.query.filter_by(uni_id=uni_id).all()\n return render_template('programs.html', university=university, programs=programs)\n\n\n@app.route('/program/<id>')\ndef program(id):\n program = Programs.query.filter_by(id=id).first()\n university = Universities.query.get(program.uni_id)\n return render_template('program.html', university=university, program=program)\n\n@app.route('/program/<id>/add', methods=['POST'])\n@login_required\ndef program_add(id):\n program = Programs()\n program.uni_id = id\n program.title = \"New Program\"\n db.session.add(program)\n db.session.commit()\n return redirect(url_for('programs', uni_id=id))\n\n\n@app.route('/program/<id>/delete', methods=['POST'])\n@login_required\ndef program_delete(id):\n program = Programs.query.filter_by(id=id).first()\n uni = program.uni_id\n db.session.delete(program)\n db.session.commit()\n return redirect(url_for('programs', uni_id=uni))\n\n\n@app.route('/programs/scholarships')\ndef scholarship():\n programs = Programs.query.filter(Programs.scholarship.isnot(None)).all()\n return render_template('scholarships.html', programs=programs, university=None)\n\n\n@app.route('/programs/')\ndef all_programs():\n programs = Programs.query.filter(Programs.degree.isnot(None), Programs.title.isnot(None)).all()\n return render_template('all_programs.html', programs=programs)\n\n\n@app.route('/<type>/<id>/<slug>')\ndef article(id, slug, type):\n post = Posts.query.get(id)\n related = Posts.query.filter_by(post_type=type).filter(Posts.id != post.id).order_by(desc(Posts.post_date)).limit(5).all()\n return render_template('article.html', post=post, related=related)","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
(Press any key to quit): \")\r\n if catcher != \"\":\r\n print(\"\\nQuitting...\\n\")\r\n break\r\n else:\r\n print(\"\\nContinuing...\\n\")\r\n \r\ndef draw_winner(user_dict, limit):\r\n count = 0\r\n selector = None\r\n used_int = []\r\n length = len(user_dict)\r\n winners = []\r\n while count < limit:\r\n count += 1\r\n selector = random.randint(1, length)\r\n if selector in used_int:\r\n while selector in used_int:\r\n selector = random.randint(1, length)\r\n winners.append(user_dict[selector])\r\n used_int.append(selector)\r\n return winners\r\n\r\ndef output(final):\r\n message = \"Results of the draw:\"\r\n print(len(message) * \"=\")\r\n print(message)\r\n count = 1\r\n for winner in final:\r\n print(str(count) + \") \" + winner)\r\n count += 1\r\n print(len(message) * \"=\")\r\n\r\nuser_dict = {1:\"\", 2:\"\",\r\n 3:\"\", 4:\"\",\r\n 5:\"\", 6:\"\",\r\n 7:\"\", 8:\"\",\r\n 9:\"\", 10:\"\",\r\n 11:\"\", 12:\"\",\r\n 13:\"\", 14:\"\",\r\n 15:\"\", 16:\"\",\r\n 17:\"\", 18:\"\",\r\n 19:\"\", 20:\"\"}\r\nwin_count = 2\r\n\r\nmain(user_dict, win_count)\r\n","sub_path":"randomdraw.py","file_name":"randomdraw.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"117394574","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 3 15:21:04 2018\n\n@author: jliu\n\"\"\" \n \nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport math\n\nimport sys\nsys.path.append('../../../../1_1d/2_figure')\nimport settings as set\n\n\nvec_equ=['1_u_e_m_c_x_m_0p5_square','2_u_1_over_c_x_m_0p5_square','3_u_cx_plus_100_square','4_u_x_square_m_c_square']\nid_equ=2\n\nvec_FEM=['1_sm','2_mm_PPdisc','3_mm_RT']\nid_FEM=1\n\ndegree = 4\nif id_FEM==2:\n degree=2\n\n\nvec_xaxis=['ndofs','ncond']\nid_xaxis=0\n\n\n\nn_param = 5\nvec_param = ['1em2', '1em1', '1e0', '1e1', '1e2']\nid_param=0\n\nvec_var = ['solu','grad','2ndd']\n\nvec_offset = [2e-20, 1e-19, 2e-20]\nvec_slope = [1, 1.5, 2.5]\n\nvec_label_xaxis=['Number of DoFs','Condition number']\n\nn_degree = 5\nn_err_refer = 28\n\nvec_legend = ['1e-2', '1e-1', '1e0', '1e1', '1e2']\n\nid_loc_legend = 1\nvec_marker=['o','d','^','s','*'] \nlegend_size=16\nfontsize_label = 18\n\nxaxis_up_bound=1e6\nyaxis_low_bound=1e-20\nyaxis_up_bound=1e0\nvec_ytick = [1e-20, 1e-16, 1e-12, 1e-8, 1e-4, 1e0]\n\ndof_ref = np.zeros(n_err_refer)\nerr_round_off_approx = np.zeros(n_err_refer)\n\nfor i in range(n_err_refer):\n dof_ref[i] = 2**i\n \n \nif id_FEM==0:\n vec_slope=[1,1,1] \n \nif id_xaxis==0: \n if id_FEM==1:\n vec_slope=[1.0,1.0,1.0]\n elif id_FEM==2:\n vec_slope=[0.5,0.5,1]\nelif id_xaxis==1:\n if id_FEM==1:\n vec_slope=[1,1.5,2.5]\n elif id_FEM==2:\n vec_slope=[1,1,2] \n \nif id_equ==0:\n if id_FEM==0:\n vec_offset = [2e-17, 2e-16, 2e-15]\nelif id_equ==1:\n if id_FEM==0:\n yaxis_low_bound=1e-20\n yaxis_up_bound=1e-4\n vec_ytick = [1e-20, 1e-16, 1e-12, 1e-8, 1e-4] \n elif id_FEM==1 or id_FEM==2:\n yaxis_low_bound=1e-24\n yaxis_up_bound=1e-4\n vec_ytick = [1e-24, 1e-20, 1e-16, 1e-12, 1e-8, 1e-4] \nelif id_equ==2:\n if id_FEM==1:\n vec_offset=[1e-14, 1e-12, 1e-15]\n elif id_FEM==2:\n vec_offset=[4e-13, 2e-12, 5e-12]\nelif id_equ==3:\n vec_offset=[6e-13, 3e-12, 4e-12]\n \nif id_xaxis==1: \n matrix_data_xaxis = [line.strip().split() for line in open(vec_equ[id_equ]+'/'+vec_FEM[id_FEM]+'/data_ncond.txt','r')]\n \nmatrix_data_error = [line.strip().split() for line in 
open(vec_equ[id_equ]+'/'+vec_FEM[id_FEM]+'/data_error_deg_'+str(degree)+'_c_'+vec_param[0]+'.txt','r')]\nn_refine = len(matrix_data_error)-1\n \ndata_error = np.zeros((n_refine,n_param))\ndata_xaxis = np.zeros(n_refine) \n \n\nid_var_start = 0\nid_var_end = 3\n\nfor id_var in range(id_var_start,id_var_end):\n \n print ('var: '+vec_var[id_var]) \n \n f=plt.figure(figsize=(5,4))\n axes= f.add_axes([0,0,1,1])\n \n for id_param in range(0,n_param): \n matrix_data_error = [line.strip().split() for line in open(vec_equ[id_equ]+'/'+vec_FEM[id_FEM]+'/data_error_deg_'+str(degree)+'_c_'+vec_param[id_param]+'.txt','r')]\n for i in range(n_refine):\n data_error[i][id_param]=matrix_data_error[i+1][id_var]\n \n for i in range(n_refine):\n if id_xaxis==0:\n if id_FEM==0:\n data_xaxis[i]=2**(i+1)*degree+1\n elif id_FEM==1:\n if id_var==0:\n data_xaxis[i]=2**(i+1)*degree\n elif id_var==1 or id_var==2:\n data_xaxis[i]=2**(i+1)*degree+1\n elif id_xaxis==1:\n data_xaxis[i]=matrix_data_xaxis[i+1][0]\n \n for i in range(n_refine):\n if(data_xaxis[i]==0):\n data_xaxis[i] = np.nan \n for p in range(n_param):\n if(data_error[i][p]==0):\n data_error[i][p] = np.nan\n\n\n \n for i in range(n_err_refer):\n err_round_off_approx[i] = vec_offset[id_var]* dof_ref[i]**vec_slope[id_var] \n \n tria_start = 8\n tria_end = tria_start+3\n \n tria_coeff_x = 1e1\n tria_coeff_y = 1e-2\n \n \n text_offset_x = dof_ref[1]\n \n text_offset_coeff_y=0.1\n \n \n text_slope_bottom_coeff_x = 0.4\n text_slope_bottom_coeff_y = 0.07\n text_slope_right_coeff_x = 1.2\n text_slope_right_coeff_y = 0.2\n \n \n\n if id_xaxis==0:\n if id_var == 1:\n text_slope_right_coeff_y = 0.2\n elif id_var == 2:\n text_offset_x = dof_ref[1]\n text_slope_right_coeff_y = 0.13 \n elif id_xaxis==1:\n if id_var == 1:\n text_slope_right_coeff_y = 0.08\n elif id_var == 2:\n text_offset_x = dof_ref[2]\n text_slope_right_coeff_y = 0.02 \n \n \n tria_p_1 = [dof_ref[tria_start],err_round_off_approx[tria_start]]\n tria_p_2 = [dof_ref[tria_end],err_round_off_approx[tria_start]]\n tria_p_3 = [dof_ref[tria_end],err_round_off_approx[tria_end]]\n \n tria_x = [tria_p_1[0],tria_p_2[0],tria_p_3[0],tria_p_1[0]]#*\n tria_x = [i*tria_coeff_x for i in tria_x]\n tria_y = [tria_p_1[1],tria_p_2[1],tria_p_3[1],tria_p_1[1]]#/tria_coeff_y\n tria_y = [i*tria_coeff_y for i in tria_y]\n \n text_slope_bottom_x = 10**((math.log10(tria_x[0])+math.log10(tria_x[1]))/2)\n text_slope_bottom_y = tria_y[0]*text_slope_bottom_coeff_y\n text_slope_right_x = tria_x[1]*text_slope_right_coeff_x\n text_slope_right_y = 0.5*10**((math.log10(tria_y[1])+math.log10(tria_y[2]))/2)\n \n \n text_offset_y = vec_offset[id_var]* text_offset_x**vec_slope[id_var]*text_offset_coeff_y \n \n for id_param in range(n_param):\n plt.loglog(data_xaxis, data_error[:,id_param],'k'+vec_marker[id_param]+'-', markerfacecolor='none',label='c='+vec_legend[id_param],linewidth=1.0)\n \n \n \n if id_var == 0: \n plt.legend()\n plt.legend(loc=id_loc_legend, prop={'size': set.fontsize_legend})\n \n if degree==4 or (degree==2 and id_FEM==2):\n \n plt.loglog(dof_ref, err_round_off_approx,'k--',linewidth=1.0)\n plt.text(text_offset_x,text_offset_y,r'$\\alpha_{\\rm R}=$'+str(vec_offset[id_var]),fontsize = 15) \n \n plt.plot(tria_x,tria_y,'k-',linewidth=1.0)\n plt.text(text_slope_bottom_x,text_slope_bottom_y,'1',fontsize = 15)\n plt.text(text_slope_right_x,text_slope_right_y,str(vec_slope[id_var]),fontsize = 15) \n \n elif id_var == 1 or id_var == 2:\n plt.loglog(dof_ref, err_round_off_approx,'k--',linewidth=1.0)\n 
plt.text(text_offset_x,text_offset_y,r'$\\alpha_{\\rm R}=$'+str(vec_offset[id_var]),fontsize = 15) \n \n plt.plot(tria_x,tria_y,'k-',linewidth=1.0)\n plt.text(text_slope_bottom_x,text_slope_bottom_y,'1',fontsize = 15)\n plt.text(text_slope_right_x,text_slope_right_y,str(vec_slope[id_var]),fontsize = 15) \n \n\n \n \n plt.tick_params(axis='both', which='major', labelsize=set.fontsize_tick)\n \n plt.xlabel(vec_label_xaxis[id_xaxis], fontsize=set.fontsize_label)\n plt.ylabel('Error', fontsize=set.fontsize_label)\n \n \n print('yaxis_low_bound: ', yaxis_low_bound) \n print('yaxis_up_bound: ', yaxis_up_bound) \n \n axes.set_xlim([1, xaxis_up_bound])\n axes.set_ylim([yaxis_low_bound, yaxis_up_bound]) \n \n plt.yticks(vec_ytick) \n \n plt.show()\n \n f.savefig(vec_equ[id_equ]+'/'+vec_FEM[id_FEM]+\"/py_error_oned_\"+vec_equ[id_equ]+\"_\"+vec_FEM[id_FEM]+'_deg_'+str(degree)+'_'+vec_xaxis[id_xaxis]+\"_\"+vec_var[id_var]+\".pdf\", bbox_inches='tight')\n \n\n","sub_path":"1_article_1d/2_figure/1_dealii/9_for_2d/1_error_evolution/2_magnitude/py_error_oned_param_var.py","file_name":"py_error_oned_param_var.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155391589","text":"\n\nclass Insights_instagram:\n def __init__(self,id,metric,period,value,thumb_url,caption):\n self.id = id\n self.metric = metric\n self.period = period\n self.value = value\n self.thumb_url = thumb_url\n self.caption = caption\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Insights.py","file_name":"Insights.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"66803543","text":"import torch\nfrom pathlib import Path\n\n\nclass Config:\n def __init__(self):\n self.path_to_root = Path('..')\n self.path_to_data = self.path_to_root / 'data'\n self.path_to_output = self.path_to_root / 'output'\n self.path_to_models = self.path_to_output / 'models'\n self.path_to_predictions = self.path_to_output / 'predictions'\n self.path_to_tensorboard_logs = self.path_to_output / 'tensorboard_logs'\n \n self.autoencoder_data_ratio = 0.5\n self.shuffle = True\n self.use_stratify = True\n \n self.start_epoch = 0\n self.epochs = 15\n self.batch_size = 128\n self.num_workers = 4\n self.pin_memory = True\n self.use_gpu = True\n self.device = f\"cuda:{torch.cuda.current_device()}\" if self.use_gpu and torch.cuda.is_available() else \"cpu\"\n self.seed = 42\n self.print_freq = 10\n \n self.disc_hidden_channels = 16\n self.autoencoder_hidden_channels = 16\n self.encoder_out_channels = self.autoencoder_hidden_channels * 8\n \n self.lr = 3e-4\n \n self.n_classes = 10\n self.img_height = 28\n self.img_width = 28\n self.norm_mean = None\n self.norm_std = None\n \n self._init_dirs()\n \n def _init_dirs(self):\n self.path_to_data.mkdir(exist_ok=True)\n self.path_to_output.mkdir(exist_ok=True)\n self.path_to_models.mkdir(exist_ok=True)\n self.path_to_predictions.mkdir(exist_ok=True)\n self.path_to_tensorboard_logs.mkdir(exist_ok=True)\n \n \nopt = Config()\n","sub_path":"it-jim-labs/04-problem/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52040664","text":"import torch\nimport torchvision\nimport numpy as np\n\nclass PermutedMNISTDataLoader(torchvision.datasets.MNIST):\n \n def __init__(self, source='./mnist_data', train = True, shuffle_seed = 
None):\n super(PermutedMNISTDataLoader, self).__init__(source, train, download=True)\n \n self.train = train\n if self.train:\n self.permuted_train_data = torch.stack(\n [img.type(dtype=torch.float32).view(-1)[shuffle_seed] / 255.0\n for img in self.train_data])\n else:\n self.permuted_test_data = torch.stack(\n [img.type(dtype=torch.float32).view(-1)[shuffle_seed] / 255.0\n for img in self.test_data])\n \n def __getitem__(self, index):\n \n if self.train:\n input, label = self.permuted_train_data[index], self.train_labels[index]\n else:\n input, label = self.permuted_test_data[index], self.test_labels[index]\n \n return input, label\n\n def sample(self, size):\n import random # needed by random.sample below; was missing from the module imports\n return [img for img in self.permuted_train_data[random.sample(range(len(self.permuted_train_data)), size)]]\n \n '''\n def __len__(self):\n if self.train:\n return self.train_data.size()\n else:\n return self.test_data.size()\n '''","sub_path":"pyfiles/PMNISTDataLoader.py","file_name":"PMNISTDataLoader.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"466925864","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass Dashboard(Model):\n \"\"\"Dashboard.\n\n :param _links:\n :type _links: :class:`ReferenceLinks `\n :param description: Description of the dashboard.\n :type description: str\n :param eTag: Server defined version tracking value, used for edit collision detection.\n :type eTag: str\n :param id: ID of the Dashboard. Provided by service at creation time.\n :type id: str\n :param name: Name of the Dashboard.\n :type name: str\n :param owner_id: ID of the Owner for a dashboard. For any legacy dashboards, this would be the unique identifier for the team associated with the dashboard.\n :type owner_id: str\n :param position: Position of the dashboard, within a dashboard group. If unset at creation time, position is decided by the service.\n :type position: int\n :param refresh_interval: Interval for client to automatically refresh the dashboard. 
Expressed in minutes.\n :type refresh_interval: int\n :param url:\n :type url: str\n :param widgets: The set of Widgets on the dashboard.\n :type widgets: list of :class:`Widget `\n \"\"\"\n\n _attribute_map = {\n '_links': {'key': '_links', 'type': 'ReferenceLinks'},\n 'description': {'key': 'description', 'type': 'str'},\n 'eTag': {'key': 'eTag', 'type': 'str'},\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'owner_id': {'key': 'ownerId', 'type': 'str'},\n 'position': {'key': 'position', 'type': 'int'},\n 'refresh_interval': {'key': 'refreshInterval', 'type': 'int'},\n 'url': {'key': 'url', 'type': 'str'},\n 'widgets': {'key': 'widgets', 'type': '[Widget]'}\n }\n\n def __init__(self, _links=None, description=None, eTag=None, id=None, name=None, owner_id=None, position=None, refresh_interval=None, url=None, widgets=None):\n super(Dashboard, self).__init__()\n self._links = _links\n self.description = description\n self.eTag = eTag\n self.id = id\n self.name = name\n self.owner_id = owner_id\n self.position = position\n self.refresh_interval = refresh_interval\n self.url = url\n self.widgets = widgets\n","sub_path":"vsts/vsts/dashboard/v4_1/models/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"644360415","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\n\n\ndef get_html(url):\n r = requests.get(url)\n if r.ok:\n return r.text\n print(r.status_code)\n\n\ndef write_csv(data):\n with open('sw.csv', 'w', encoding='utf8') as f:\n writer = csv.writer(f)\n writer.writerows(data)\n\n\ndef get_playlist(html):\n soup = BeautifulSoup(html, 'lxml')\n\n table = soup.find_all('table')[1]\n trs = table.find_all('tr')\n\n playlist = []\n\n for tr in trs[1:]:\n tmp_lst = []\n name = tr.find_all('td')[1].text.strip('')\n tmp_lst.append({'title': name})\n playlist.append(tmp_lst)\n\n write_csv(playlist)\n\n\ndef main():\n url = 'http://s5.radioboss.fm:8380/played.html?sid=1'\n r = requests.get(url)\n\n while True:\n if r.ok:\n get_playlist(get_html(url))\n time.sleep(10)\n else:\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"app/sw_pl.py","file_name":"sw_pl.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"150233575","text":"from primitives import *\n\n\nclass Mouse:\n def __init__(self, world):\n self.world = world\n self.body = Circle(10)\n\n self.delta = Vector()\n\n pygame.mouse.set_visible(True)\n\n @staticmethod\n def getScreenPos():\n pos = Vector()\n pos.set(pygame.mouse.get_pos())\n return pos\n\n def updatePos(self):\n old = self.body.pos\n pos = Mouse.getScreenPos()\n pos.div(self.world.viewer.display.get_size())\n pos.mult(self.world.viewer.size)\n pos.add(self.world.viewer.pos)\n self.body.pos = pos\n self.delta.set(pos)\n self.delta.sub(old)\n\n def update(self):\n self.updatePos()\n\n if pygame.mouse.get_pressed(3)[0]:\n self.world.viewer.pos.sub(self.delta)\n self.body.pos.sub(self.delta)\n\n for event in self.world.events:\n if event.type == 5:\n amt = .1\n\n if event.button == 4:\n self.world.viewer.zoom(1 + amt)\n elif event.button == 5:\n self.world.viewer.zoom(1 - amt)\n\n def draw(self):\n # pygame.draw.circle(self.world.viewer.display, (0, 0, 0), Mouse.getScreenPos(), self.body.radius)\n 
pass\n","sub_path":"mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"524033322","text":"import pygame\r\nfrom imagerect import ImageRect\r\n\r\n\r\nclass Map:\r\n BLOCK_SIZE = 48\r\n\r\n def __init__(self, screen, worldfile, rockfile, metalfile, stonefile, brickfile, quesfile, pipefile, pipefile_1, coinfile, polefile, flagfile, topfile, castlefile):\r\n self.screen = screen\r\n self.screen_rect = screen.get_rect()\r\n self.filename = worldfile\r\n with open(self.filename, 'r') as f:\r\n self.rows = f.readlines()\r\n\r\n self.rock = []\r\n self.stone = []\r\n self.metal = []\r\n self.brick = []\r\n self.q = []\r\n self.pipe = []\r\n self.pipe_1 = []\r\n self.coins = []\r\n self.pole = []\r\n self.flags = []\r\n self.tops = []\r\n self.castle = []\r\n sz = Map.BLOCK_SIZE\r\n\r\n self.rock_block = ImageRect(screen, rockfile, sz, sz)\r\n self.stone_block = ImageRect(screen, stonefile, sz, sz)\r\n self.metal_block = ImageRect(screen, metalfile, sz, sz)\r\n self.brick_block = ImageRect(screen, brickfile, sz, sz)\r\n self.q_block = ImageRect(screen, quesfile, sz, sz)\r\n self.pipe_block = ImageRect(screen, pipefile, sz, sz)\r\n self.long_pipe = ImageRect(screen, pipefile_1, sz, sz)\r\n self.coin = ImageRect(screen, coinfile, sz, sz)\r\n self.pole_block = ImageRect(screen, polefile, sz, sz)\r\n self.flag = ImageRect(screen, flagfile, sz, sz)\r\n self.top = ImageRect(screen, topfile, sz, sz)\r\n self.cas = ImageRect(screen, castlefile, sz, sz)\r\n\r\n self.deltax = self.deltay = Map.BLOCK_SIZE\r\n self.spawnx = 0\r\n self.spawny = 0\r\n self.map_shift = 0\r\n\r\n self.build()\r\n\r\n def __str__(self): return 'maze(' + self.filename + ')'\r\n\r\n def build(self):\r\n r = self.rock_block.rect\r\n w, h = r.width, r.height\r\n dx, dy = self.deltax, self.deltay\r\n\r\n for nrow in range(len(self.rows)):\r\n row = self.rows[nrow]\r\n for ncol in range(len(row)):\r\n col = row[ncol]\r\n if col == 's':\r\n self.stone.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == 'M':\r\n self.spawnx = ncol * dx\r\n self.spawny = nrow * dy\r\n if col == 'm':\r\n self.metal.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == 'r':\r\n self.rock.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == 'b':\r\n self.brick.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == 'q':\r\n self.q.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == 'P':\r\n self.pipe.append(pygame.Rect(ncol * dx, nrow * dy - 28, self.pipe_block.rect.width, self.pipe_block.rect.height + 100))\r\n if col == 'p':\r\n self.pipe_1.append(pygame.Rect(ncol * dx, nrow * dy - 28, self.long_pipe.rect.width,\r\n self.long_pipe.rect.height))\r\n if col == 'c':\r\n self.coins.append(pygame.Rect(ncol * dx, nrow * dy, w, h))\r\n if col == '|':\r\n self.pole.append(pygame.Rect(ncol * dx, nrow * dy, self.pole_block.rect.width,\r\n self.pole_block.rect.height))\r\n if col == '>':\r\n self.flags.append(pygame.Rect(ncol * dx, nrow * dy, self.flag.rect.width,\r\n self.flag.rect.height))\r\n if col == 'o':\r\n self.tops.append(pygame.Rect(ncol * dx, nrow * dy, self.top.rect.width, self.top.rect.height))\r\n if col == 'C':\r\n self.castle.append(pygame.Rect(ncol * dx, nrow * dy, self.cas.rect.width, self.cas.rect.height))\r\n\r\n # shift blocks depending on mario's relation to the middle of the screen to simulate scrolling\r\n def shift_level(self, x):\r\n self.map_shift = x\r\n\r\n for block in 
self.stone:\r\n block.x += self.map_shift\r\n for block in self.metal:\r\n block.x += self.map_shift\r\n for block in self.rock:\r\n block.x += self.map_shift\r\n for block in self.brick:\r\n block.x += self.map_shift\r\n for block in self.q:\r\n block.x += self.map_shift\r\n for block in self.pipe:\r\n block.x += self.map_shift\r\n for block in self.pipe_1:\r\n block.x += self.map_shift\r\n for block in self.coins:\r\n block.x += self.map_shift\r\n for block in self.pole:\r\n block.x += self.map_shift\r\n for block in self.flags:\r\n block.x += self.map_shift\r\n for block in self.tops:\r\n block.x += self.map_shift\r\n for block in self.castle:\r\n block.x += self.map_shift\r\n\r\n def blitme(self):\r\n for rect in self.rock:\r\n if rect.right == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.rock_block.image, rect)\r\n for rect in self.stone:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.stone_block.image, rect)\r\n for rect in self.metal:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.metal_block.image, rect)\r\n for rect in self.brick:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.brick_block.image, rect)\r\n for rect in self.q:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.q_block.image, rect)\r\n for rect in self.pipe:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(pygame.transform.scale(self.pipe_block.image, (75, 75)), rect)\r\n for rect in self.pipe_1:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(pygame.transform.scale(self.long_pipe.image, (75, 75)), rect)\r\n for rect in self.coins:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n # self.pipe_block.image = pygame.transform.scale(self.pipe_block.image, (50, 50))\r\n self.screen.blit(pygame.transform.scale(self.coin.image, (75, 75)), rect)\r\n for rect in self.pole:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.pole_block.image, rect)\r\n for rect in self.flags:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.flag.image, rect)\r\n for rect in self.tops:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.top.image, rect)\r\n for rect in self.castle:\r\n if rect.left == self.screen_rect.left:\r\n del rect\r\n else:\r\n self.screen.blit(self.cas.image, rect)","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"304132064","text":"import HPMA115S0\nimport time\nimport sys\nimport urllib.request\nimport I2C_LCD_driver\nimport serial\nimport string\n\nmyAPI = '7J7INFSPI6A8BLPR'\n\nser=serial.Serial(\"/dev/ttyS0\",baudrate=9600,timeout=0.5)\nmylcd = I2C_LCD_driver.lcd()\nmylcd.lcd_display_string(\"Sleep\",1,0)\n\ntime.sleep(30)\n\nmylcd.lcd_display_string(\"Breathe\",1,0)\n\ndef dust_gps():\n try:\n print(\"Starting\")\n hpma115S0 = HPMA115S0.HPMA115S0(\"/dev/ttySOFT0\")\n\n hpma115S0.init()\n hpma115S0.startParticleMeasurement()\n\n while 1:\n if (hpma115S0.readParticleMeasurement()):\n pm2_5 = (hpma115S0._pm2_5)\n mylcd.lcd_clear()\n# mylcd.lcd_display_string(\"PM2.5: \" + str(pm2_5),1,0)\n print(\"PM2.5: %d ug/m3\" % (pm2_5))\n print(\"PM10: %d ug/m3\" % 
(hpma115S0._pm10))\n\n years_lost=round(pm2_5*0.098,2)\n# years_lost=round(years_lost,2)\n mylcd.lcd_display_string(\"PM2.5 = \"+str(pm2_5)+\"ug/m3\",1,0)\n mylcd.lcd_display_string(\"Life lost \"+str(years_lost)+\"yr\",2,0)\n\n data=ser.readline()\n while 1:\n# data=ser.readline()\n if((int(data[5])==65) and (int(data[4])==71)):\n break;\n else:\n data=ser.readline()\n print(data)\n lat1=int(data[17:19])\n lat2=int(data[19:21])\n lat3=int(data[22:24])\n latdir=chr(data[28])\n lat=round(lat1+ lat2/60+lat3/3600,7)\n lat=str(lat)+latdir\n# mylcd.lcd_display_string(lat,1,0)\n print(lat)\n long1=int(data[30:33])\n long2=int(data[33:35])\n long3=int(data[36:38])\n longdir=str(chr(data[42]))\n long=round(long1+long2/60+long3/3600,7)\n long=str(long)+longdir\n print(long)\n # mylcd.lcd_display_string(str(long)+longdir,2,0)\n time.sleep(0.5)\n\n conn = urllib.request.urlopen('http://rbiot.solveninja.org:3000/update?api_key='+(myAPI)+'&field1='+str(pm2_5)+'&field2='+str(lat)+'&field3='+str(long))\n print (conn)\n print (conn.read())\n conn.close()\n time.sleep(15)\n\n except(ValueError or IndexError):\n print(\"Error\")\n\nwhile 1:\n dust_gps();\n","sub_path":"HPMA115S0_Python_library/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"391708100","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###############################################################################\n# Name: arpeggio.py\n# Purpose: PEG parser interpreter\n# Author: Igor R. Dejanović \n# Copyright: (c) 2009-2016 Igor R. Dejanović \n# License: MIT License\n#\n# Arpeggio is an implementation of packrat parser interpreter based on PEG\n# grammars.\n# Parsers are defined using python language construction or PEG language.\n###############################################################################\n\n__author__ = \"Igor R. Dejanović \"\n__version__ = \"1.5\"\n\nfrom setuptools import setup\n\nNAME = 'Arpeggio'\nVERSION = __version__\nDESC = 'Packrat parser interpreter'\nAUTHOR = 'Igor R. 
Dejanovic'\nAUTHOR_EMAIL = 'igor DOT dejanovic AT gmail DOT com'\nLICENSE = 'MIT'\nURL = 'https://github.com/igordejanovic/Arpeggio'\nDOWNLOAD_URL = 'https://github.com/igordejanovic/Arpeggio/archive/v{}.tar.gz'\\\n .format(VERSION)\n\nsetup(\n name = NAME,\n version = VERSION,\n description = DESC,\n author = AUTHOR,\n author_email = AUTHOR_EMAIL,\n maintainer = AUTHOR,\n maintainer_email = AUTHOR_EMAIL,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n packages = [\"arpeggio\"],\n keywords = \"parser packrat peg\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development :: Interpreters',\n 'Topic :: Software Development :: Compilers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python'\n ]\n\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"624472928","text":"'''\nItem tracking object.\n'''\n\nimport operator\nimport os.path\nimport typing\n\nfrom ..config.images import image\n\n__all__ = 'ItemObj',\n\n\nclass ItemObj(object):\n '''\n Inventory item\n\n Progressive items are listed as one.\n\n Instance variables:\n identifier: item identifier string\n location: (column, row) location on display\n length: number of item progressions\n displayname: name(s) displayed in UI\n icon: path to image file(s) associated with item\n disabled: black&white conversion\n link: items (and requirement) linked with this item\n linkitems: item objects linked to this item\n default: default numer of items in inventory\n inventory: current number of items in inventory\n tracker: world state tracker\n '''\n\n def __init__(self, identifier: str, location: typing.Sequence[int],\n length: int, displayname: typing.Sequence[str],\n icon: typing.Sequence[str], disabled: typing.Sequence[int],\n links: typing.Mapping[str, typing.Sequence[int]],\n default: int, tracker):\n '''\n Args:\n identifier: internal item name\n location: (row, column) of item on tracker GUI\n length: number of progressions in item\n displayname: name(s) of item displayed on tracker GUI\n icon: icon(s) used on tracker GUI\n disabled: black&white conversion\n links: items linked with posession of item\n default: initial item progression\n tracker: world state tracker\n '''\n\n self.identifier = identifier\n self.location = location\n self.length = length\n self.displayname = displayname\n self.icon = tuple(image(i) for i in icon)\n self.disabled = disabled\n self.link = links\n self.linkitems = {}\n self.default = default\n self.inventory = default\n self.tracker = tracker\n self.restore_inventory(self.default)\n\n def index(self) -> int:\n '''\n Return current displayname/image index.\n\n Returns:\n int: index used for sequence attributes\n '''\n\n idx = self.inventory if self.inventory < 1 else self.inventory - 1\n return idx\n\n def display(self) -> str:\n '''\n Return currently applicable item display string.\n\n Returns:\n str: name to be displayed in application\n '''\n\n idx = self.index()\n item_name = self.displayname[idx]\n return item_name\n\n def state(self) -> bool:\n '''\n Return current state of item.\n\n Returns:\n str: True if item is active, else False\n '''\n\n return 
self.inventory > 0\n\n def increase(self, *args) -> None:\n '''\n Left-click on item\n '''\n\n if self.inventory < self.length:\n self.inventory += 1\n self.update_links()\n self.tracker.set_item(self.identifier, self.inventory)\n\n def decrease(self, *args) -> None:\n '''\n Right-click on item\n '''\n \n if self.inventory > 0:\n self.inventory -= 1\n self.update_links()\n self.tracker.set_item(self.identifier, self.inventory)\n\n def reset(self) -> None:\n '''\n Reset item.\n '''\n\n to_remove = self.inventory - self.default\n if to_remove > 0:\n for _ in range(to_remove):\n self.decrease(_)\n elif to_remove < 0:\n for _ in range(-to_remove):\n self.increase(_)\n\n def restore_inventory(self, quantity: int) -> None:\n '''\n Set inventory number.\n\n Args:\n quantity: number to set inventory to\n '''\n\n self.inventory = quantity\n self.update_links()\n self.tracker.set_item(self.identifier, quantity)\n\n def add_links(linkitems: typing.Sequence) -> None:\n '''\n Register linked items.\n\n Args:\n linkitems: list of item objects\n '''\n\n for li in linkitems:\n self.linkitems[li.identifier] = li\n\n def update_links(self) -> None:\n '''\n Update state of linked items according to current inventory.\n '''\n\n for li in self.linkitems:\n if self.inventory in self.link[li]:\n self.linkitems[li].restore_inventory(1)\n else:\n self.linkitems[li].restore_inventory(0)\n","sub_path":"z3tracker/items/itemobj.py","file_name":"itemobj.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"431851077","text":"from BaseClasses import Region, Entrance, Location, MultiWorld, Item\nfrom .Technologies import tech_table, recipe_sources, technology_table, advancement_technologies, required_technologies\nfrom .Shapes import get_shapes\n\n\ndef gen_factorio(world: MultiWorld, player: int):\n static_nodes = world._static_nodes = {\"automation\", \"logistics\"} # turn dynamic/option?\n for tech_name, tech_id in tech_table.items():\n tech_item = Item(tech_name, tech_name in advancement_technologies, tech_id, player)\n tech_item.game = \"Factorio\"\n if tech_name in static_nodes:\n loc = world.get_location(tech_name, player)\n loc.item = tech_item\n loc.locked = True\n loc.event = tech_item.advancement\n else:\n world.itempool.append(tech_item)\n world.custom_data[player][\"custom_technologies\"] = custom_technologies = set_custom_technologies(world, player)\n set_rules(world, player, custom_technologies)\n\n\ndef factorio_create_regions(world: MultiWorld, player: int):\n menu = Region(\"Menu\", None, \"Menu\", player)\n crash = Entrance(player, \"Crash Land\", menu)\n menu.exits.append(crash)\n nauvis = Region(\"Nauvis\", None, \"Nauvis\", player)\n nauvis.world = menu.world = world\n for tech_name, tech_id in tech_table.items():\n tech = Location(player, tech_name, tech_id, nauvis)\n nauvis.locations.append(tech)\n tech.game = \"Factorio\"\n crash.connect(nauvis)\n world.regions += [menu, nauvis]\n\ndef set_custom_technologies(world: MultiWorld, player: int):\n custom_technologies = {}\n world_custom = getattr(world, \"_custom_technologies\", {})\n world_custom[player] = custom_technologies\n world._custom_technologies = world_custom\n allowed_packs = world.max_science_pack[player].get_allowed_packs()\n for technology_name, technology in technology_table.items():\n custom_technologies[technology_name] = technology.get_custom(world, allowed_packs, player)\n return custom_technologies\n\ndef set_rules(world: MultiWorld, 
player: int, custom_technologies):\n shapes = get_shapes(world, player)\n if world.logic[player] != 'nologic':\n from worlds.generic import Rules\n\n for tech_name, technology in custom_technologies.items():\n location = world.get_location(tech_name, player)\n Rules.set_rule(location, technology.build_rule(player))\n prequisites = shapes.get(tech_name)\n if prequisites:\n locations = {world.get_location(requisite, player) for requisite in prequisites}\n Rules.add_rule(location, lambda state,\n locations=locations: all(state.can_reach(loc) for loc in locations))\n\n # get all science pack technologies (but not the ability to craft them)\n world.completion_condition[player] = lambda state: all(state.has(technology, player)\n for technology in advancement_technologies)\n","sub_path":"worlds/factorio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"52594665","text":"from bottle import get, post, request, run\n\ndrawform = '''\n
<html><body>\n    どのボトルをお探しですか\n    <form action=\"/bottlesearch\" method=\"post\">\n    Brand: <input name=\"brand\" type=\"text\"/>\n    Since: <input name=\"since\" type=\"text\"/>\n    <input type=\"submit\"/>\n    </form>\n    </body></html>
\n \n '''\n\n@get('/bottlesearch')\ndef search():\n return drawform\n\n# postメソッドでアクセスされたとき実行\n@post('/bottlesearch')\ndef result():\n brand = request.forms.brand\n since = request.forms.since\n result = ('
{0}の{1}年のものですね。お探しします。
').format(brand,since)\n return drawform + result\n\nrun(host=\"localhost\", port= 8080)\n\n","sub_path":"bottlesearch.py","file_name":"bottlesearch.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"321758302","text":"import os.path\n\nALLOWED_HOSTS = ['www.yellowsoft.com.br','yellowsoft.com.br','.yellowsoft.com.br']\nAPP_SERVER = 'http://www.yellowsoft.com.br'\n\nAPP_BASE = 'gmf'\nAPP_NOME = 'YellowSoft WEB Systems'\nAPP_TITLE = u'+F (MAIS FIDELIDADE)'\nAPP_ROOT = '/sgmf/'\n#APP_ROOT = '/'\n\nAPP_CSS = u'YELLOWSOFT'\nAPP_SYS = u'SGMF'\n\nAPP_IMG = \"http://www.yellowsoft.com.br/sgmf/static/img/logorel.jpg\"\nAPP_MANUAL = \"http://www.yellowsoft.com.br/sgmf/static/doc/pt-br/build/html/index.html\"\nAPP_ASSINATURA = \"http://www.yellowsoft.com.br/sgmf/static/img/assinatura.png\"\n\nAPP_VERSION = '1.0.0 [Jun/2015]'\nAPP_PROJECT = 'YELLOWSOFT/SG+Fidelidade'\nAPP_CLIENT = 'YELLOWSOFT'\n\nAPP_ALTERA_FILIAL = \"altera-filial\"\n\nCKEDITOR_MEDIA_PREFIX = APP_ROOT + \"static/js/ckeditor/\"\nCKEDITOR_UPLOAD_PREFIX = APP_ROOT + \"static/upload/\"\nCKEDITOR_UPLOAD_PATH = os.path.join(os.path.dirname(__file__), 'static/upload')\n\nHTTP_PROXY = '127.0.0.1:80'\nAUTH_LINK = 1\n\nEMAIL_POP = 'pop.yellowsoft.com.br'\nEMAIL_HOST = 'smtp.yellowsoft.com.br'\nEMAIL_HOST_USER = 'comercial@yellowsoft.com.br'\nEMAIL_HOST_PASSWORD = 'mlopes60'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\nEMAIL_HOST_ADDR = 'comercial@yellowsoft.com.br'\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n#TEMPLATE_CONTEXT_PROCESSORS = (\"django.contrib.auth.context_processors.auth\", \n# \"django.core.context_processors.debug\", \n# \"django.core.context_processors.i18n\", \n# \"django.core.context_processors.media\", \n# \"django.core.context_processors.static\", \n# 'django.core.context_processors.request',)\n# #'django.contrib.messages.context_processors.messages')\n\n#versao 1.4\nTEMPLATE_CONTEXT_PROCESSORS = (\n# \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n #\"django.contrib.messages.context_processors.messages\",\n)\n\nADMINS = (\n # ('Your Name', 'your_email@domain.com'),\n)\n\nMANAGERS = ADMINS\n\n# Config para uso do Django-pgpool\n\nDATABASES = {\n 'default': {\n #'ENGINE' : 'django.db.backends.postgresql_psycopg2', # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'ENGINE' : 'django_postgrespool', # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME' : 'yellowsoft17', # Or path to database file if using sqlite3.\n 'USER' : 'yellowsoft17', # Not used with sqlite3.\n 'PASSWORD' : 'ysoft17', # Not used with sqlite3.\n 'HOST' : 'pgsql.yellowsoft.com.br', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT' : '', # Set to empty string for default. Not used with sqlite3.\n 'OPTIONS': {\n 'autocommit': True,\n #'client_encoding': 'UTF8',\n #'default_transaction_isolation': 'read committed',\n #'timezone': 'America/Sao_Paulo',\n }\n }\n}\n\nDATABASE_POOL_ARGS = {\n 'max_overflow': 30,\n 'pool_size': 5,\n 'recycle': 30,\n}\n\n# Local time zone for this installation. 
Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Sao_Paulo'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'pt-BR.utf8'\n\nSITE_ID = 1\nSESSION_COOKIE_NAME = 'gmfid'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\nUSE_L10N = True\n\n#USE_THOUSAND_SEPARATOR = True\n#NUMBER_GROUPING = 3\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = '/home/yellowsoft/apps_wsgi/sgmf/static/'\n\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = APP_ROOT+'static/'\n\nSTATIC_ROOT = '/home/yellowsoft/apps_wsgi/sgmf/static/'\n\nSTATIC_URL = '/static/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/media/'\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'l@ek)3l_c%i2nky!3&niay#p1fm1938$cg=0*ljkbrq)9*bbaw'\n\n#CSRF_FAILURE_VIEW\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n #'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.app_directories.Loader',\n #'django.template.loaders.eggs.load_template_source',\n\n)\n\nMIDDLEWARE_CLASSES = (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'sgmf.yls.middleware.AppMiddleware',\n #'django.middleware.locale.LocaleMiddleware',\n #'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\n#INTERNAL_IPS = ('127.0.0.1', '189.38.85.10','177.12.169.38','10.5.13.69','10.0.1.2') #,'189.5.243.82')\n\nDEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n)\n\nROOT_URLCONF = 'sgmf.urls'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(os.path.dirname(__file__),'templates'),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 
'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'sgmf.yls',\n 'sgmf.yls.base',\n 'sgmf.yls.ged',\n 'sgmf.yls.gen',\n 'sgmf.yls.lov',\n 'sgmf.gmf',\n 'sgmf.gmf.age',\n)\n\n# Modulo de perfil\nAUTH_PROFILE_MODULE = \"yls.perfil\"\nLOGIN_URL = APP_ROOT + 'login'\n","sub_path":"sgmf/king/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176560430","text":"'''\nCopyright: (c) 2017, Vladimir Vons, UA\nAuthor: Vladimir Vons \nCreated: 2018.06.26\nLicense: GNU, see LICENSE for more details\n'''\n\nimport machine\nimport time\nimport os\n\n\nclass TSerial():\n def __init__(self, aPort: int, aSpeed: int):\n # diasable terminal echo to uart\n os.dupterm(None, aPort)\n\n self.uart = machine.UART(aPort)\n self.uart.init(9600, timeout = 1000)\n\n def Send(self, aData, aRcvLen):\n try:\n self.uart.write(aData)\n time.sleep(0.2)\n Data = self.uart.read(aRcvLen)\n except:\n Data = None\n\n if not (Data and len(Data) == aRcvLen and self.IsCheckSum(Data)):\n Data = None\n return Data\n\n def IsCheckSum(self, aData):\n pass\n\n","sub_path":"src_arch/Inc/Dev/Serial.py","file_name":"Serial.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479659803","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nfrom collections import defaultdict\r\nimport subprocess\r\n\r\n# FileName = '/home/utkinai2/Project1/lefties.csv'\r\nConstOffSet = 2000\r\nLineValuesORF = ()\r\n\r\nLeftCRISPRsWithORFFile = 'C:/Users/utkinai2/Desktop/Ipynb-scripts/OutputTest139.txt'\r\nFileName = 'C:/Users/utkinai2/Desktop/Ipynb-scripts/lefties.csv'\r\n# LeftCRISPRsWithORFFile = 'C:/Users/Ira/Desktop/Study/NIH/OutputTest134.txt'\r\n# FileName = 'C:/Users/Ira/Desktop/Study/NIH/lefties.csv'\r\ncount = 0\r\n\r\n\r\ndef findORFStartStop(File):\r\n ORFRanges = []\r\n Lines = [line[:-1].rstrip(' ') for line in open(File, \"r\")]\r\n for i in Lines:\r\n LineValuesORF = str(i)\r\n RangeMatch = re.search(r'range\\s([0-9]{1,})..([0-9]{1,})', LineValuesORF)\r\n if RangeMatch:\r\n range = str(RangeMatch.group(0))[6:]\r\n range = [int(i) for i in range.split(\"..\")]\r\n if range[0] < range[1]:\r\n ORFRanges.append([range[0] + SeqStart + ConstOffSet, range[1] + SeqStart + ConstOffSet]) # но для обратных рамок же иначе! там будет SeqEnd - range[0] (!) 
- ConstOFFSet, SeqEnd-range[1] - ConstOFF\r\n else:\r\n ORFRanges.append([SeqEnd - range[0] - ConstOffSet, SeqEnd - range[1] - ConstOffSet])\r\n return ORFRanges\r\n\r\ndef calculateCoverage(CrisprSeqStart, CrisprSeqEnd, ORFSeqStart, ORFSeqEnd):\r\n CoverageType = []\r\n if ORFSeqStart < ORFSeqEnd: # frames 1, 2, 3\r\n if (CrisprSeqStart > ORFSeqEnd) | (CrisprSeqEnd < ORFSeqStart) | (\r\n (CrisprSeqStart < ORFSeqStart) & (CrisprSeqEnd > ORFSeqEnd)):\r\n CoverageType.append([0, 'None'])\r\n if (CrisprSeqStart <= ORFSeqStart) & (CrisprSeqEnd <= ORFSeqEnd) & (CrisprSeqEnd >= ORFSeqStart):\r\n Coverage = round((CrisprSeqEnd - ORFSeqStart) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n print(Coverage)\r\n CoverageType.append([Coverage, 'right'])\r\n if (CrisprSeqStart > ORFSeqStart) & (CrisprSeqEnd > ORFSeqEnd) & (CrisprSeqStart < ORFSeqEnd):\r\n Coverage = round((ORFSeqEnd - CrisprSeqStart) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n CoverageType.append([Coverage, 'left'])\r\n if (CrisprSeqStart > ORFSeqStart) & (CrisprSeqEnd < ORFSeqEnd):\r\n Coverage = round((ORFSeqEnd - ORFSeqStart) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n CoverageType.append([Coverage, 'inside'])\r\n # if ORFSeqStart > ORFSeqEnd: # frames -1, -2, -3\r\n # if (CrisprSeqStart > ORFSeqStart) | (CrisprSeqEnd < ORFSeqEnd) | (\r\n # (CrisprSeqStart < ORFSeqEnd) & (CrisprSeqEnd > ORFSeqStart)):\r\n # CoverageType.append([0, 'None'])\r\n # if (CrisprSeqStart < ORFSeqEnd) & (CrisprSeqEnd < ORFSeqStart) & (CrisprSeqEnd > ORFSeqEnd):\r\n # Coverage = round((CrisprSeqEnd - ORFSeqEnd) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n # CoverageType.append([Coverage, 'right'])\r\n # if (CrisprSeqStart > ORFSeqEnd) & (CrisprSeqEnd > ORFSeqStart) & (CrisprSeqStart < ORFSeqStart):\r\n # Coverage = round((ORFSeqStart - CrisprSeqStart) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n # CoverageType.append([Coverage, 'left'])\r\n # if (CrisprSeqStart > ORFSeqEnd) & (CrisprSeqEnd < ORFSeqStart):\r\n # Coverage = round((ORFSeqStart - ORFSeqEnd) / (CrisprSeqEnd - CrisprSeqStart), 1)\r\n # CoverageType.append([Coverage, 'inside'])\r\n return CoverageType\r\n\r\nfor Line in open(FileName, \"r\"):\r\n count += 1\r\n if count < 2:\r\n continue # header is skipped now\r\n LineValues = Line[:-1].split(\",\") # the last one is perenos stroki, thats'why -1\r\n SeqStart = int(LineValues[3]) # Вручную вычтены 2000 (и прибавлены для следующей строки) при получении текстового файла с ОРФами для теста\r\n SeqEnd = int(LineValues[4])\r\n #print(SeqEnd , ' ', SeqStart)\r\n # но в текстовом файле (ниже) будет диапазон -2000 + SeqStart --- SeqEnd +2000 ! 
KEEP IN MIND\r\n\r\n subprocess.call(\"blastdbcmd -db /panfs/pan1/prokdata/db/all1603.nt\" + \" -entry \" + LineValues[2] + \" -range \" +\r\n SeqStart + \"-\" + SeqEnd, shell=True)\r\n\r\n\r\n# This part checks the length of ORF, then if length of array is shorter, it saves its ID and checks\r\n# whether the found ORF covers partially or fully the given array (through range parsing)\r\n\r\n ORFranges = findORFStartStop(LeftCRISPRsWithORFFile)\r\n print(ORFranges)\r\n CoverageTypesArrays = []\r\n for range in ORFranges:\r\n for elem in calculateCoverage(SeqStart, SeqEnd, range[0], range[1]):\r\n CoverageTypesArrays.append(elem)\r\n # print(SeqStart,' ', SeqEnd,' ', range[0],' ', range[1])\r\n # print(CoverageTypesArrays)\r\n\r\n BestCoverage = 0\r\n BestORFType = []\r\n for i in CoverageTypesArrays:\r\n if BestCoverage <= i[0]:\r\n BestORFType = i\r\n BestCoverage = i[0]\r\n print(BestORFType)\r\n\r\n\r\n #if LengthMatch:\r\n #length = LengthMatch.group(1)\r\n # SeqMatch = re.findall(r'(CP[0-9]{5,}.[0-9])', LineValuesORF)\r\n # for i in SeqMatch:\r\n #SeqBoundaries = [SeqStart + range[0] + ConstOffSet, SeqStart + range[1] + ConstOffSet]\r\n # if abs(SeqBoundaries[1] - SeqBoundaries[0]) > MaxORF:\r\n #print(dict(GoodRange))\r\n# if (range[1] > range[0])&(SeqStart < SeqStart - 2000 + range[0] < SeqEnd)&(SeqStart - 2000 + range[1] > SeqEnd):\r\n# # frames 1, 2, 3, partially covered, ORF covers only SeqEnd\r\n# GoodRange['right'].append(SeqBoundaries)\r\n# if (range[1] > range[0]) & (range[0] < 2000) & (SeqStart - 2000 + range[1] < SeqEnd):\r\n# # frames 1, 2, 3, partially covered, ORF covers only SeqStart\r\n# GoodRange['left'].append(SeqBoundaries)\r\n# if (range[1] > range[0])&(range[0] < 2000)&(SeqStart - 2000 + range[1] > SeqEnd):\r\n# # frames 1, 2, 3, fully covered\r\n# GoodRange['inside'].append(SeqBoundaries)\r\n# if (range[1] < range[0])&(SeqStart < SeqStart - 2000 + range[1] < SeqEnd)&(SeqStart - 2000 + range[0] > SeqEnd):\r\n# # frames -1, -2, -3, partially covered, ORF starts between SeqStart and SeqEnd\r\n# GoodRange['right'].append(SeqBoundaries)\r\n# if (range[1] < range[0]) & (range[1] < 2000) & (SeqStart - 2000 + range[0] < SeqEnd):\r\n# # frames -1, -2, -3, partially covered, ORF ends between SeqStart and SeqEnd\r\n# GoodRange['left'].append(SeqBoundaries)\r\n# if (range[1] < range[0])&(range[1] < 2000)&(SeqStart - 2000 + range[0] > SeqEnd):\r\n# # frames -1, -2, -3, fully covered\r\n# GoodRange['inside'].append(SeqBoundaries)\r\n# print(list(GoodORFs.items())[-1][1])\r\n# BestBoundaries = list(GoodORFs.items())[-1][1]\r\n\r\n\r\n# ListOfMappedORFs = str()\r\n# ListOfGoodORFs = str()\r\n# for i in list(GoodORFs.values()):\r\n# ListOfGoodORFs += str(i)+ \",\"\r\n# print(ListOfGoodORFs)\r\n# for i in GoodRange.values():\r\n# for j in i:\r\n# ListOfMappedORFs += str(j) + \",\"\r\n# print(ListOfMappedORFs)\r\n\r\n# subprocess.call(\r\n# \"blastdbcmd -db \" + Database + \" -entry_batch \" + ClusterGIListFileName + \" > \" + ClusterFASTA, shell=True)\r\n#","sub_path":"ORFfinder_from_txt_last.py","file_name":"ORFfinder_from_txt_last.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"290350354","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 26 15:30:17 2018\n\n@author: kennedy\n\"\"\"\n#import the required library from skleearn and matplotlib\nfrom sklearn.datasets import load_iris\nfrom matplotlib import pyplot as plt\nimport sys\n\n#load the iris dataset\ndef main():\n 
irisData = load_iris()\n X = irisData.data\n Y = irisData.target\n \n x=0\n y=1\n colors = [\"red\", \"green\", \"blue\"]\n \n \n for i in range(3):\n plt.scatter(X[Y==i][:, x], X[Y==i][:, y], c = colors[i], label = irisData.target_names[i])\n plt.legend()#set label for the features\n \n plt.xlabel(irisData.feature_names[x])\n plt.ylabel(irisData.feature_names[y])\n plt.title(\"Iris Data - size of the sepals only\")\n if len(sys.argv) > 1:\n plt.savefig(sys.argv[1])\n else:\n plt.show()\n \nif __name__ == '__main__':\n main()","sub_path":"pract1prog1.py","file_name":"pract1prog1.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"537348948","text":"# -*- coding: utf-8 -*-\n\nfrom sys import exit\nfrom random import randint,shuffle\nfrom question_sys import QuestionSys\nfrom question import Question\nfrom person import User,Person,UserState\n\n_question_admin = QuestionSys()\n\nchoice_head = \"ABCDEFGH\"\nchoice_head_s = \"abcdefgh\"\n\nglobal player\nplayer = User('')\n\nclass Scene(object):\n def __init__(self,name=''):\n #scene name\n self.name = name\n #question in this scene\n self.question_dict = _question_admin.offer_question()\n #question id\n self.question_id = self.question_dict.get('ID')\n #question\n self.question = self.question_dict.get('question').get_question()\n #choices\n self.choices = self.question_dict.get('question').get_choices()\n\n self.wrong_time = 0\n\n self.attack_value = 10\n\n def enter(self):\n pass\n\n def enter_room(self,room_name):\n print('*'*10,room_name,'*'*10)\n print(\"Your are enter in the %s\"%room_name)\n\n def print_question(self):\n print('请问:%s'%self.question)\n\n def print_choices(self):\n count = len(self.choices) - 1\n if self.choices and count>=3:\n for i,choice in enumerate(self.choices):\n print(\"%s: %s\"%(choice_head[i],choice))\n\n def print_question_and_choices(self):\n self.print_question()\n self.print_choices()\n\n def judge(self,choice):\n index = -1\n if choice in choice_head:\n index = choice_head.find(choice)\n elif choice in choice_head_s:\n index = choice_head_s.find(choice)\n\n if index == -1:\n return False\n else:\n user_answer = self.choices[index]\n if _question_admin.is_right(user_answer,self.question_id):\n return True\n else:\n return False\n\n def get_self_name(self):\n return self.name\n\n def update_question(self):\n self.question_dict = _question_admin.offer_question()\n #question id\n self.question_id = self.question_dict.get('ID')\n #question\n self.question = self.question_dict.get('question').get_question()\n #choices\n self.choices = self.question_dict.get('question').get_choices()\n\n def gamp_loop(self):\n pass\n\n\nclass Death(Scene):\n dead_word = [\"你这沙雕,你看你四不四被自己蠢哭了!\",\n \"大侠,请重新再来!\",\n \"如果老天再给你一次机会,你会选择这样狗带吗?\",\n \"被米库米库掉了,这样子的你开心吗?\",\n \"智商下线,请带上你的脑子!~\"]\n def enter(self):\n print(Death.dead_word[randint(0,len(self.dead_word)-1)])\n exit(1)\n\nclass Success(Scene):\n\n def enter(self):\n print(\"%s 大侠,你真棒!\"%player.name)\n exit(0)\n\n\n\nclass CentralCorridor(Scene):\n\n def __init__(self,name='central_corridor'):\n super(CentralCorridor,self).__init__(name)\n\n def enter(self):\n super(CentralCorridor,self).enter_room('central_corridor')\n print(\"一个白发苍苍的老人来到你面前.\")\n print(\"老人:年轻人我想问你几个问题.\\n你回答出来我才能放你走\")\n return self.gamp_loop()\n\n def gamp_loop(self):\n while player.is_alive():\n self.print_question_and_choices()\n user_answer = input(\"你的选择是>>> \")\n is_right = self.judge(user_answer)\n if not 
is_right:\n self.wrong_time += 1\n player.take_away_blood(self.attack_value)\n print(\"答错啦,注意你的血量!\")\n player.print_blood()\n else:\n #一次通过,active\n if self.wrong_time==0:\n player.get_active()\n player.print_blood()\n return 'laser_weapon_armory'\n #2次,poison\n if self.wrong_time == 2:\n player.get_poison()\n # 4次.weak\n elif self.wrong_time >= 3:\n player.get_weak()\n # update question\n self.update_question()\n\n return 'death'\n\n\nclass LaserWeaponArmory(Scene):\n\n def __init__(self,name='laser_weapon_armory'):\n super(LaserWeaponArmory,self).__init__(name)\n\n def enter(self):\n self.attack_value = 15\n super(LaserWeaponArmory,self).enter_room('laser_weapon_armory')\n print(\"一个不停抖动的电视机来到你面前.\")\n print(\"电视机:年轻人我想问你几个问题.\")\n return self.gamp_loop()\n\n def gamp_loop(self):\n while player.is_alive():\n self.print_question_and_choices()\n user_answer = input(\"你的选择是>>> \")\n is_right = self.judge(user_answer)\n if not is_right:\n self.wrong_time += 1\n player.take_away_blood(self.attack_value)\n print(\"答错啦,注意你的血量!\")\n player.print_blood()\n else:\n #一次通过,active\n if self.wrong_time==0:\n player.get_active()\n player.print_blood()\n return 'the_bridge'\n #2次,poison\n if self.wrong_time == 2:\n player.get_poison()\n # 4次.weak\n elif self.wrong_time >= 3:\n player.get_weak()\n # update question\n self.update_question()\n return 'death'\n\nclass TheBridge(Scene):\n def __init__(self,name='the_bridge'):\n super(TheBridge,self).__init__(name)\n\n def enter(self):\n super(TheBridge,self).enter_room('the_bridge')\n print(\"一个带着萌熊眼罩的少女来到你面前.\")\n print(\"少女:年轻人我想问你个问题.\")\n return self.gamp_loop()\n\n def gamp_loop(self):\n while player.is_alive():\n self.print_question_and_choices()\n user_answer = input(\"你的选择是>>> \")\n is_right = self.judge(user_answer)\n if not is_right:\n self.wrong_time += 1\n player.take_away_blood(self.attack_value)\n print(\"答错啦,注意你的血量!\")\n player.print_blood()\n else:\n #一次通过,active\n if self.wrong_time==0:\n player.get_active()\n player.print_blood()\n return 'escape_pod'\n #2次,poison\n if self.wrong_time == 2:\n player.get_poison()\n # 4次.weak\n elif self.wrong_time >= 3:\n player.get_weak()\n # update question\n self.update_question()\n return 'death'\n\nclass EscapePod(Scene):\n\n def __init__(self,name='escaped_pod'):\n super(EscapePod,self).__init__(name)\n\n\n def enter(self):\n print(\"一个衣冠楚楚的的绅士来到你面前.\")\n print(\"绅士:年轻人我想问你个问题.\")\n return self.gamp_loop()\n\n def gamp_loop(self):\n while player.is_alive():\n self.print_question_and_choices()\n user_answer = input(\"你的选择是>>> \")\n is_right = self.judge(user_answer)\n if not is_right:\n self.wrong_time += 1\n player.take_away_blood(self.attack_value)\n print(\"答错啦,注意你的血量!\")\n player.print_blood()\n else:\n #一次通过,active\n if self.wrong_time==0:\n player.get_active()\n player.print_blood()\n return 'success'\n #2次,poison\n if self.wrong_time == 2:\n player.get_poison()\n # 4次.weak\n elif self.wrong_time >= 3:\n player.get_weak()\n # update question\n self.update_question()\n return 'death'\n\nclass Map(object):\n all_scenes = {\n 'success':Success(),\n 'escape_pod':EscapePod(),\n 'the_bridge':TheBridge(),\n 'laser_weapon_armory':LaserWeaponArmory(),\n 'central_corridor':CentralCorridor(),\n 'death':Death()\n }\n\n def __init__(self,start_scene):\n self.start_scene = start_scene\n\n def next_scene(self,scene_name):\n val = Map.all_scenes.get(scene_name)\n return val\n\n def opening_scene(self):\n return self.next_scene(self.start_scene)\n\n\nclass Engine(object):\n\n def __init__(self,scene_map):\n 
self.scene_map = scene_map\n\n    def play(self):\n        player_name = input(\"请输入你的大名:\")\n        player = User(player_name)\n        cur_scene = self.scene_map.opening_scene()\n        last_scene = self.scene_map.next_scene('success')\n\n        while cur_scene!=last_scene:\n            next_scene_name = cur_scene.enter()\n            cur_scene = self.scene_map.next_scene(next_scene_name)\n        cur_scene.enter()\n\na_map = Map('central_corridor')\na_game = Engine(a_map)\na_game.play()","sub_path":"Src/ex45_game/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463060004","text":"import numpy as np\nimport cv2\n\ndef canny(img, thresh=(10,50), ksize=5):\n    return cv2.Canny(img, thresh[0], thresh[1], ksize)\n\ndef abs_sobel_thresh(img, orient='x', thresh=(0,255), ksize=3):\n    # check for invalid input\n    if len(img) == 0:\n        print('no image found')\n        return\n    if orient not in ['x', 'y']:\n        print('invalid orient')\n        return\n\n    # convert img to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n    # apply sobel operator\n    x = 1 if orient == 'x' else 0\n    y = 1 if orient == 'y' else 0\n    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, x, y, ksize))\n\n    scale_sobel = np.uint8(255*sobel/np.max(sobel))\n    binary_output = np.zeros_like(scale_sobel)\n    binary_output[(scale_sobel>=thresh[0])&(scale_sobel<=thresh[1])] = 1\n\n    return binary_output\n\ndef mag_thresh(img, thresh=(0,255), ksize=3):\n    # convert img to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)\n    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)\n    mag = np.sqrt(sobelx**2 + sobely**2)\n    scale_sobel = np.uint8(mag*255/np.max(mag))\n    binary_output = np.zeros_like(scale_sobel)\n    binary_output[(scale_sobel>=thresh[0])&(scale_sobel<=thresh[1])] = 1\n\n    return binary_output\n\ndef dir_thresh(img, thresh=(0, np.pi/2), ksize=3):\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize))\n    sobely = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize))\n    dir = np.arctan2(sobely, sobelx)\n    binary_output = np.zeros_like(dir)\n    binary_output[(dir>thresh[0])&(dir<thresh[1])]=1\n\n    return binary_output\n\ndef hls_thresh(img, thresh=(100,255)):\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    s = hls[:,:,2]\n    binary_output=np.zeros_like(s)\n    binary_output[(s>=thresh[0])&(s<=thresh[1])]=1\n\n    return binary_output\n\ndef hsv_thresh(img, thresh=(100,255)):\n    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n    v = hsv[:,:,2]\n    binary_output=np.zeros_like(v)\n    binary_output[(v>=thresh[0])&(v<=thresh[1])]=1\n\n    return binary_output\n\ndef rgb_thresh(img):\n    r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n    binary_output, yellow, white = np.zeros_like(r), np.zeros_like(r), np.zeros_like(r)\n    yellow[(r>=150)&(g>=130)&(b<85)] = 1\n    white[(r>=220)&(g>=220)&(b>220)] = 1\n    binary_output[(yellow==1)|(white==1)] = 1\n\n    return binary_output\n","sub_path":"color_gradient_threshold.py","file_name":"color_gradient_threshold.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540982901","text":"# Copyright 2020 SURF.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\n\nimport random\nfrom typing import Dict, List, NamedTuple, Tuple, Type\nfrom uuid import UUID\n\nimport structlog\nfrom more_itertools import flatten\nfrom sqlalchemy import and_, func, or_, orm\nfrom sqlalchemy.orm import aliased, joinedload\nfrom statemachine.exceptions import TransitionNotAllowed\nfrom structlog.stdlib import BoundLogger\n\nfrom supa import settings\nfrom supa.connection import requester\nfrom supa.connection.error import (\n CapacityUnavailable,\n GenericInternalError,\n InvalidLabelFormat,\n InvalidTransition,\n NoServiceplanePathFound,\n ReservationNonExistent,\n StpUnavailable,\n UnknownStp,\n Variable,\n)\nfrom supa.connection.fsm import LifecycleStateMachine, ReservationStateMachine\nfrom supa.db.model import Path, PathTrace, Port, Reservation, Segment\nfrom supa.grpc_nsi.connection_requester_pb2 import (\n ReserveCommitConfirmedRequest,\n ReserveCommitFailedRequest,\n ReserveConfirmedRequest,\n ReserveFailedRequest,\n)\nfrom supa.job.shared import Job, NsiException\nfrom supa.util.bandwidth import format_bandwidth\nfrom supa.util.converter import to_confirm_criteria, to_connection_states, to_header, to_service_exception\nfrom supa.util.vlan import VlanRanges\n\nlogger = structlog.get_logger(__name__)\n\n\nclass PortResources(NamedTuple):\n \"\"\"Capture port resources, as returned by an SQLAlchemy query, so that they can be referred to by name.\"\"\"\n\n bandwidth: int\n vlans: VlanRanges\n\n\nclass ReserveJob(Job):\n \"\"\"Handle reservation requests.\"\"\"\n\n connection_id: UUID\n log: BoundLogger\n\n def __init__(self, connection_id: UUID):\n \"\"\"Initialize the ReserveJob.\n\n Args:\n connection_id: The connection_id of the reservation request\n \"\"\"\n self.log = logger.bind(job=self.__class__.__name__)\n self.connection_id = connection_id\n\n def _port_resources_in_use(self, session: orm.Session) -> Dict[str, PortResources]:\n \"\"\"Calculate port resources in use for active reservations that overlap with ours.\n\n Active reservations being those that:\n\n - are currently being held\n - have been committed and not yet been terminated.\n\n Overlap as in: their start times and end times overlap with ours.\n\n The bandwidth in use is calculated per port.\n Eg, if a port is used in two active reservations,\n (one reservation for a connection with a bandwidth of 100 Mbps\n and another with a bandwidth of 400 Mbps)\n the bandwidth in use for the port will be:\n 100 + 400 = 500 Mbps.\n\n Similarly for the VLANs in use.\n Given the same port used in two active reservations\n (one reservation where the port has a VLAN of 100\n and another one where the port has a VLAN of 105),\n the VLANs in use for the port will be:\n VlanRanges([100, 105])\n\n Args:\n session: A SQLAlchemy session to construct and run the DB query\n\n Returns:\n A dict mapping port (names) to their port resources.\n\n \"\"\"\n # To calculate the active overlapping reservation we need to perform a self-join.\n # One part of the join is for our (current) reservation.\n # The other part is for joining the overlapping ones with our (current) reservation.\n CurrentReservation = aliased(Reservation, name=\"cr\")\n overlap_active = (\n # The other part\n session.query(Reservation)\n .join(\n (\n CurrentReservation,\n # Do they overlap?\n and_(\n CurrentReservation.start_time < Reservation.end_time,\n CurrentReservation.end_time > Reservation.start_time,\n ),\n )\n )\n 
.filter(\n # Only select active reservations\n or_(\n and_(\n Reservation.reservation_state == ReservationStateMachine.ReserveStart.name,\n Reservation.provisioning_state.isnot(None),\n Reservation.lifecycle_state == LifecycleStateMachine.Created.name,\n ),\n Reservation.reservation_state == ReservationStateMachine.ReserveHeld.name,\n )\n )\n # And only those that overlap with our reservation.\n .filter(CurrentReservation.connection_id == self.connection_id)\n ).subquery()\n OverlappingActiveReservation = aliased(Reservation, overlap_active, name=\"oar\")\n\n # To map ports to resources (bandwidth and vlan) in use\n # we need to unpivot the two pair of port columns from the reservations table into separate rows.\n # Eg, from:\n #\n # row 1: connection_id, ..., src_port, src_selected_vlan, dst_port, .dst_selected_vlan ..\n #\n # to:\n #\n # row 1: connection_id, port, vlan <-- former src_port, src_selected_vlan\n # row 2: connection_id, port, vlan <-- former dst_port, dst_selected_vlan\n src_port = session.query(\n Reservation.connection_id.label(\"connection_id\"),\n Reservation.src_port.label(\"port\"),\n Reservation.src_selected_vlan.label(\"vlan\"),\n )\n dst_port = session.query(\n Reservation.connection_id,\n Reservation.dst_port.label(\"port\"),\n Reservation.dst_selected_vlan.label(\"vlan\"),\n )\n ports = src_port.union(dst_port).subquery()\n\n # With the 'hard' work done for us in two subqueries,\n # calculating the port resources (bandwidth, VLANs) in use is now relatively straightforward.\n port_resources_in_use = (\n session.query(\n ports.c.port,\n func.sum(OverlappingActiveReservation.bandwidth).label(\"bandwidth\"),\n func.group_concat(ports.c.vlan, \",\").label(\"vlans\"), # yes, plural!\n )\n .select_from(OverlappingActiveReservation)\n .join(ports, OverlappingActiveReservation.connection_id == ports.c.connection_id)\n .filter(\n ports.c.port.in_(\n (\n OverlappingActiveReservation.src_port,\n OverlappingActiveReservation.dst_port,\n )\n )\n )\n .group_by(ports.c.port)\n .all()\n )\n\n return {\n rec.port: PortResources(bandwidth=rec.bandwidth, vlans=VlanRanges(rec.vlans))\n for rec in port_resources_in_use\n }\n\n def _send_reserve_failed(self, session: orm.Session, nsi_exc: NsiException) -> None:\n # the reservation is still in the session, hence no actual query will be performed\n reservation: Reservation = session.query(Reservation).get(self.connection_id)\n pb_rf_req = ReserveFailedRequest()\n\n pb_rf_req.header.CopyFrom(to_header(reservation, add_path_segment=False))\n pb_rf_req.connection_id = str(reservation.connection_id)\n pb_rf_req.connection_states.CopyFrom(to_connection_states(reservation, data_plane_active=False))\n pb_rf_req.service_exception.CopyFrom(to_service_exception(nsi_exc, reservation.connection_id))\n\n self.log.info(\"Sending message.\", method=\"ReserveFailed\", request_message=pb_rf_req)\n stub = requester.get_stub()\n stub.ReserveFailed(pb_rf_req)\n\n def _send_reserve_confirmed(self, session: orm.Session) -> None:\n # the reservation is still in the session, hence no actual query will be performed\n reservation: Reservation = session.query(Reservation).get(self.connection_id)\n\n pb_rc_req = ReserveConfirmedRequest()\n # Confirming the reservation means we have a Path. 
hence we should add it to the Header.\n pb_rc_req.header.CopyFrom(to_header(reservation, add_path_segment=True))\n pb_rc_req.connection_id = str(reservation.connection_id)\n pb_rc_req.global_reservation_id = reservation.global_reservation_id\n # We skip setting the description, cause we have nothing specific to set it to (suggestions?)\n pb_rc_req.criteria.CopyFrom(to_confirm_criteria(reservation))\n\n self.log.info(\"Sending message.\", method=\"ReserveConfirmed\", request_message=pb_rc_req)\n stub = requester.get_stub()\n stub.ReserveConfirmed(pb_rc_req)\n\n def __call__(self) -> None:\n \"\"\"Check reservation request.\n\n If the reservation can be made\n a ReserveConfirmed message will be send to the NSA/AG.\n If not, a ReserveFailed message will be send instead.\n \"\"\"\n self.log.info(\"Checking reservation request.\")\n from supa.db.session import db_session\n\n with db_session() as session:\n reservation: Reservation = (\n session.query(Reservation)\n .options(\n joinedload(Reservation.parameters),\n joinedload(Reservation.path_trace)\n .joinedload(PathTrace.paths)\n .joinedload(Path.segments)\n .joinedload(Segment.stps),\n )\n .get(self.connection_id)\n )\n rsm = ReservationStateMachine(reservation, state_field=\"reservation_state\")\n port_resources_in_use = self._port_resources_in_use(session)\n\n try:\n if rsm.current_state != ReservationStateMachine.ReserveChecking:\n raise NsiException(\n InvalidTransition,\n rsm.current_state.name,\n {Variable.RESERVATION_STATE: rsm.current_state.value},\n )\n if reservation.src_port == reservation.dst_port:\n raise NsiException(\n # Not sure if this is the correct error to use.\n # As its descriptive text refers to path computation\n # it suggests its an error typically returned by an aggregator.\n # On the other hand it is the only error related to a path/connection as a whole\n # and that is what is at issue here.\n NoServiceplanePathFound,\n \"source and destination ports are the same\",\n {\n Variable.PROVIDER_NSA: settings.nsa_id,\n Variable.SOURCE_STP: str(reservation.src_stp()),\n Variable.DEST_STP: str(reservation.dst_stp()),\n },\n )\n for target, var in ((\"src\", Variable.SOURCE_STP), (\"dst\", Variable.DEST_STP)):\n # Dynamic attribute lookups as we want to use the same code for\n # both src and dst ports/stps\n res_port = getattr(reservation, f\"{target}_port\")\n stp = str(getattr(reservation, f\"{target}_stp\")()) # <-- mind the func call\n domain = getattr(reservation, f\"{target}_domain\")\n network_type = getattr(reservation, f\"{target}_network_type\")\n requested_vlans = VlanRanges(getattr(reservation, f\"{target}_vlans\"))\n port = session.query(Port).filter(Port.name == res_port).one_or_none()\n if (\n port is None\n or not port.enabled\n or domain != settings.domain # only process requests for our domain\n or network_type != settings.network_type # only process requests for our network\n ):\n raise NsiException(UnknownStp, stp, {var: stp})\n if not requested_vlans:\n raise NsiException(InvalidLabelFormat, \"missing VLANs label on STP\", {var: stp})\n if port.name in port_resources_in_use:\n bandwidth_available = port.bandwidth - port_resources_in_use[port.name].bandwidth\n available_vlans = VlanRanges(port.vlans) - port_resources_in_use[port.name].vlans\n else:\n bandwidth_available = port.bandwidth\n available_vlans = VlanRanges(port.vlans)\n if bandwidth_available < reservation.bandwidth:\n raise NsiException(\n CapacityUnavailable,\n f\"requested: {format_bandwidth(reservation.bandwidth)}, \"\n f\"available: 
{format_bandwidth(bandwidth_available)}\",\n {\n Variable.CAPACITY: str(reservation.bandwidth),\n var: stp,\n },\n )\n if not available_vlans:\n raise NsiException(StpUnavailable, \"all VLANs in use\", {var: stp})\n candidate_vlans = requested_vlans & available_vlans\n if not candidate_vlans:\n raise NsiException(\n StpUnavailable,\n f\"no matching VLAN found (requested: {requested_vlans!s}, available: {available_vlans!s}\",\n {var: stp},\n )\n selected_vlan = random.choice(list(candidate_vlans))\n setattr(reservation, f\"{target}_selected_vlan\", selected_vlan)\n except NsiException as nsi_exc:\n self.log.info(\"Reservation failed.\", reason=nsi_exc.text)\n rsm.reserve_failed()\n self._send_reserve_failed(session, nsi_exc)\n except Exception as exc:\n self.log.exception(\"Unexpected error occurred.\", reason=str(exc))\n rsm.reserve_failed()\n nsi_exc = NsiException(GenericInternalError, str(exc)) # type: ignore[misc]\n self._send_reserve_failed(session, nsi_exc) # type: ignore[misc]\n else:\n rsm.reserve_confirmed()\n self._send_reserve_confirmed(session)\n\n @classmethod\n def recover(cls: Type[ReserveJob]) -> List[Job]:\n \"\"\"Recover ReserveJob's that did not get to run before SuPA was terminated.\n\n Returns:\n List of ReserveJob's that still need to be run.\n \"\"\"\n from supa.db.session import db_session\n\n with db_session() as session:\n connection_ids: List[UUID] = list(\n flatten(\n session.query(Reservation.connection_id)\n .filter(Reservation.reservation_state == ReservationStateMachine.ReserveChecking.value)\n .all()\n )\n )\n return [ReserveJob(cid) for cid in connection_ids]\n\n def trigger(self) -> Tuple:\n \"\"\"Return APScheduler trigger information for scheduling recovered ReserveJob's.\"\"\"\n return () # Run immediately after recovery\n\n\nclass ReserveCommitJob(Job):\n \"\"\"Handle reservation commit requests.\"\"\"\n\n connection_id: UUID\n log: BoundLogger\n\n def __init__(self, connection_id: UUID):\n \"\"\"Initialize the ReserveCommitJob.\n\n Args:\n connection_id: The connection_id of the reservation commit request\n \"\"\"\n self.log = logger.bind(job=self.__class__.__name__)\n self.connection_id = connection_id\n\n def _send_reserve_commit_failed(self, session: orm.Session, nsi_exc: NsiException) -> None:\n # the reservation is still in the session, hence no actual query will be performed\n reservation: Reservation = session.query(Reservation).get(self.connection_id)\n pb_rcf_req = ReserveCommitFailedRequest()\n\n pb_rcf_req.header.CopyFrom(to_header(reservation, add_path_segment=False))\n pb_rcf_req.connection_id = str(reservation.connection_id)\n pb_rcf_req.connection_states.CopyFrom(to_connection_states(reservation, data_plane_active=False))\n pb_rcf_req.service_exception.CopyFrom(to_service_exception(nsi_exc, reservation.connection_id))\n\n self.log.info(\"Sending message.\", method=\"ReserveCommitFailed\", request_message=pb_rcf_req)\n stub = requester.get_stub()\n stub.ReserveCommitFailed(pb_rcf_req)\n\n def _send_reserve_commit_confirmed(self, session: orm.Session) -> None:\n # the reservation is still in the session, hence no actual query will be performed\n reservation: Reservation = session.query(Reservation).get(self.connection_id)\n pb_rcc_req = ReserveCommitConfirmedRequest()\n\n pb_rcc_req.header.CopyFrom(to_header(reservation, add_path_segment=True)) # Yes, add our segment!\n pb_rcc_req.connection_id = str(reservation.connection_id)\n\n self.log.info(\"Sending message.\", method=\"ReserveCommitConfirmed\", request_message=pb_rcc_req)\n stub = 
requester.get_stub()\n stub.ReserveCommitFailed(pb_rcc_req)\n\n def __call__(self) -> None:\n \"\"\"Commit Reservation.\"\"\"\n self.log.info(\"Committing reservation\")\n\n from supa.db.session import db_session\n\n with db_session() as session:\n reservation = (\n session.query(Reservation).filter(Reservation.connection_id == self.connection_id).one_or_none()\n )\n if reservation is None:\n raise NsiException(\n ReservationNonExistent, str(self.connection_id), {Variable.CONNECTION_ID: str(self.connection_id)}\n )\n try:\n rsm = ReservationStateMachine(reservation, state_field=\"reservation_state\")\n rsm.reserve_commit_request()\n except NsiException as nsi_exc:\n self.log.info(\"Reserve commit failed.\", reason=nsi_exc.text)\n rsm.reserve_commit_failed()\n self._send_reserve_commit_failed(session, nsi_exc)\n except TransitionNotAllowed as tna:\n self.log.info(\"Invalid state transition\", reason=str(tna))\n rsm.reserve_commit_failed()\n nsi_exc = NsiException(\n InvalidTransition,\n str(tna),\n {Variable.RESERVATION_STATE: reservation.reservation_state},\n ) # type: ignore[misc]\n self._send_reserve_commit_failed(session, nsi_exc) # type: ignore[misc]\n except Exception as exc:\n self.log.exception(\"Unexpected error occurred.\", reason=str(exc))\n rsm.reserve_commit_failed()\n nsi_exc = NsiException(GenericInternalError, str(exc)) # type: ignore[misc]\n self._send_reserve_commit_failed(session, nsi_exc) # type: ignore[misc]\n else:\n self._send_reserve_commit_confirmed(session)\n\n @classmethod\n def recover(cls) -> List[Job]:\n \"\"\"Recover ReserveCommitJobs.\"\"\"\n # Nothing to recover -> no-op\n return super().recover()\n\n def trigger(self) -> Tuple:\n \"\"\"Trigger for recovered ReserveCommitJobs.\"\"\"\n return super().trigger()\n\n\nclass ReserveAbortJob(Job):\n \"\"\"Handle reservation obort requests.\"\"\"\n\n connection_id: UUID\n log: BoundLogger\n\n def __init__(self, connection_id: UUID):\n \"\"\"Initialize the ReserveAbortJob.\n\n Args:\n connection_id: The connection_id of the reservation abort request\n \"\"\"\n self.log = logger.bind(job=self.__class__.__name__)\n self.connection_id = connection_id\n\n def __call__(self) -> None:\n \"\"\"Abort Reservation.\"\"\"\n # FIXME Implement!!!\n raise NotImplementedError(\"Implementation is basically the same as `ReserveCommitJob.__call__`.\")\n\n @classmethod\n def recover(cls) -> List[Job]:\n \"\"\"Recover ReserveAbortJobs.\"\"\"\n # Nothing to recover -> no-op\n return super().recover()\n\n def trigger(self) -> Tuple:\n \"\"\"Trigger for ReserveAbortJobs.\"\"\"\n return super().trigger()\n","sub_path":"src/supa/job/reserve.py","file_name":"reserve.py","file_ext":"py","file_size_in_byte":20667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166739114","text":"'''1015_client.py\r\n1. 클라이언트 소켓을 생성하여 서버로 접속(서버 주소로, 포트번호로)\r\n2. 
Run two tasks using multithreading.\r\n 1) work2, on the client socket:\r\n  - sock.send() : pushes the data read from input() onto the client's own socket.\r\n  - This data is delivered to the server.\r\n 2) work1\r\n  - sock.recv() : the connected server already called s.send() on the client socket, so the client just calls recv().\r\n\r\n'''\r\nimport threading\r\nimport socket\r\nimport time\r\n\r\nisRunning = True\r\n\r\n''' Thread for receiving broadcast data '''\r\n# Because the server called send() on each connected client's socket,\r\n# the client can receive the data simply by calling recv() on its own socket.\r\ndef work1(sock):\r\n    global isRunning\r\n    while isRunning:\r\n        try:\r\n            data = sock.recv(1024) # pull the data sent by the server\r\n            if data == None or len(data) <= 0:\r\n                # if the data is None or the connection was dropped\r\n                isRunning = False\r\n                return\r\n        except socket.error:\r\n            isRunning = False\r\n            return\r\n        sdata = data[4:].decode()\r\n        print(sdata)\r\n\r\n''' Thread for writing data '''\r\n# Read the user's input and push it onto the client socket.\r\ndef work2(sock):\r\n    global isRunning\r\n    while isRunning:\r\n        s = input(\">> \")\r\n        if s == \"/quit\":\r\n            isRunning = False\r\n            return\r\n        data = s.encode()\r\n        packet = (\"%04d\"%len(data)).encode()+data # use the length of the encoded data as the header\r\n        sock.send(packet)\r\n        # Encode the input and send it through the (client) socket.\r\n        # The server then calls sock.recv() on that socket, decodes the data,\r\n        # registers it and builds a message, re-encodes the message and calls sock.send(),\r\n        # so every connected client can sock.recv() it from the server's socket.\r\n\r\n# Create the client socket.\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ntry:\r\n    sock.connect((\"127.0.0.1\", 8888)) # connect to the server address and this port number\r\n    # in server.py, client, addr = sock.accept() : addr holds the address and port number.\r\nexcept socket.error:\r\n    print(f\"Network error {socket.error}\")\r\n    quit()\r\n\r\nprint(\"Connection is established\")\r\nthread1 = threading.Thread(target=work1, args=(sock,)) # pass sock into work1\r\nthread1.start()\r\nthread2 = threading.Thread(target=work2, args=(sock,))\r\nthread2.start()\r\n","sub_path":"Metaverse/1025_metaverse/1025_client.py","file_name":"1025_client.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"441542460","text":"import xml.etree.ElementTree as ET\nimport pickle\nimport os\nimport glob\nfrom os import listdir, getcwd\nfrom os.path import join\n\nsets=[('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]\n\nclasses = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\nclasses2 = [\"buddha\"]\n\ndef convert(size, box):\n    dw = 1./size[0]\n    dh = 1./size[1]\n    x = (box[0] + box[1])/2.0\n    y = (box[2] + box[3])/2.0\n    w = box[1] - box[0]\n    h = box[3] - box[2]\n    x = x*dw\n    w = w*dw\n    y = y*dh\n    h = h*dh\n    return (x,y,w,h)\n\ndef convert_annotation(img_xml, img_txt):\n    in_file = open(img_xml)\n    out_file = open(img_txt, 'w')\n    tree=ET.parse(in_file)\n    root = tree.getroot()\n    size = root.find('size')\n    w = int(size.find('width').text)\n    h = int(size.find('height').text)\n\n    for obj in root.iter('object'):\n        difficult = obj.find('difficult').text\n        cls = obj.find('name').text\n        if cls not in classes2 or int(difficult) == 1:\n            continue\n        cls_id = classes2.index(cls)\n        xmlbox = obj.find('bndbox')\n        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))\n        bb = convert((w,h), b)\n        out_file.write(str(cls_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n\nwd = getcwd()\nprint(wd)\n\ndef 
extract_filenames(folder_path):\n    allfiles = glob.glob(os.path.join(folder_path, \"*.xml\"))\n    return allfiles\n\nfolder_path = 'buddha_label'\nimagexml_list = extract_filenames(folder_path)\n\n\nif not os.path.exists(folder_path+'Text'):\n    os.makedirs(folder_path+'Text')\n#list_file = open('%s_%s.txt'%(year, image_set), 'w')\nftrain = open('train.txt','w')\nftest = open('test.txt','w')\n\ni = 0\nfor img_xml in imagexml_list:\n    i+=1\n    if i<=70:\n        ftrain.write('data\\\\obj\\\\' + img_xml.replace('xml','jpg').replace('_label','train')+'\\n')\n        img_txt = img_xml.replace('xml', 'txt').replace('_label', 'train')\n    else:\n        ftest.write('data\\\\obj\\\\'+ img_xml.replace('xml', 'jpg').replace('_label', 'test') + '\\n')\n        img_txt = img_xml.replace('xml','txt').replace('_label','test')\n    #list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\\n'%(wd, year, image_id))\n    convert_annotation(img_xml, img_txt)\n#list_file.close()\nftrain.close()\nftest.close()","sub_path":"images/buddha_label.py","file_name":"buddha_label.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"378336510","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom rest_framework.renderers import HTMLFormRenderer\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef render_field(field):\n    \"\"\"\n    Render an API serializer field as a form element.\n\n    This is an alternative implementation of `rest_framework`'s\n    `render_field` that does not require the `style` parameter.\n    \"\"\"\n    renderer = HTMLFormRenderer()\n    return renderer.render_field(field, {})\n\n\n@register.simple_tag\ndef render_form(serializer):\n    \"\"\"\n    Render an API serializer as a form.\n\n    This is an alternative implementation of `rest_framework`'s\n    `render_form` that does not have the `template_pack` parameter.\n    \"\"\"\n    renderer = HTMLFormRenderer()\n    return renderer.render(serializer.data, None, {\"style\": {}})\n\n\n@register.simple_tag\ndef render_submit(classes=\"is-primary\", icon=\"\", text=\"Submit\"):\n    \"\"\"Render a submit button.\"\"\"\n    # NOTE: the original markup of this template string was lost; the tag below\n    # is a minimal, assumed reconstruction that keeps the documented placeholders.\n    return mark_safe(\n        \"\"\"<button class=\"button {classes}\" type=\"submit\">{icon}{text}</button>\"\"\".format(\n            classes=classes, icon=icon, text=text\n        )\n    )\n\n\n@register.simple_tag\ndef handle_form(is_update=False, success_url=None):\n    \"\"\"\n    Create a <script> tag that handles submission of the form.\n    \"\"\"\n    # NOTE: the original markup of this template string was lost; the tag below\n    # is a minimal, assumed reconstruction that keeps the documented placeholders.\n    return mark_safe(\n        \"\"\"<script>handleForm({is_update}, {success_url})</script>\"\"\".format(\n            is_update=\"true\" if is_update else \"false\",\n            success_url=\"'{}'\".format(success_url) if success_url else \"null\",\n        )\n    )\n","sub_path":"director/projects/templatetags/form_helpers.py","file_name":"form_helpers.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423833532","text":"import numpy as np\nfrom scipy.io import loadmat, savemat, whosmat\n\nobj_arr = np.zeros((2,), dtype=np.object)\nobj_arr[0] = 1\nobj_arr[1] = 'a string'\n\nsavemat('np_cells.mat', {'obj_arr': obj_arr})\nmat = loadmat('np_cells.mat', squeeze_me=True)\nobj = mat['obj_arr']\nprint(obj)\nprint(obj[0])\nprint(obj[1])\n","sub_path":"scipyExercise/matlabfile/cellExample/save_cells.py","file_name":"save_cells.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"23826084","text":"#! 
/usr/bin/python3\n# -*- coding:utf-8 -*-\n# @Author : Paul C G LUO\n# @Email : 673951437@qq.com\n# @DateTime: 2021/7/14 14:29\n\nimport numpy as np\nimport tensorflow as tf2\nfrom matplotlib import pyplot as plt\n\nfrom trainDataProcess.dataAugmentation import augmentation_generator\nfrom trainDataProcess.generateBatchData import get_dataset\n\nIMAGESIZE = 512\nGRIDSIZE = 16\nANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]\nANCHORS_NUM = len(ANCHORS) // 2\n\n\ndef process_true_boxes(gt_boxes, anchors):\n    # 512 // 16 = 32\n    scale = IMAGESIZE // GRIDSIZE\n    # [5, 2]\n    anchors = np.array(anchors).reshape((ANCHORS_NUM, 2))\n\n    detector_mask = np.zeros([GRIDSIZE, GRIDSIZE, 5, 1])\n\n    matching_gt_box = np.zeros([GRIDSIZE, GRIDSIZE, 5, 5])\n\n    gt_boxes_grid = np.zeros(gt_boxes.shape)\n\n    # DB: tensor => numpy\n    gt_boxes = gt_boxes.numpy()\n\n    for i, box in enumerate(gt_boxes): # [40, 5]\n        # box: [5], x1-y1-x2-y2-l\n        # 0~512 -> 0~16\n        x = ((box[0] + box[2]) / 2) / scale\n        y = ((box[1] + box[3]) / 2) / scale\n        w = (box[2] - box[0]) / scale\n        h = (box[3] - box[1]) / scale\n\n        gt_boxes_grid[i] = np.array([x, y, w, h, box[4]])\n\n        if w * h > 0:\n            best_anchor = 0\n            best_iou = 0\n            for j in range(5):\n                interct = np.minimum(w, anchors[j, 0]) * np.minimum(h, anchors[j, 1])\n                union = w * h + (anchors[j, 0] * anchors[j, 1]) - interct\n                iou = interct / union\n\n                if iou > best_iou:\n                    best_anchor = j\n                    best_iou = iou\n\n            # found the best anchor\n            if best_iou > 0:\n                x_coord = np.floor(x).astype(np.int32)\n                y_coord = np.floor(y).astype(np.int32)\n                # [b,h,w,5,1]\n                detector_mask[y_coord, x_coord, best_anchor] = 1\n                # [b,h,w,5,x-y-w-h-l]\n                matching_gt_box[y_coord, x_coord, best_anchor] = \\\n                    np.array([x, y, w, h, box[4]])\n\n    return matching_gt_box, detector_mask, gt_boxes_grid\n\ndef ground_truth_generator(db):\n\n    for imgs, imgs_boxes in db:\n        # imgs: [b, 512, 512, 3]\n        # imgs_boxes: [b, 40, 5]\n\n        batch_matching_gt_box = []\n        batch_detector_mask = []\n        batch_gt_boxes_grid= []\n\n        b = imgs.shape[0]\n        for i in range(b):\n            matching_gt_box, detector_mask, gt_boxes_grid = process_true_boxes(imgs_boxes[i], ANCHORS)\n            batch_matching_gt_box.append(matching_gt_box)\n            batch_detector_mask.append(detector_mask)\n            batch_gt_boxes_grid.append(gt_boxes_grid)\n\n        # [b, 16,16,5,1]\n        detector_mask = tf2.cast(np.array(batch_detector_mask), dtype=tf2.float32)\n        # [b,16,16,5,5] x-y-w-h-l\n        matching_gt_box = tf2.cast(np.array(batch_matching_gt_box), dtype=tf2.float32)\n        # [b,40,5] x-y-w-h-l\n        gt_boxes_grid = tf2.cast(np.array(batch_gt_boxes_grid), dtype=tf2.float32)\n\n        # [b,16,16,5]\n        matching_classes = tf2.cast(matching_gt_box[..., 4], dtype=tf2.int32)\n        # [b,16,16,5,3]\n        matching_classes_oh = tf2.one_hot(matching_classes, depth=3)\n        # x-y-w-h-conf-l1-l2\n        # [b,16,16,5,2]\n        matching_classes_oh = tf2.cast(matching_classes_oh[..., 1:], dtype=tf2.float32)\n        # [b,512,512,3]\n        # [b,16,16,5,1]\n        # [b,16,16,5,5]\n        # [b,16,16,5,2]\n        # [b,40,5]\n        yield imgs, detector_mask, matching_gt_box, matching_classes_oh, gt_boxes_grid\n\n\nif __name__ == '__main__':\n    # %%\n    # 2.3 visualize object mask\n    # train_db -> aug_train_db -> train_gen\n    labels = ['sugarbeet', 'weed']\n    train_db = get_dataset('D:\\\\Tensorflow_version2.0\\\\[更新]目标检测\\\\yolov2-tf2\\\\yolov2-tf2\\\\data\\\\train\\\\image',\n                           'D:\\\\Tensorflow_version2.0\\\\[更新]目标检测\\\\yolov2-tf2\\\\yolov2-tf2\\\\data\\\\train\\\\annotation',\n                           labels, 4)\n\n    aug_train_db = augmentation_generator(train_db)\n    train_gen = 
ground_truth_generator(aug_train_db)\n\n img, detector_mask, matching_gt_box, matching_classes_oh, gt_boxes_grid = \\\n next(train_gen)\n img, detector_mask, matching_gt_box, matching_classes_oh, gt_boxes_grid = \\\n img[0], detector_mask[0], matching_gt_box[0], matching_classes_oh[0], gt_boxes_grid[0]\n\n fig, (ax1, ax2) = plt.subplots(2, figsize=(5, 10))\n ax1.imshow(img)\n # [16,16,5,1] => [16,16,1]\n mask = tf2.reduce_sum(detector_mask, axis=2)\n ax2.matshow(mask[..., 0]) # [16,16]\n plt.show()\n","sub_path":"composeGroundTruth/groundTruth.py","file_name":"groundTruth.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346698830","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport keras\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nclass_names = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\ndef makemodel(X_train, y_train, X_valid, y_valid):\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Dense(300, input_shape=[64,], activation=\"relu\"))\n model.add(tf.keras.layers.Dense(100, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))\n model.summary()\n model.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=\"sgd\",\n metrics=[\"accuracy\"])\n #tb_hist = keras.callbacks.TensorBoard(log_dir='./graph', histogram_freq=0, write_graph=True, write_images=True)\n start = time.time()\n history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid), verbose=1)\n print(\"time :\", time.time() - start)\n return model, history\ndef data_normalization(X_train_full, y_train_full):\n X_valid, X_train = X_train_full[:200] , X_train_full[200:]\n y_valid, y_train = y_train_full[:200], y_train_full[200:]\n return X_valid, X_train, y_valid, y_train\ndef printmod(model, x_test,y_test) :\n model.evaluate(x_test, y_test)\n x_new = x_test[:10]\n y_proba = model.predict(x_new)\n plt.figure(figsize=(10 * 1.2, 10 * 1.2))\n for i in range(10):\n pic = x_test[i].reshape(8,8)\n plt.subplot(1, 10, i+1)\n plt.imshow(pic, cmap=\"binary\", interpolation=\"nearest\")\n plt.axis('off')\n yindex = list(y_proba[i]).index(y_proba[i].max())\n print(yindex)\n plt.title(class_names[y_test[i]], fontsize=12)\n plt.subplots_adjust(wspace=0.2, hspace=0.5)\n plt.show()\ndef main():\n digits = load_digits()\n x_data = digits.data\n y_data = digits.target\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3)\n x_valid, x_train, y_valid, y_train = data_normalization(x_train, y_train)\n model, history= makemodel(x_train, y_train, x_valid, y_valid)\n printmod(model,x_test, y_test)\nmain()\n","sub_path":"실습6/6주차60161879_송윤근.py","file_name":"6주차60161879_송윤근.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"14959829","text":"#!/usr/bin/env python3\n\n# CHANGEME\np = 167\nq = 281\nm = 16346 # The message Alice sends to Bob\ne = 39423\nphi_n = (p - 1) * (q - 1)\nn = p * q\ngcd_cnt, eea_cnt = 0, 0\nabsval = lambda i: (\"+\" if i > 0 else \"\") + str(i)\n\n\n# Modular exponentiation:\ndef modexp(b, n, m):\n retval = 1\n tmpstr = \"\\t \"\n prodvals = []\n binx = bin(n)[2:]\n print(f\"\\t{n}={binx} | {b}^{n} mod {m}\")\n binx = list(reversed(bin(n)[2:])) # reverse to make the number correct\n indexone 
= [i for i in range(len(binx)) if binx[i] == \"1\"]\n    val = b\n    for i in range(0, len(binx)):\n        pval = val\n        if i == 0:\n            val = (val ** (2 ** i)) % m\n        else:\n            val = (val ** 2) % m\n        if i in indexone:\n            print(\"\\t* \", end=\"\")\n            prodvals.append(val)\n        else:\n            print(\"\\t  \", end=\"\")\n        print(i, end=\" | \")\n        print(f\"\\t{pval}^2 = {val} (mod {m})\")\n    for i in prodvals:\n        tmpstr += \" \" + str(i) + \" *\"\n        retval = (retval * i) % m\n    tmpstr = tmpstr[:-1] + \"mod \" + str(m) + \" = \" + str(retval)\n    print(tmpstr)\n    return retval\n\n\ndef xgcd(a, b):\n    global gcd_cnt, eea_cnt\n    if a == 0:\n        return b, 0, 1\n    if gcd_cnt > 0: # skip the first print\n        print(f\"{b}={a}*{(b // a)}+{(b % a)}\")\n    gcd_cnt += 1\n    gcd, x1, y1 = xgcd(b % a, a)\n    x = y1 - (b // a) * x1\n    y = x1\n    if eea_cnt > 1: # skip the two first prints..\n        print(f\"1={y1}*{a}{absval(x1)}*{(b % a)}\")\n    eea_cnt += 1\n    return gcd, x, y\n\n\nprint(\"[INFO]: Determining Alice's keys\")\nprint(f\"p={p} | q={q} | e={e}\")\nprint(f\"Public key = (n=p*q,e) = ({n}, {e})\")\nprint(f\"ϕ(n) = ({p - 1})*({q - 1}) = {phi_n}\")\nprint(f\"d = e^(-1) mod ϕ(n)\")\nprint(f\"({phi_n}, {e}) = 1\")\nd = xgcd(phi_n, e)[2] % phi_n\nprint(f\"d = {e}^(-1) = {d}\")\nprint(f\"Alice's keys: Pub --> ({n}, {e}) | Priv --> {d}\")\nprint(\"- \" * 20)\nprint(f\"[INFO]: Alice signing message m={m}\")\nprint(f\"s = m^(d) mod n = {m}^{d} mod {n} | (Alice's private key)\")\ns = modexp(m, d, n)\nprint(f\"s = {s}\")\nprint(\"- \" * 20)\nprint(f\"[INFO]: Bob verifying signature s={s}\")\nprint(f\"m = s^(e) mod n = {s}^{e} mod {n} | (Alice's public key)\")\nm_d = modexp(s, e, n)\nprint(f\"m = {m_d}\")\n\nassert m_d == m # Signature verification works\n","sub_path":"05_signatures/rsa-sign.py","file_name":"rsa-sign.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325509298","text":"import abjad\nimport itertools\nimport os\nimport pathlib\nimport time\nimport abjadext.rmakers\n# from abjadext import rmakers\nfrom RhythmMusicMaker import RhythmMusicMaker\n\nprint('Interpreting file ...')\n\n# Define the time signatures we would like to apply against the timespan structure.\n\ntime_signatures = [\n    abjad.TimeSignature(pair) for pair in [\n        (4, 4),\n    ]\n]\n\nbounds = abjad.mathtools.cumulative_sums([_.duration for _ in time_signatures])\n\n# Define rhythm-makers: two for actual music, one for silence.\ntalea_rmaker = abjadext.rmakers.TaleaRhythmMaker(\n    talea=abjadext.rmakers.Talea(\n        counts=[2, 2, 5, 3, 1, 1, 3, 1],\n        denominator=8\n    ),\n    extra_counts_per_division=[1, 0, 0, 1, 0, 3, 0, 0],\n    )\n\n\nmusicmaker_one = RhythmMusicMaker(\n    rmaker=talea_rmaker,\n    pitches=[0],\n)\n\nrmaker_one = abjadext.rmakers.NoteRhythmMaker()\n\n# Define a small class so that we can annotate timespans with additional\n# information:\n\n\nclass MusicSpecifier:\n\n    def __init__(self, rhythm_maker, voice_name):\n        self.rhythm_maker = rhythm_maker\n        self.voice_name = voice_name\n\nprint('Collecting timespans and rmakers ...')\n\nvoice_1_timespan_list = abjad.TimespanList([\n    abjad.AnnotatedTimespan(\n        start_offset=start_offset,\n        stop_offset=stop_offset,\n        annotation=MusicSpecifier(\n            rhythm_maker=rhythm_maker,\n            voice_name='Voice 1',\n        ),\n    )\n    for start_offset, stop_offset, rhythm_maker in [\n        [0, 1, musicmaker_one],\n        [2, 3, musicmaker_one],\n    ]\n])\n\nvoice_2_timespan_list = abjad.TimespanList([\n    abjad.AnnotatedTimespan(\n        start_offset=start_offset,\n        stop_offset=stop_offset,\n        annotation=MusicSpecifier(\n            
rhythm_maker=rhythm_maker,\n            voice_name='Voice 2',\n        ),\n    )\n    for start_offset, stop_offset, rhythm_maker in [\n        [0, 1, rmaker_one],\n        [2, 3, rmaker_one],\n    ]\n])\n\nall_timespan_lists = {\n    'Voice 1': voice_1_timespan_list,\n    'Voice 2': voice_2_timespan_list,\n}\n\nglobal_timespan = abjad.Timespan(\n    start_offset=0,\n    stop_offset=max(_.stop_offset for _ in all_timespan_lists.values())\n)\n\n# Using the global timespan, create silence timespans for each timespan list.\n# We don't need to create any silences by-hand if we know the global start and\n# stop offsets of all voices combined:\n\nfor voice_name, timespan_list in all_timespan_lists.items():\n    # Here is another technique for finding where the silence timespans are. We\n    # create a new timespan list consisting of the global timespan and all the\n    # timespans from our current per-voice timespan list. Then we compute an\n    # in-place logical XOR. The XOR will replace the contents of the \"silences\"\n    # timespan list with a set of timespans representing those periods of time\n    # where only one timespan from the original was present. This has the\n    # effect of cutting out holes from the global timespan wherever a per-voice\n    # timespan was found, but also preserves any silence before the first\n    # per-voice timespan or after the last per-voice timespan. Then we merge\n    # the newly-created silences back into the per-voice timespan list.\n    silences = abjad.TimespanList([global_timespan])\n    silences.extend(timespan_list)\n    silences.sort()\n    silences.compute_logical_xor()\n    # Add the silences into the voice timespan list. We create new *annotated*\n    # timespans so we can maintain the voice name information:\n    for silence_timespan in silences:\n        timespan_list.append(\n            abjad.AnnotatedTimespan(\n                start_offset=silence_timespan.start_offset,\n                stop_offset=silence_timespan.stop_offset,\n                annotation=MusicSpecifier(\n                    rhythm_maker=None,\n                    voice_name=voice_name,\n                ),\n            )\n        )\n    timespan_list.sort()\n\n# Split the timespan list via the time signatures and collect the shards into a\n# new timespan list\n\nfor voice_name, timespan_list in all_timespan_lists.items():\n    shards = timespan_list.split_at_offsets(bounds)\n    split_timespan_list = abjad.TimespanList()\n    for shard in shards:\n        split_timespan_list.extend(shard)\n    split_timespan_list.sort()\n    # We can replace the original timespan list in the dictionary of\n    # timespan lists because we know the key it was stored at (its voice\n    # name):\n    all_timespan_lists[voice_name] = split_timespan_list\n\nscore = abjad.Score([\n    abjad.Staff(lilypond_type='TimeSignatureContext', name='Global Context'),\n    abjad.StaffGroup(\n        [abjad.Staff(\n            [abjad.Voice(\n                name='Voice {}'.format(n))\n            ],\n            name='Staff {}'.format(n),\n            lilypond_type='Staff')\n            for n in [1, 2]\n        ],\n        name='Staff Group',\n    )\n])\n\n# Add skips and time signatures to the global context\n\nfor time_signature in time_signatures:\n    skip = abjad.Skip(1)\n    abjad.attach(abjad.Multiplier(time_signature), skip)\n    abjad.attach(time_signature, skip)\n    score['Global Context'].append(skip)\n\ndef make_container(rhythm_maker, durations):\n    state=rhythm_maker.state\n    selections = rhythm_maker(durations, previous_state=state)\n    container = abjad.Container(selections)\n    return container\n\ndef key_function(timespan):\n    \"\"\"\n    Get the timespan's annotation's rhythm-maker.\n\n    If the annotation's rhythm-maker is None, return the silence maker.\n    \"\"\"\n    return timespan.annotation.rhythm_maker or rmaker_one\n\nfor voice_name, timespan_list in all_timespan_lists.items():\n    
for rhythm_maker, grouper in itertools.groupby(\n        timespan_list,\n        key=key_function,\n    ):\n        # We know the voice name of each timespan because a) the timespan\n        # list is in a dictionary, associated with that voice name and b)\n        # each timespan's annotation is a MusicSpecifier instance which\n        # knows the name of the voice the timespan should be used for.\n        # This double-reference to the voice is redundant here, but in a\n        # different implementation we could put *all* the timespans into\n        # one timespan list, split them, whatever, and still know which\n        # voice they belong to because their annotation records that\n        # information.\n        durations = [timespan.duration for timespan in grouper]\n        container = make_container(rhythm_maker, durations)\n        voice = score[voice_name]\n        voice.append(container)\n\nprint('Splitting and rewriting ...')\n\n# # split and rewrite meters\n# for voice in abjad.iterate(score['Staff Group']).components(abjad.Voice):\n#     for i, shard in enumerate(abjad.mutate(voice[:]).split(time_signatures)):\n#         time_signature = time_signatures[0]\n#         abjad.mutate(shard).rewrite_meter(time_signature)\n\nprint('Beautifying score ...')\n# cutaway score\nfor staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):\n    for selection in abjad.select(staff).components(abjad.Rest).group_by_contiguity():\n        start_command = abjad.LilyPondLiteral(\n            r'\\stopStaff \\once \\override Staff.StaffSymbol.line-count = #1 \\startStaff',\n            format_slot='before',\n        )\n        stop_command = abjad.LilyPondLiteral(\n            r'\\stopStaff \\startStaff',\n            format_slot='after',\n        )\n        abjad.attach(start_command, selection[0])\n        abjad.attach(stop_command, selection[-1])\n\n# Make pitches\nprint('Adding pitch material ...')\ndef cyc(lst):\n    count = 0\n    while True:\n        yield lst[count%len(lst)]\n        count += 1\n\nsopranino_scale = [30]\nsoprano1_scale = [20]\n\nscales = [\n    sopranino_scale,\n    soprano1_scale,\n]\n\nstaffs = [staff for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff)]\n\nfor staff , scale in zip(staffs , scales):\n    logicl_ties = [i for i in abjad.iterate(staff).logical_ties(pitched=True)]\n    pitches = cyc(scale)\n    for i , logicl_tie in enumerate(logicl_ties):\n        if logicl_tie.is_pitched ==True:\n            pitch = next(pitches)\n            for note in logicl_tie:\n                note.written_pitch = pitch\n\n#attach instruments and clefs\n\nprint('Adding attachments ...')\n\n# instruments = cyc([\n#     abjad.SopraninoSaxophone(),\n#     abjad.SopranoSaxophone(),\n# )]\n\n# abbreviations = cyc([abjad.MarginMarkup(markup=abjad.Markup(abbrev))\n#     for abbrev in (\n#     'spro.',\n#     'spr.1',\n#     )\n# ])\n\n# names = cyc([abjad.StartMarkup(markup=abjad.Markup(name))\n#     for name in (\n#     'Sopranino',\n#     'Soprano 1',\n#     )\n# ])\n\n# for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):\n#     leaf1 = abjad.select(staff).leaves()[0]\n#     abjad.attach(next(instruments), leaf1)\n#     abjad.attach(next(abbreviations), leaf1)\n#     abjad.attach(next(names), leaf1)\n#     # abjad.attach(next(clefs), leaf1)\n\n# for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):\n#     abjad.Instrument.transpose_from_sounding_pitch(staff)\n\n# Make a lilypond file and show it:\n\nscore_file = abjad.LilyPondFile.new(\n    score,\n    includes=['first_stylesheet.ily'],\n    )\n\nabjad.show(score)","sub_path":"Test/chord_2.py","file_name":"chord_2.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"513693936","text":"# Develop an application using Beaglebone 
Black/ARM Cortex A5 \n# development board to simulate the operations of LIFT.\n\n# Python script for Fizzible.com lift simulation kits\n# Author: Manish Raj\n\nimport Adafruit_BBIO.GPIO as GPIO\nimport time\nimport math\n\nfloor_lights = [\n [\"P9_14\", \"P9_15\"], # floor1: red, green\n [\"P9_12\", \"P9_13\"], # floor2: red, green\n [\"P8_19\", \"P9_11\"], # floor3: red, green\n [\"P8_17\", \"P8_18\"], # floor4: red, green\n [\"P8_9\", \"P8_10\"] # floor5: red, green\n]\narrow = [\"P8_8\", \"P8_7\"]\nswitch = [\"P8_16\", \"P8_15\", \"P8_14\", \"P8_13\", \"P8_12\"]\nsegment = [\"P9_24\", \"P9_23\", \"P9_22\",\"P9_21\"]\n\ndef high(pin):\n GPIO.output(pin, GPIO.HIGH)\n\ndef low(pin):\n GPIO.output(pin, GPIO.LOW)\n\ndef off(pin):\n GPIO.output(pin, GPIO.HIGH)\n\ndef on(pin):\n GPIO.output(pin, GPIO.LOW)\n\ndef setup(pin):\n GPIO.setup(pin, GPIO.OUT)\n\nfor x in floor_lights:\n for y in x:\n setup(y)\n off(y)\n\nfor x in segment, arrow:\n for y in x:\n setup(y)\n off(y)\n\nfor x in switch:\n GPIO.setup(x, GPIO.IN)\n\ndef disp(decimal):\n bcd_num = ('000' + str(bin(decimal)).replace('0b', ''))[-4:]\n for i in range(0, 4):\n high(segment[i]) if bcd_num[i] == '1' else low(segment[i])\n\ndef get_floor():\n i = 0\n for x in switch:\n if(GPIO.input(x)):\n return i\n i = i + 1\n return -1\n\ndef up():\n on(arrow[1])\n off(arrow[0])\n\ndef down():\n on(arrow[0])\n off(arrow[1])\n\ndef arrow_off():\n off(arrow[0])\n off(arrow[1])\n\ndef red(floor):\n on(floor_lights[floor][0])\n off(floor_lights[floor][1])\n\ndef green(floor):\n on(floor_lights[floor][1])\n off(floor_lights[floor][0])\n\ndef all_off():\n for x in floor_lights:\n for y in x:\n off(y) \n\ncurrent_floor = 0\nwhile True:\n all_off()\n arrow_off()\n disp(current_floor)\n green(current_floor)\n next_floor = get_floor()\n while next_floor != -1 and next_floor != current_floor:\n all_off()\n green(current_floor)\n red(next_floor) \n if(next_floor > current_floor):\n up()\n current_floor += 1\n else:\n down()\n current_floor -= 1\n disp(current_floor)\n time.sleep(1)\n","sub_path":"lift.py","file_name":"lift.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"432021114","text":"from __future__ import absolute_import\nimport autograd.scipy.misc as asm\n\nimport autograd.numpy as anp\n\ndef make_fwd_grad_logsumexp(g, ans, gvs, vs, x, axis=None, b=1.0, keepdims=False):\n if not keepdims:\n if isinstance(axis, int):\n ans = anp.expand_dims(ans, axis)\n elif isinstance(axis, tuple):\n for ax in sorted(axis):\n ans = anp.expand_dims(ans, ax)\n return anp.sum(g * b * anp.exp(x - ans), axis=axis, keepdims=keepdims)\nasm.logsumexp.defjvp(make_fwd_grad_logsumexp)\n","sub_path":"autograd_forward/scipy/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358573164","text":"\"\"\" sp_controller: serial parser\n\nSerial parser class\n\n\"\"\"\n\nfrom __future__ import print_function # We require Python 2.6+\nimport datetime\n\n\nclass SerialParser():\n \"\"\"SerialParser: parser for SerialPacket data\"\"\"\n def __init__(self):\n pass\n #self.converter = DataConverter()\n \n def parse(self, packet):\n \"\"\"Parse a packet\"\"\"\n self.now = datetime.datetime.now()\n if self.validate_packet(packet):\n if self.get_packet_type(packet) == 0x01: # \n return(self.parse_data(packet))\n if self.get_packet_type(packet) == 0x02: # COMMAND_REPLY\n 
return(self.parse_data(packet))\n else:\n return 0\n\n def parse_data(self, packet):\n \"\"\"Parse the data\"\"\"\n# print (self.get_sensor_id(packet),\n# \" becomes \",\n# self.converter.serial_to_db(self.get_sensor_id(packet)))\n return(self.get_node_id(packet),\n self.get_sensor_id(packet),\n self.get_payload(packet),\n str(self.now))\n\n def validate_packet(self, packet):\n \"\"\"Validate an incoming packet using parity control\"\"\"\n if len(packet) == 15:\n self.received_parity = self.get_quality_check(packet)\n self.calculated_parity = (int(packet[1:3], 16)\n ^ int(packet[4:6], 16)\n ^ int(packet[7:9], 16)\n ^ int(packet[10:12], 16))\n# print(self.received_parity)\n# print(self.calculated_parity)\n if self.received_parity == self.calculated_parity:\n return True\n else:\n return False\n\n def get_packet_type(self, packet):\n \"\"\"Get the packet type\"\"\"\n if self.validate_packet(packet):\n if packet[0] == 'T':\n return self.hex_to_dec(packet[1:3])\n else:\n return 0\n\n def get_node_id(self, packet):\n \"\"\"Get the node id\"\"\"\n if packet[3] == 'N':\n return self.hex_to_dec(packet[4:6])\n else:\n return 0\n\n def get_sensor_id(self, packet):\n \"\"\"Get the sensor id\"\"\"\n if packet[6] == 'I':\n return self.hex_to_dec(packet[7:9])\n else:\n return 0\n\n def get_payload(self, packet):\n \"\"\"Get the payload\"\"\"\n if packet[9] == 'P':\n return self.hex_to_dec(packet[10:12])\n else:\n return 0\n\n def get_quality_check(self, packet):\n \"\"\"Get the parity 'quality check'\"\"\"\n if packet[12] == 'Q':\n return self.hex_to_dec(packet[13:15])\n else:\n return 0\n\n def hex_to_dec(self, hexvalue):\n \"\"\"Convert hex value to decimal\"\"\"\n return int(hexvalue, 16)\n","sub_path":"Clients/Python/sp_controller/serialparser.py","file_name":"serialparser.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"123836361","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport inspect\nfrom typing import Any, Callable, Dict, Generator, Iterator, Mapping, Optional, overload, TypeVar, Union\n\nimport torch\nfrom lightning_utilities.core.apply_func import apply_to_collection\nfrom torch import nn as nn\nfrom torch import Tensor\nfrom torch.nn.modules.module import _IncompatibleKeys\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom lightning_fabric.plugins import Precision\nfrom lightning_fabric.plugins.precision.utils import _convert_fp_tensor\nfrom lightning_fabric.strategies import Strategy\nfrom lightning_fabric.utilities import move_data_to_device\nfrom lightning_fabric.utilities.device_dtype_mixin import _DeviceDtypeModuleMixin\nfrom lightning_fabric.utilities.types import Optimizable\n\nT_destination = TypeVar(\"T_destination\", bound=Dict[str, Any])\n\n\nclass _FabricOptimizer:\n def __init__(self, optimizer: Optimizer, strategy: Strategy) -> None:\n \"\"\"FabricOptimizer is a thin wrapper around the 
:class:`~torch.optim.Optimizer` that delegates the\n optimizer step calls to the strategy plugin.\n\n The underlying wrapped optimizer object can be accessed via the property :attr:`optimizer`.\n\n Args:\n optimizer: The optimizer to wrap\n strategy: Reference to the strategy for handling the optimizer step\n \"\"\"\n # `__del__` is skipped in case the optimizer has implemented custom destructor logic which we would\n # not want to call on destruction of the `_FabricOptimizer\n self.__dict__ = {\n k: v for k, v in optimizer.__dict__.items() if k not in (\"state_dict\", \"step\", \"zero_grad\", \"__del__\")\n }\n self.__class__ = type(\"Fabric\" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})\n self._optimizer = optimizer\n self._strategy = strategy\n\n @property\n def optimizer(self) -> Optimizer:\n return self._optimizer\n\n def state_dict(self) -> Dict[str, Tensor]:\n return self._strategy.get_optimizer_state(self.optimizer)\n\n def step(self, closure: Optional[Callable] = None) -> Any:\n kwargs = dict(closure=closure) if closure is not None else {}\n if hasattr(self._strategy, \"model\") and isinstance(self._strategy.model, Optimizable):\n # only DeepSpeed defines this\n optimizer = self._strategy.model\n else:\n optimizer = self.optimizer\n return self._strategy.optimizer_step(\n optimizer,\n **kwargs,\n )\n\n def zero_grad(self, **kwargs: Any) -> None:\n kwargs = _process_optimizer_zero_grad_kwargs(self.optimizer, kwargs)\n self.optimizer.zero_grad(**kwargs)\n\n\nclass _FabricModule(_DeviceDtypeModuleMixin):\n def __init__(\n self, forward_module: nn.Module, precision: Precision, original_module: Optional[nn.Module] = None\n ) -> None:\n \"\"\"The FabricModule is a thin wrapper around the :class:`torch.nn.Module` and handles precision / autocast\n automatically for the forward pass.\n\n The underlying wrapped module can be accessed via the property :attr:`module`.\n\n Args:\n forward_module: The module to wrap the ``forward`` method on.\n precision: Reference to the precision plugin for handling precision context\n original_module: The original, unmodified module as passed into the\n :meth:`lightning_fabric.fabric.Fabric.setup` method. This is needed when attribute lookup\n on this wrapper should pass through to the original module.\n \"\"\"\n super().__init__()\n self._forward_module = forward_module\n self._original_module = original_module or forward_module\n self._precision = precision\n\n @property\n def module(self) -> nn.Module:\n return self._original_module or self._forward_module\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Casts all inputs to the right precision and handles autocast for operations in the module forward\n method.\"\"\"\n args, kwargs = apply_to_collection([args, kwargs], function=self._precision.convert_input, dtype=Tensor)\n\n with self._precision.forward_context():\n output = self._forward_module(*args, **kwargs)\n\n output = apply_to_collection(\n output, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.get_default_dtype()\n )\n return output\n\n @overload\n def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination:\n ...\n\n @overload\n def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) 
-> Dict[str, Any]:\n ...\n\n def state_dict(\n self, destination: Optional[T_destination] = None, prefix: str = \"\", keep_vars: bool = False\n ) -> Optional[Dict[str, Any]]:\n return self._original_module.state_dict(\n destination=destination, # type: ignore[type-var]\n prefix=prefix,\n keep_vars=keep_vars,\n )\n\n def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True) -> _IncompatibleKeys:\n return self._original_module.load_state_dict(state_dict=state_dict, strict=strict)\n\n def __getattr__(self, item: Any) -> Any:\n try:\n # __getattr__ gets called as a last resort if the attribute does not exist\n # call nn.Module's implementation first\n return super().__getattr__(item)\n except AttributeError:\n # If the attribute is not available on the _FabricModule wrapper, redirect to the wrapped nn.Module\n original_module = super().__getattr__(\"_original_module\")\n return getattr(original_module, item)\n\n\nclass _FabricDataLoader:\n def __init__(self, dataloader: DataLoader, device: Optional[torch.device] = None) -> None:\n \"\"\"The FabricDataLoader is a wrapper for the :class:`~torch.utils.data.DataLoader`. It moves the data to\n the device automatically if the device is specified.\n\n Args:\n dataloader: The dataloader to wrap\n device: The device to which the data should be moved. By default the device is `None` and no data\n transfers will be made (identical behavior as :class:`~torch.utils.data.DataLoader`).\n \"\"\"\n self.__dict__.update(dataloader.__dict__)\n self._dataloader = dataloader\n self._device = device\n self._num_iter_calls = 0\n\n @property\n def device(self) -> Optional[torch.device]:\n return self._device\n\n def __len__(self) -> int:\n return len(self._dataloader)\n\n def __iter__(self) -> Union[Iterator[Any], Generator[Any, None, None]]:\n if hasattr(self._dataloader.sampler, \"set_epoch\"):\n # Without setting the epoch, the distributed sampler would return the same indices every time, even when\n # shuffling is enabled. 
In PyTorch, the user would normally have to call `.set_epoch()` on the sampler.\n            # In Lite, we take care of this boilerplate code.\n            self._dataloader.sampler.set_epoch(self._num_iter_calls)\n        self._num_iter_calls += 1\n\n        iterator = iter(self._dataloader)\n        if self._device is None:\n            yield from iterator\n            return\n\n        for item in iterator:\n            yield move_data_to_device(item, self._device)\n\n\ndef _process_optimizer_zero_grad_kwargs(optimizer: Optimizer, kwargs: Dict[str, Any]) -> Dict[str, Any]:\n    if \"set_to_none\" in kwargs and \"set_grads_to_None\" in inspect.signature(optimizer.zero_grad).parameters:\n        # Some optimizers out there, for example DeepSpeedZeroOptimizer, use a different name than PyTorch\n        kwargs[\"set_grads_to_None\"] = kwargs.pop(\"set_to_none\")\n    return kwargs\n","sub_path":"src/lightning_fabric/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"278411696","text":"list1 = []\r\nlist2 = []\r\nnum1 = eval(input(\"Enter the size of list1\"))\r\nnum2 = eval(input(\"Enter the size of list2\"))\r\nprint(\"Enter the elements of list1:\")\r\nfor i in range(num1):\r\n    e = eval(input())\r\n    list1.append(e)\r\nfor i in range(num2):\r\n    e = eval(input())\r\n    list2.append(e)\r\ndef sample(l1,l2):\r\n    if len(l1) != len(l2):\r\n        print(\"Lists are not equal\")\r\n    elif sorted(l1) == sorted(l2):\r\n        print(\"Lists are equal\")\r\n    else:\r\n        print(\"Lists are not equal\")\r\nsample(list1,list2)\r\n","sub_path":"week3(d).py","file_name":"week3(d).py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"581675994","text":"import math\nimport numpy as np\nimport scipy as sp\nfrom numpy import linalg as LA\n#---------------------------------\ndef diag(M):\n\n    # This function returns the eigenvalues and eigenvectors.\n    E,V= LA.eigh(M)\n    return E,V\n#---------------------------------\ndef x_Hermit():\n\n    # This function returns the grid points for the Hermite polynomial.\n    x_ij = np.zeros((tot_xgrid,tot_xgrid))\n    for row in range(tot_xgrid):\n        for colm in range(tot_xgrid):\n            x_ij[row,colm] = math.sqrt(float(row+1)/(2.0*mass*freq))* \\\n                float(row==colm-1) + x_eq * float(row==colm)+ math.sqrt\\\n                (float(row)/(2.0*mass*freq))*float(row==colm+1)\n\n    x_i,vect = diag(x_ij)\n    return x_i, vect\n#----------------------------------\ndef weight_Hermit():\n\n    # This function returns the weight for each grid point x_i.\n    w_i = np.zeros(tot_xgrid)\n    x_i,vect = x_Hermit()\n    for i in range(tot_xgrid):\n        w_i[i] = ((mass*freq/math.pi)**(-0.25)*math.exp(0.5*mass*\\\n            freq*(x_i[i]-x_eq)**2)*vect[0,i])**2\n\n    return w_i\n#---------------------------------\ndef second_derivavtive_Hermit():\n\n    # This function returns the second derivative matrix.\n    dif2mat = np.zeros((tot_xgrid,tot_xgrid))\n    x_i,vect=x_Hermit()\n    for row in range(tot_xgrid):\n        for colm in range(tot_xgrid):\n            for n in range(tot_xgrid):\n                dif2mat[row,colm] += vect[n,row] * (n+0.5) * vect[n,colm]* -2.0 * mass * freq\n            dif2mat[row,colm] += mass**2 * freq**2 *(x_i[row]-x_eq)**2 * float(row==colm)\n    \n    return dif2mat\n#--------------------------------\ndef Hamiltonian_Harmonic():\n\n    # This function returns the energy and the wavefunction.\n    
hbar=1\n    H_ij = np.zeros((tot_xgrid,tot_xgrid))\n    x_i,vect = x_Hermit()\n    dif2mat = second_derivavtive_Hermit()\n\n    # Compute the Hamiltonian.\n    for row in range(tot_xgrid):\n        for colm in range(tot_xgrid):\n            H_ij[row,colm] = -0.5 * (1.0/mass) * dif2mat[row,colm] + \\\n                0.5* mass * freq**2 * (x_i[row])**2 * float(row==colm)\n\n    energy,coef = diag(H_ij)\n\n    return energy,coef\n#---------------------------------\ndef second_derivative_FFT():\n\n    # This function returns the second derivative matrix.\n    dif2mat= np.zeros((tot_ygrid,tot_ygrid))\n    \n    dtheta = (2.0*math.pi)/(tot_ygrid+1)\n    K=math.pi/dtheta\n\n    for row in range(tot_ygrid):\n        for colm in range(tot_ygrid):\n            dif2mat[row,colm]=float(row==colm)*(0.5*red_mass)*(K**2/3.0)*(1+(2.0/(tot_ygrid)**2))\n            if row!=colm:\n                dif2mat[row,colm]=float(row!=colm)*(0.5*red_mass)*(2.0*K**2/(tot_ygrid)**2)*((-1)**(colm-row)/\\\n                (math.sin(math.pi*(colm-row)/(tot_ygrid))**2))\n\n    return dif2mat\n#---------------------------------\ndef FFT_grid():\n    \n    # This function returns the FFT grid points.\n    theta = np.zeros(tot_ygrid)\n    dtheta = (2.0*math.pi)/(tot_ygrid+1)\n    iy=0\n    for igrid in range(-tot_ygrid//2,tot_ygrid//2):\n        theta[iy] = float(igrid)*dtheta\n        iy+=1\n    \n    return theta,dtheta\n#---------------------------------\ndef Hamiltonian_FFT():\n    # This function computes the energy and the eigenvectors.\n\n    H_ij=np.zeros((tot_ygrid,tot_ygrid))\n    dif2mat = second_derivative_FFT()\n    theta,dtheta= FFT_grid()\n\n    for row in range(tot_ygrid):\n        for colm in range(tot_ygrid):\n            H_ij[row,colm]= dif2mat[row,colm] + float(row==colm)* 0.5*W_0*(1.0-math.cos(theta[colm]))\n\n    energy,coef = diag(H_ij)\n\n    return energy,coef\n\n#---------------------------------\nif __name__ == \"__main__\":\n\n    # Define the input parameters\n\n    # The parameters for the x coordinate.\n    tot_xgrid=21\n    freq=0.19/27.211\n    mass=1.0/freq\n    x_eq=0.0\n\n    # The parameters for the isomerization coordinate.\n    tot_ygrid=400 \n    red_mass=0.002806/27.211 \n\n    # The potential energy surface parameters\n    W_0=3.56/27.211\n    W_1=1.19/27.211\n    E_1=2.58/27.211\n    kappa=0.19/27.211\n    lamda=0.19/27.211\n\n    energy_HO,coef_HO = Hamiltonian_Harmonic()\n    x_i,vect = x_Hermit()\n    dx = weight_Hermit()\n\n#    gs_wf= open(\"wf-HO-gs.txt\", \"w\")\n#    erg = open(\"energy_HO.txt\", \"w\")\n#    for i in range(tot_xgrid):\n#        wf = (coef[i,0])**2/dx[i]\n#        gs_wf.write(str(x_i[i]) + \" \" + str(wf) + \"\\n\")\n#        erg.write(str(energy[i]) + \"\\n\")\n#    gs_wf.close()\n#    erg.close()\n\n    energy_FFT,coef_FFT = Hamiltonian_FFT()\n    theta,dtheta= FFT_grid()\n\n#    FFT_wf=open(\"wf-FFT-g0.txt\",\"w\")\n#    FFT_erg=open(\"energy-FFT.txt\",\"w\")\n#    for i in range(tot_ygrid):\n#        wf = (coef[i,0])**2/dtheta\n#        FFT_wf.write(str(theta[i])+ \" \" + str(wf) + \"\\n\")\n#        FFT_erg.write(str(energy[i])+ \"\\n\")\n    \n#    FFT_wf.close()\n#    FFT_erg.close()\n\n#    gs_wf_total = open(\"gs_wf_init_total.txt\",\"w\")\n    coef_gs= open(\"coef_init_gs.txt\",\"w\")\n    tot_wf=np.zeros((tot_xgrid,tot_ygrid))\n    norm=0.0\n    for i in range(tot_xgrid):\n        for j in range(tot_ygrid):\n#            tot_wf[i,j] = (coef_HO[i,0]*coef_FFT[j,0])**2/(dx[i]*dtheta)\n            tot_wf[i,j] = coef_HO[i,0]*coef_FFT[j,0]\n            norm+=tot_wf[i,j]**2\n            \n            coef_gs.write(str(tot_wf[i,j])+\"\\n\") \n#            gs_wf_total.write(str(x_i[i]) + \" \" + str(theta[j]) + \" \" + str(tot_wf[i,j]) + \"\\n\")\n#        gs_wf_total.write(\"\\n\")\n\n#    gs_wf_total.close()\n    # Check normalization\n#    print(norm)\n    
coef_gs.close()\n","sub_path":"initial-wf.py","file_name":"initial-wf.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536217255","text":"from pyramid.settings import aslist\n\n\ndef parse_resources(raw_resources):\n resources = {}\n for res in aslist(raw_resources):\n if \";\" not in res:\n msg = (\"Resources should be defined as \"\n \"'bucket/coll;bucket/coll'. Got %r\" % res)\n raise ValueError(msg)\n source, destination = res.split(';')\n\n def _get_resource(resource):\n parts = resource.split('/')\n if len(parts) != 2:\n msg = (\"Resources should be defined as bucket/collection. \"\n \"Got %r\" % resource)\n raise ValueError(msg)\n return {\n 'bucket': parts[0],\n 'collection': parts[1]\n }\n\n resources[source] = {\n 'source': _get_resource(source),\n 'destination': _get_resource(destination),\n }\n return resources\n","sub_path":"kinto_signer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"254714305","text":"import os\nimport copy\nimport time\nimport json\nimport numpy as np\nimport sys\nimport open3d\n\nsys.path.insert(0, \"../\")\nfrom point_cloud_clean import clean_point_cloud\nfrom sklearn.neighbors import KDTree\nfrom p2p_matching_in_organ.landmark_utils import get_skeleton, get_mesh_landmarks\nfrom utils import save_off, \\\n point_cloud_to_mesh, \\\n visualization_basis_function, \\\n plot_skeleton_matching\nfrom organ_matching.organ_matching_lr import match_organ_two_days\nfrom skeleton_matching.skeleton_match import get_skeleton_landmarks_pairs\nfrom p2p_matching_in_organ.p2p_matching_evaluation import evaluate_p2p_matching\nfrom p2p_matching_in_organ.p2p_matching_visualization import visualize_p2p_matching\nfrom functional_map_.functional import FunctionalMapping\n\n\ndef get_p2p_from_mesh(mesh1, mesh2, lm,\n process_params=None, fit_params=None, refine_params=None, verbose=False):\n if not process_params:\n process_params = {\n 'n_ev': (70, 70), # Number of eigenvalues on source and Target\n 'landmarks': lm,\n 'subsample_step': 5, # In order not to use too many descriptors\n 'descr_type': 'MIX', # WKS or HKS\n }\n\n process_params['landmarks'] = lm\n\n if not fit_params:\n fit_params = {\n 'descr_mu': 1e0,\n 'lap_mu': 1e-3,\n 'descr_comm_mu': 1e-1,\n 'orient_mu': 0,\n }\n\n if not refine_params:\n refine_params = {\n 'icp_iter_num': 10,\n 'zoomout_iter_num': 20\n }\n\n model1 = FunctionalMapping(mesh1, mesh2)\n model1.preprocess(**process_params, verbose=verbose)\n\n # Define parameters for optimization and fit the Functional Map\n\n model1.fit(**fit_params, verbose=verbose)\n\n if refine_params['icp_iter_num'] > 0:\n model1.icp_refine(nit=refine_params['icp_iter_num'], verbose=verbose)\n # model.change_FM_type('classic') # Chose between 'classic', 'icp' or 'zoomout'\n\n if refine_params['zoomout_iter_num'] > 0:\n if verbose:\n print(\"max dimenstion of laplacian basis function: \", model1.max_dim)\n if model1.max_dim > model1.k1 + refine_params['zoomout_iter_num']:\n model1.zoomout_refine(nit=refine_params['zoomout_iter_num'], verbose=verbose) # This refines the current\n # model.FM, be careful which FM type is\n # used\n else:\n if model1.max_dim > model1.k1:\n model1.zoomout_refine(nit=max(model1.max_dim - model1.k1, 1), verbose=verbose)\n p2p = model1.p2p\n return p2p\n\n\ndef double_direction_refinement(mesh1, mesh2, T21, T12, lm1=None, 
lm2=None):\n vertices1 = np.asarray(mesh1.vertices)\n vertices2 = np.asarray(mesh2.vertices)\n\n tree = KDTree(vertices1)\n NN_mesh1 = tree.query(np.asarray(vertices1), k=5, return_distance=False)\n\n # if lm1 is not None and lm2 is not None:\n # T21[lm2] = lm1\n # T12[lm1] = lm2\n\n for target_value in np.unique(T21):\n source_index = np.where(T21 == target_value)[0]\n if len(source_index) == 1:\n continue\n reflect_distance_min = 10 ** 4\n ind_keep = source_index[0]\n for ind in source_index:\n v_source = vertices2[ind]\n v_reflect = vertices2[T12[T21[ind]]]\n reflect_distance = np.linalg.norm(v_source - v_reflect)\n if reflect_distance < reflect_distance_min:\n reflect_distance_min = reflect_distance\n ind_keep = ind\n\n for ind in source_index:\n if ind != ind_keep:\n if ind in np.unique(T12):\n target_candidates = np.where(T12 == ind)[0]\n else:\n target_candidates = NN_mesh1[target_value, 1:]\n target_keep = target_candidates[0]\n for ind_t in target_candidates:\n if ind_t not in np.unique(T21):\n target_keep = ind_t\n T21[ind] = target_keep\n\n # NN_mesh1 = tree.query(np.asarray(vertices1), k=5, return_distance=False)\n for target in range(vertices1.shape[0]):\n if target not in np.unique(T21):\n for neighbor in NN_mesh1[target, 1:]:\n source_index = np.where(T21 == neighbor)[0]\n if len(source_index) < 1:\n continue\n reflect_distance_min = 10 ** 4\n ind_keep = source_index[0]\n for ind in source_index:\n v_source = vertices2[ind]\n v_reflect = vertices2[T12[T21[ind]]]\n reflect_distance = np.linalg.norm(v_source - v_reflect)\n if reflect_distance < reflect_distance_min:\n reflect_distance_min = reflect_distance\n ind_keep = ind\n for ind in source_index:\n if ind != ind_keep:\n T21[ind] = target\n\n return T21, T12\n\n\ndef get_p2p_mapping_organ(pcd1, pcd2,\n skel_landmarks1, skel_landmarks2,\n verbose=False,\n plot_pcd=False,\n plot_mesh=False,\n process_params=None,\n apply_double_direction_refinement=True,\n fit_params=None):\n options_path = \"../hyper_parameters/lyon2.json\"\n with open(options_path, \"r\") as json_file:\n options = json.load(json_file)\n\n # Load the parameters from the option\n pc_path = options['pc_path']\n skel_noted_path = options['skel_noted_path'] # the xyz of skeleton nodes with the segment index labelled\n segment_connect_path = options['segment_connect_path'] # the connection relationship among the segments\n stem_node_path = options['stem_node_path'] #\n mesh_radius_factor = options['mesh_radius_factor']\n pcd_clean_option = options['pcd_clean_option']\n pcd1, description_1 = clean_point_cloud(copy.deepcopy(pcd1), option=pcd_clean_option, verbose=verbose,\n translate=False)\n pcd2, description_2 = clean_point_cloud(copy.deepcopy(pcd2), option=pcd_clean_option, verbose=verbose,\n translate=False)\n if plot_pcd:\n open3d.visualization.draw_geometries([pcd1, pcd2])\n voxel_size = (description_1['voxel_size'] + description_2['voxel_size']) / 2\n\n mesh2 = point_cloud_to_mesh(pcd2, radius_factor=mesh_radius_factor)\n mesh1 = point_cloud_to_mesh(pcd1, radius_factor=mesh_radius_factor)\n # open3d.visualization.draw_geometries([mesh1, mesh2])\n if skel_landmarks2.shape[0] > 0 and skel_landmarks1.shape[0] > 0:\n lm1 = get_mesh_landmarks(mesh1, skel_landmarks1)\n lm2 = get_mesh_landmarks(mesh2, skel_landmarks2)\n # lm_len = min(lm1.shape[0], lm2.shape[0])\n lm = np.vstack([lm1, lm2]).T\n else:\n lm = None\n\n if plot_mesh:\n open3d.visualization.draw_geometries([mesh1, mesh2])\n\n T21 = get_p2p_from_mesh(mesh1, mesh2, lm, verbose=verbose, 
process_params=process_params, fit_params=fit_params)\n if lm is not None:\n lm = np.vstack([lm2, lm1]).T\n T12 = get_p2p_from_mesh(mesh2, mesh1, lm, verbose=verbose, process_params=process_params, fit_params=fit_params)\n if lm is not None and apply_double_direction_refinement == True:\n T21, T12 = double_direction_refinement(mesh1, mesh2, T21, T12, lm1, lm2)\n\n return mesh1, mesh2, T21, T12\n\n\ndef get_p2p_mapping_organ_collection(organ_collection_1,\n organ_collection_2,\n organ_matching,\n day1,\n day2,\n dataset,\n plot_mesh=False,\n plot_pcd=False,\n plot_skeleton=False,\n show_all=True,\n options=None):\n options_path = \"../hyper_parameters/lyon2.json\"\n if not options:\n with open(options_path, \"r\") as json_file:\n options = json.load(json_file)\n mesh_collection_1 = []\n mesh_collection_2 = []\n\n p2p_mappings = []\n scores = []\n for index, org1 in enumerate(organ_collection_1):\n if organ_matching[index] < 0:\n continue\n org2 = organ_collection_2[int(organ_matching[index])]\n\n # skel_landmarks1 = get_skeleton_landmarks(org1)\n # skel_landmarks2 = get_skeleton_landmarks(org2)\n\n xyz_1, connect_1, xyz_noted_1 = get_skeleton(org1)\n xyz_2, connect_2, xyz_noted_2 = get_skeleton(org2)\n #\n if plot_skeleton:\n plot_skeleton_matching(xyz_1, xyz_2, connect_1, connect_2)\n skel_landmarks1, skel_landmarks2 = get_skeleton_landmarks_pairs(xyz_1, xyz_2, connect_1, connect_2,\n visualize=False)\n skel_landmarks1 = skel_landmarks1[:, :3]\n skel_landmarks2 = skel_landmarks2[:, :3]\n print(\"Used: \", skel_landmarks1.shape[0], \" landmarks\")\n # skel_landmarks2[[-2, -1], :] = skel_landmarks2[[-1, -2], :]\n\n pcd1 = org1[\"pcd\"]\n pcd2 = org2[\"pcd\"]\n\n mesh1, mesh2, T21, T12 = get_p2p_mapping_organ(pcd1, pcd2, skel_landmarks1, skel_landmarks2,\n verbose=True,\n plot_pcd=plot_pcd,\n plot_mesh=plot_mesh)\n\n score = evaluate_p2p_matching(mesh1, mesh2, skel_landmarks1, skel_landmarks2, T21, T12, sample_proportion=0.5)\n print(\"score: \", score)\n scores.append(score)\n print(64 * \"=\")\n mesh_collection_1.append(mesh1)\n mesh_collection_2.append(mesh2)\n p2p_mappings.append(copy.deepcopy(T21))\n\n save_path_format = options[\"save_path_segment\"]\n i = 0\n for m1, m2 in zip(mesh_collection_1, mesh_collection_2):\n np.savetxt(save_path_format.format(dataset, day1, day2) + \"{}_{}.csv\".format(day1, i), np.asarray(m1.vertices))\n np.savetxt(save_path_format.format(dataset, day1, day2) + \"{}_{}.csv\".format(day2, i), np.asarray(m2.vertices))\n np.savetxt(save_path_format.format(dataset, day1, day2) + \"match_{}.csv\".format(i), p2p_mappings[i])\n i += 1\n\n visualize_p2p_matching(day1, day2, dataset, save_path_format=save_path_format, show_all=True)\n print(scores)\n print(np.mean(scores))\n open3d.visualization.draw_geometries(pcd1 + pcd2)\n\n\nif __name__ == \"__main__\":\n day1 = \"03-22_PM\"\n day2 = \"03-23_PM\"\n t_start = time.time()\n matches_index, org_collection_1, org_collection_2 = \\\n match_organ_two_days(day1, day2, visualize=True)\n # org_collection_1 = preprocess_pcd(org_collection_1)\n # org_collection_2 = preprocess_pcd(org_collection_2)\n t_end = time.time()\n print(\"Get the organs matched, used \", t_end - t_start, \" s\")\n\n t_start = time.time()\n # get_p2p_mapping_organ_collection(org_collection_1[0:1] + org_collection_1[3:],\n # org_collection_2,\n # np.append(matches_index[0:1], matches_index[3:]),\n # day1=day1,\n # day2=day2,\n # plot_mesh=False,\n # plot_pcd=False,\n # show_all=False)\n #\n # get_p2p_mapping_organ_collection(org_collection_1[3:5],\n # 
org_collection_2,\n # matches_index[3:5],\n # day1=day1,\n # day2=day2,\n # plot_mesh=False,\n # plot_pcd=False,\n # show_all=False)\n\n org_1 = org_collection_1[0:1] + org_collection_1[3:]\n org_2 = org_collection_2[0:1] + org_collection_2[3:]\n match = np.append(matches_index[0:1], matches_index[3:])\n pcd_1 = open3d.geometry.PointCloud()\n pcd_1.points = open3d.utility.Vector3dVector(\n np.vstack([np.asarray(org[\"pcd\"].points) for org in org_1])\n )\n\n pcd_2 = open3d.geometry.PointCloud()\n pcd_2.points = open3d.utility.Vector3dVector(\n np.vstack([np.asarray(org[\"pcd\"].points) for org in org_2])\n )\n\n # open3d.visualization.draw_geometries([pcd_1, pcd_2])\n # skel_landmarks1, skel_landmarks2 = [], []\n skel_landmarks1 = np.loadtxt(\"landmarks/landmark_{}.csv\".format(day1))\n skel_landmarks2 = np.loadtxt(\"landmarks/landmark_{}.csv\".format(day2))\n # for index, org1 in enumerate(org_1):\n # org2 = org_collection_2[int(match[index])]\n #\n # xyz_1, connect_1 = get_skeleton(org1)\n # xyz_2, connect_2 = get_skeleton(org2)\n # skel_lm1, skel_lm2 = get_skeleton_landmarks_pairs(xyz_1, xyz_2, connect_1, connect_2,\n # visualize=False)\n # skel_landmarks1.append(skel_lm1[::5])\n # skel_landmarks2.append(skel_lm2[::5])\n #\n # skel_landmarks1 = np.vstack(skel_landmarks1)\n # skel_landmarks2 = np.vstack(skel_landmarks2)\n\n mesh1, mesh2, T21, T12 = get_p2p_mapping_organ(pcd_1, pcd_2, skel_landmarks1, skel_landmarks2,\n verbose=True,\n plot_pcd=True,\n plot_mesh=True)\n score = evaluate_p2p_matching(mesh1, mesh2, skel_landmarks1, skel_landmarks2, T21, T12, sample_proportion=0.5)\n print(\"score: \", score)\n mesh_collection_1 = []\n mesh_collection_2 = []\n p2p_mappings = []\n\n mesh_collection_1.append(mesh1)\n mesh_collection_2.append(mesh2)\n p2p_mappings.append(copy.deepcopy(T21))\n\n color_bound_box = np.array([[-1000.0, -1000.0, -1000.0], [1000.0, 1000.0, 1000.0]])\n for i in range(len(mesh_collection_2)):\n m2 = mesh_collection_2[i]\n P = np.asarray(m2.vertices)\n P_min = np.min(P, axis=0)\n P_max = np.max(P, axis=0)\n color_bound_box[0, :] = np.max(np.vstack([color_bound_box[0, :], P_max]), axis=0)\n color_bound_box[1, :] = np.min(np.vstack([color_bound_box[1, :], P_min]), axis=0)\n\n # print(color_bound_box)\n\n for index, m2 in enumerate(mesh_collection_2):\n P = np.asarray(m2.vertices)\n color_value = np.zeros(P.shape)\n color_value = (P - color_bound_box[0, :]) / (color_bound_box[1, :] - color_bound_box[0, :])\n m2.vertex_colors = open3d.utility.Vector3dVector(copy.deepcopy(color_value))\n\n m1 = mesh_collection_1[index]\n color_value_1 = 0.8 * np.ones((np.asarray(m1.vertices).shape[0], 3))\n p2p = p2p_mappings[index]\n color_value_1[p2p] = color_value\n points_mapped = np.unique(p2p)\n tree = KDTree(np.asarray(m1.vertices)[points_mapped])\n all_mapped = tree.query(np.asarray(m1.vertices), k=1, return_distance=False).flatten()\n # color_value_1 = color_value_1[points_mapped[all_mapped]]\n m1.vertex_colors = open3d.utility.Vector3dVector(color_value_1)\n m1.translate([0, 100, 0])\n\n pcd1 = []\n pcd2 = []\n for m1, m2 in zip(mesh_collection_1, mesh_collection_2):\n p1 = open3d.geometry.PointCloud()\n p2 = open3d.geometry.PointCloud()\n p1.points = m1.vertices\n p1.colors = m1.vertex_colors\n p2.points = m2.vertices\n p2.colors = m2.vertex_colors\n pcd1.append(p1)\n pcd2.append(p2)\n print(score)\n\n open3d.visualization.draw_geometries(pcd1 + pcd2)\n\n t_end = time.time()\n print(t_end - 
t_start)\n","sub_path":"p2p_matching_in_organ/p2p_matching_fm.py","file_name":"p2p_matching_fm.py","file_ext":"py","file_size_in_byte":15875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80532424","text":"# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport sparseconvnet as s\nimport time\nimport os\nimport math\nimport numpy as np\nfrom PIL import Image\nimport sparseconvnet.legacy as scn\nimport sys\nsys.path.append(\"../eval/\")\nsys.path.append(\"../util/\")\nfrom logger import Logger\nfrom weighted_cross_entropy import weighted_cross_entropy\nfrom data import SUNCG_DATA, SUNCGTestDataset\nfrom metric import eval_suncg\n\n\ndef TrainValidate(model, train_dataset, test_dataset, config):\n\n optimizer = optim.SGD(model.parameters(),\n lr=config.initial_lr,\n momentum = config.momentum,\n weight_decay = config.weight_decay,\n nesterov=True)\n\n check_point_path = 'log/'+config.prefix+'-model.pth'\n\n if os.path.isfile(check_point_path):\n check_point = torch.load(check_point_path)\n train_iter = check_point['train_iter']\n start_epoch = int(train_iter / config.train_num) + 1\n best_acc = check_point['best_acc']\n print('Restarting at iter ' + str(train_iter) + ' from '+ check_point_path + '..')\n model.load_state_dict(check_point['state_dict'])\n optimizer.load_state_dict(check_point['optimizer'])\n logger_train = Logger('log/log_train.txt', title='ssc', resume=True)\n logger_valid = Logger('log/log_valid.txt', title='ssc', resume=True)\n else:\n train_iter = 0\n start_epoch = 1\n best_acc = -1\n if not os.path.exists('log'):\n os.mkdir('log')\n logger_train = Logger('log/log_train.txt', title='ssc')\n logger_train.set_names(['Learning Rate', 'Train Loss1', 'Train Loss2'])\n logger_valid = Logger('log/log_valid.txt', title='ssc')\n logger_valid.set_names(['SSC IOU', 'CMP IOU'])\n\n print('#parameters', sum([x.nelement() for x in model.parameters()]))\n \n for epoch in range(start_epoch, config.n_epochs + 1):\n model.train()\n for param_group in optimizer.param_groups:\n cur_lr = config.initial_lr * math.exp((1 - epoch) * config.lr_decay)\n param_group['lr'] = cur_lr\n print('set learning rate '+ str(cur_lr))\n\n for batch_idx, batch in enumerate(train_dataset['train']()):\n train_start = time.time()\n if config.use_gpu:\n batch['input'], batch['target'], batch['weight'], batch['target2'], batch['weight2'] \\\n = batch['input'].cuda(), batch['target'].cuda(), batch['weight'].cuda(), batch['target2'].cuda(), batch['weight2'].cuda()\n\n batch['input'].to_variable(requires_grad=True)\n batch['target'], batch['weight'], batch['target2'], batch['weight2'] \\\n = Variable(batch['target']), Variable(batch['weight']), Variable(batch['target2']), Variable(batch['weight2'])\n optimizer.zero_grad()\n output = model(batch['input'], batch['input_groups'])\n output1 = output[0][:,:,config.output_offset[0]:(config.output_offset[0]+config.dataset_outputSize[0]),\\\n config.output_offset[1]:(config.output_offset[1]+config.dataset_outputSize[1]),\\\n config.output_offset[2]:(config.output_offset[2]+config.dataset_outputSize[2])]\n\n output2 = output[1][:,:,config.output_offset2[0]:(config.output_offset2[0]+config.dataset_outputSize2[0]),\\\n 
config.output_offset2[1]:(config.output_offset2[1]+config.dataset_outputSize2[1]),\\\n config.output_offset2[2]:(config.output_offset2[2]+config.dataset_outputSize2[2])]\n\n loss1 = weighted_cross_entropy(output1, batch['target'], batch['weight'])\n loss2 = weighted_cross_entropy(output2, batch['target2'], batch['weight2'])\n loss = loss1 + loss2\n loss.backward()\n optimizer.step()\n train_time = time.time() - train_start\n print('epoch:' + str(epoch) + ', batch:' + str((train_iter) % config.train_num) + \\\n ', loss1:' + str(loss1.data[0]) +', loss2:' + str(loss2.data[0]) + ', time: ' + str(train_time))\n logger_train.append([cur_lr, loss1.data[0], loss2.data[0]])\n \n train_iter = train_iter + 1\n\n # Check if we want to write or validation\n b_validate = (train_iter % config.train_num) == 0\n if b_validate:\n ssc_iou, cmp_iou = valid_model(model, test_dataset)\n logger_valid.append([ssc_iou, cmp_iou])\n if ssc_iou > best_acc:\n print('Saving best model with va_res = {}'.format(ssc_iou))\n best_acc = ssc_iou\n torch.save({\n 'train_iter': train_iter,\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n 'state_dict': model.state_dict()\n }, 'log/'+config.prefix+'-best_model.pth')\n\n b_save = (train_iter % 200) == 0\n if b_save:\n torch.save({\n 'train_iter': train_iter,\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n 'state_dict': model.state_dict()\n }, check_point_path)\n if train_iter % config.train_num == 0:\n break\n\ndef valid_model(model, test_dataset):\n model.eval()\n predictions = []\n output_offset = test_dataset.output_offset\n dataset_outputSize = test_dataset.dataset_outputSize\n\n for idx in range(len(test_dataset)):\n test_input = test_dataset[idx]\n input = test_input['input']\n input_groups = test_input['input_groups']\n input = input.cuda()\n output = model(input, input_groups)\n predictions.append(output[0].cpu().data.numpy()[:,:,output_offset[0]:(output_offset[0]+dataset_outputSize[0]),\\\n output_offset[1]:(output_offset[1]+dataset_outputSize[1]),\\\n output_offset[2]:(output_offset[2]+dataset_outputSize[2])])\n predictions = np.vstack(predictions)\n # import h5py\n # fp = h5py.File('predictions.msg', \"w\")\n # result = fp.create_dataset(\"result\", predictions.shape, dtype='f')\n # result[...] 
= predictions\n # fp.close()\n return eval_suncg(predictions)\n\n\n","sub_path":"ssc/sgc-pattern4/trainValidate.py","file_name":"trainValidate.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"437474793","text":"import django, regex, srsly, random\nfrom collections import defaultdict\nfrom tqdm import tqdm\ndjango.setup()\nfrom sefaria.model import *\nfrom sefaria.system.exceptions import InputError\n\nclass ProdigyInputWalker:\n def __init__(self):\n self.prodigyInput = []\n self.prodigyInputByVersion = defaultdict(list)\n \n @staticmethod\n def get_refs_with_location(text, lang, citing_only=True):\n unique_titles = set(library.get_titles_in_string(text, lang, citing_only))\n title_node_dict = {title: library.get_schema_node(title,lang) for title in unique_titles}\n titles_regex_str = library.get_multi_title_regex_string(unique_titles, lang)\n titles_regex = regex.compile(titles_regex_str, regex.VERBOSE)\n \n def get_ref_from_match(match, outer_start=0):\n try:\n gs = match.groupdict()\n assert gs.get(\"title\") is not None\n node = title_node_dict[gs.get(\"title\")]\n ref = library._get_ref_from_match(match, node, lang)\n return ref.normal(), match.group(0), match.start(0)+outer_start, match.end(0)+outer_start\n except InputError:\n return None\n except AssertionError:\n return None\n\n refs_with_loc = []\n if lang == \"en\":\n refs_with_loc = [get_ref_from_match(m) for m in titles_regex.finditer(text)]\n else:\n outer_regex_str = r\"[({\\[].+?[)}\\]]\"\n outer_regex = regex.compile(outer_regex_str, regex.VERBOSE)\n for outer_match in outer_regex.finditer(text):\n refs_with_loc += [get_ref_from_match(m, outer_match.start(0)) for m in titles_regex.finditer(outer_match.group(0))]\n refs_with_loc = list(filter(None, refs_with_loc))\n return refs_with_loc\n\n def normalize(self, text):\n return regex.sub(r\"\\s*<[^>]+>\\s*\", \" \", text)\n\n def split_text(self, text):\n return text.split('. 
')\n\n def get_input(self, text, en_tref, language):\n refs_with_loc = ProdigyInputWalker.get_refs_with_location(text, language)\n temp_input = {\n \"text\": text,\n \"spans\": [\n {\"start\": s, \"end\": e, \"label\": \"פשוט\"} for _, _, s, e in refs_with_loc\n ],\n \"meta\": {\n \"Ref\": en_tref\n }\n }\n return temp_input\n \n def action(self, text, en_tref, he_tref, version):\n norm_text = self.normalize(text)\n temp_input = self.get_input(norm_text, en_tref, version.language)\n self.prodigyInputByVersion[(version.versionTitle, version.title, version.language)] += [temp_input]\n \n def make_final_input(self, sample_size):\n for temp_input_list in self.prodigyInputByVersion.values():\n self.prodigyInput += random.sample(temp_input_list, sample_size)\n\n\ndef make_prodigy_input(title_list, vtitle_list, lang_list):\n walker = ProdigyInputWalker()\n for title, vtitle, lang in tqdm(zip(title_list, vtitle_list, lang_list), total=len(title_list)):\n if vtitle is None:\n version = VersionSet({\"title\": title, \"language\": lang}, sort=[(\"priority\", -1)], limit=1).array()[0]\n else:\n version = Version().load({\"title\": title, \"versionTitle\": vtitle, \"language\": lang})\n version.walk_thru_contents(walker.action)\n walker.make_final_input(200)\n srsly.write_jsonl('research/prodigy/data/test_input.jsonl', walker.prodigyInput)\n\nif __name__ == \"__main__\":\n title_list = ['Rashba on Eruvin', 'Responsa Benei Banim', 'Magen Avraham', 'Beit Yosef', 'Be\\'er HaGolah', 'Havot Yair', 'Peninei Halakhah, Festivals', 'Pele Yoetz', 'Teshuvot HaRadbaz Volume 1']\n make_prodigy_input(title_list, [None]*len(title_list), ['he']*len(title_list))","sub_path":"research/prodigy/make_prodigy_input.py","file_name":"make_prodigy_input.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"348139188","text":"from .pmg2vesta import VestaFile\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='Transform a POSCAR to VESTA file. 
If no output filename is given, '\n                                                 'the structure will be written to stdout.')\n    parser.add_argument('input', metavar='input', type=str, nargs=1,\n                        help='input POSCAR file')\n    parser.add_argument('--output', '-o', type=str, nargs=1,\n                        help='output VESTA structure to file')\n\n\n    args = parser.parse_args()\n\n    vesta_file = VestaFile.from_poscar(args.input[0])\n    if args.output:\n        with open(args.output, \"w\") as fp:\n            vesta_file.write(fp)\n    else:\n        print(vesta_file.writes())\n","sub_path":"src/pmg2vesta/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"367475047","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/9/21 1:20 PM\n# @Author : xiaowei\n# @Site : \n# @File : serializers.py\n# @Software: PyCharm\n\nfrom rest_framework import serializers\nfrom .models import *\n\n\nclass ManufacturerSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Manufacturer\n        fields = \"__all__\"\n\n\nclass ProductModelSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = ProductModel\n        fields = \"__all__\"\n\n    def to_representation(self, instance):\n        vendor = instance.vendor\n        ret = super(ProductModelSerializer, self).to_representation(instance)\n        ret['vendor'] = {\n            \"id\": vendor.id,\n            \"name\": vendor.vendor_name,\n        }\n        return ret\n\n    def validate(self, attrs):\n        manufacturer_obj = attrs[\"vendor\"]\n        try:\n            manufacturer_obj.productmodel_set.get(model_name__exact=attrs[\"model_name\"])\n            raise serializers.ValidationError(\"This model already exists\")\n        except ProductModel.DoesNotExist:\n            return attrs\n","sub_path":"apps/manufacturer/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"445259012","text":"__author__ = 'Kalyan'\n\nnotes = '''\nThis problem deals with circular singly linked lists; the tail of the list points back to the head.\n'''\n\nfrom listutils import *\n\n\n# Delete a node in the list so that the resulting list is still circular.\ndef delete_node(head, value):\n    if head is None or (head.value == value and head.next is None):\n        head = None\n        return head\n    temp = head.next\n    if head.value == value:\n        while temp.next != head:\n            temp = temp.next\n        temp.next = head.next\n        head.next = None\n        return temp.next\n\n    while temp != head and temp.next.value != value:\n        temp = temp.next\n    if temp.next.value == value:\n        temp1 = temp.next\n        temp.next = temp1.next\n        temp1.next = None\n\n    return head\n\n\ndef check_deletion(input, value, output):\n    head = to_circular_list(input)\n    head = delete_node(head, value)\n    assert output == from_circular_list(head)\n\n\ndef test_delete_node():\n    check_deletion(range(1,6), 1, range(2,6))\n    check_deletion(range(1,6), 5, range(1,5))\n    check_deletion(range(1,6), 3, [1,2,4,5])\n    check_deletion(range(1,6), 10, range(1,6))\n    check_deletion([1], 10, [1])\n    check_deletion([1], 1, [])\n    check_deletion([], 1, [])\n\n\n\n","sub_path":"pythoncourse(IIIT_HYD)/question_15.py","file_name":"question_15.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"404634939","text":"#!/usr/bin/python3\n\nimport sys, os, subprocess, argparse\nfrom datetime import datetime\nimport shoutconfig\n\nVERSION = '1.0.0'\n\n# Shout data structure\nclass Shout:\n    def __init__(self, shout):\n        self.date = 
datetime.today().strftime('%Y-%m-%d')\n        self.time = datetime.today().strftime('%H:%M:%S')\n        self.user = os.environ.get(shoutconfig.environment_USER)\n        self.shout = shout\n    \n    def printShout(self):\n        print(self.user)\n        print(\" \" + self.date, self.time)\n        print(\" \" + self.shout)\n\n# Main application\nclass ShoutApp:\n    def __init__(self):\n        self.txtfile = os.environ[shoutconfig.environment_TXTFILE]\n    \n    def shout(self, shouted):\n        data = \"| \" + shouted.date + \" | \" + shouted.time + \" | \" + shouted.shout + \"\\n\"\n        with open(self.txtfile, 'a') as outf:\n            outf.seek(2,0)\n            outf.write(data)\n\ndef main():\n    shoutconfig.setEnvironmentVars()\n    app = ShoutApp()\n\n    parser = argparse.ArgumentParser(description=\"Some Hard Opinions Uttered Tenderly (Shout)\")\n    parser.add_argument('-v', '--version', action=\"store_true\", help=\"display version number\")\n    parser.add_argument('-i', '--init', action=\"store_true\", help=\"initialize configuration file\") \n    parser.add_argument('-s', '--shout', type=str, help=\"add shout to the void\")\n    \n    args = parser.parse_args()\n\n    if len(sys.argv[1:]) == 0:\n        parser.print_help()\n        parser.exit()\n\n    if args.version:\n        print(\"shout version\", VERSION)\n        parser.exit()\n    \n    if args.init:\n        print(\"INIT!\")\n        parser.exit()\n    \n    if args.shout:\n        new_shout = Shout(args.shout)\n        app.shout(new_shout)\n\n    parser.exit()\n\nif __name__ == \"__main__\":\n    main()","sub_path":"shout.py","file_name":"shout.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"554585290","text":"class EmptyCollection(Exception):\n    pass\n\n\nclass DoublyLinkedList:\n    class Node:\n        def __init__(self, data=None, next=None, prev=None):\n            self.data = data\n            self.next = next\n            self.prev = prev\n\n        def disconnect(self):\n            self.data = None\n            self.next = None\n            self.prev = None\n\n\n    def __init__(self):\n        self.header = DoublyLinkedList.Node()\n        self.trailer = DoublyLinkedList.Node()\n        self.header.next = self.trailer\n        self.trailer.prev = self.header\n        self.size = 0\n\n    def __len__(self):\n        return self.size\n\n    def is_empty(self):\n        return len(self) == 0\n\n    def first_node(self):\n        if self.is_empty():\n            raise EmptyCollection(\"List is empty\")\n        return self.header.next\n\n    def last_node(self):\n        if self.is_empty():\n            raise EmptyCollection(\"List is empty\")\n        return self.trailer.prev\n\n    def add_first(self, elem):\n        return self.add_after(self.header, elem)\n\n    def add_last(self, elem):\n        return self.add_after(self.trailer.prev, elem)\n\n    def add_after(self, node, elem):\n        prev = node\n        succ = node.next\n        new_node = DoublyLinkedList.Node()\n        new_node.data = elem\n        new_node.prev = prev\n        new_node.next = succ\n        prev.next = new_node\n        succ.prev = new_node\n        self.size += 1\n        return new_node\n\n    def add_before(self, node, elem):\n        return self.add_after(node.prev, elem)\n\n    def delete(self, node):\n        prev = node.prev\n        succ = node.next\n        prev.next = succ\n        succ.prev = prev\n        self.size -= 1\n        data = node.data\n        node.disconnect()\n        return data\n\n    def __iter__(self):\n        if self.is_empty():\n            return\n        cursor = self.first_node()\n        while cursor is not self.trailer:\n            yield cursor.data\n            cursor = cursor.next\n\n    def __str__(self):\n        return '[' + '<-->'.join([str(elem) for elem in self]) + ']'\n\n    def __repr__(self):\n        return str(self)\n\n\n    # if len(other) < len(self):\n    #     for i in range(len(other)):\n    #         if (i > 0):\n    #             # print(\"initial\", carry)\n    #             
toReturn.i.add_first(((temp2.i.delete(temp2.i.last_node()) + temp1.i.delete(temp1.i.last_node())+carry)%10))\n # print(\"len\", len(temp2))\n # if len(temp2) > 0:\n # carry = (((temp2.i.last_node().data + temp1.i.last_node().data)+carry)//10)\n # print(\"new\", carry)\n #\n #\n # else:\n # toReturn.i.add_first(((temp2.i.delete(temp2.i.last_node()) + temp1.i.delete(temp1.i.last_node()))%10))\n # carry = (temp2.i.last_node().data + temp1.i.last_node().data) // 10\n# sheep = Integer('000')\n# cow = Integer('0')\n# n3 = sheep + cow\n# print(n3)\n\n# if (isinstance(lnk_lst, int)):\n # result.add_last(lnk_lst)\n # return result\n # else:\n # result.add_last(deep_copy_linked_list(lnk_lst.first_node().data))\n # # start = lnk_lst.header.next.data\n # # print(start)\n # # result.add_last(deep_copy_linked_list(start))\n # start = lnk_lst.header.next.data\n # print(start)\n # return result\n\n\n def merge3(lnk1, lnk2, now1, now2):\n result = DoublyLinkedList()\n result2 = DoublyLinkedList()\n add = 0\n\n if (len(result) != (len(lnk1) + len(lnk2))):\n if now1.data and now2.data is not None:\n if now1.data <= now2.data:\n # result.add_last(now1.data)\n add = now1.data\n now1 = now1.next\n\n elif now1.data > now2.data:\n # result.add_last(now2.data)\n add = now2.data\n now2 = now2.next\n\n\n elif now1.data is not None:\n result.add_last(now1.data)\n add = now1.data\n now1 = now1.next\n\n elif now2.data is not None:\n add = now2.data\n result.add_last(now2.data)\n now2 = now2.next\n\n\n else:\n return result\n if now1.data or now2.data is not None:\n result2 = merge3(lnk1, lnk2, now1, now2)\n result.add_last(result2.first_node().data)\n\n return result","sub_path":"ClassesTests/DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"115200150","text":"import os, argparse, h5py, codecs\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom nltk import ParentedTree\nfrom subwordnmt.apply_bpe import BPE, read_vocabulary\nfrom model import SynPG\nfrom utils import Timer, make_path, load_data, load_embedding, load_dictionary, deleaf, sent2str, synt2str\nfrom pprint import pprint\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', type=str, default=\"./model/\", \n help=\"directory to save models\")\nparser.add_argument('--output_dir', type=str, default=\"./output/\",\n help=\"directory to save outputs\")\nparser.add_argument('--bpe_codes_path', type=str, default='./data/bpe.codes',\n help=\"bpe codes file\")\nparser.add_argument('--bpe_vocab_path', type=str, default='./data/vocab.txt',\n help=\"bpe vcocabulary file\")\nparser.add_argument('--bpe_vocab_thresh', type=int, default=50, \n help=\"bpe threshold\")\nparser.add_argument('--dictionary_path', type=str, default=\"./data/dictionary.pkl\", \n help=\"dictionary file\")\nparser.add_argument('--train_data_path', type=str, default=\"./data/train_data.h5\",\n help=\"training data\")\nparser.add_argument('--valid_data_path', type=str, default=\"./data/valid_data.h5\",\n help=\"validation data\")\nparser.add_argument('--emb_path', type=str, default=\"./data/glove.840B.300d.txt\", \n help=\"initialized word embedding\")\nparser.add_argument('--max_sent_len', type=int, default=40,\n help=\"max length of sentences\")\nparser.add_argument('--max_synt_len', type=int, default=160,\n help=\"max length of 
syntax\")\nparser.add_argument('--word_dropout', type=float, default=0.4,\n help=\"word dropout ratio\")\nparser.add_argument('--n_epoch', type=int, default=5,\n help=\"number of epoches\")\nparser.add_argument('--batch_size', type=int, default=64,\n help=\"batch size\")\nparser.add_argument('--lr', type=float, default=1e-4,\n help=\"learning rate\")\nparser.add_argument('--weight_decay', type=float, default=1e-5,\n help=\"weight decay for adam\")\nparser.add_argument('--log_interval', type=int, default=250,\n help=\"print log and validation loss evry 250 iterations\")\nparser.add_argument('--gen_interval', type=int, default=5000,\n help=\"generate outputs every 500 iterations\")\nparser.add_argument('--save_interval', type=int, default=10000,\n help=\"save model every 10000 iterations\")\nparser.add_argument('--temp', type=float, default=0.5,\n help=\"temperature for generating outputs\")\nparser.add_argument('--seed', type=int, default=0, \n help=\"random seed\")\nargs = parser.parse_args()\npprint(vars(args))\nprint()\n\n# fix random seed\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.backends.cudnn.enabled = False\n\ndef train(epoch, model, train_data, valid_data, train_loader, valid_loader, optimizer, criterion, dictionary, bpe, args):\n \n timer = Timer()\n n_it = len(train_loader)\n \n for it, data_idxs in enumerate(train_loader):\n model.train()\n \n data_idxs = np.sort(data_idxs.numpy())\n \n # get batch of raw sentences and raw syntax\n sents_ = train_data[0][data_idxs]\n synts_ = train_data[1][data_idxs]\n \n batch_size = len(sents_)\n \n # initialize tensors\n sents = np.zeros((batch_size, args.max_sent_len), dtype=np.long) # words without position\n synts = np.zeros((batch_size, args.max_synt_len+2), dtype=np.long) # syntax\n targs = np.zeros((batch_size, args.max_sent_len+2), dtype=np.long) # target output\n \n for i in range(batch_size):\n \n # bpe segment and convert to tensor\n sent_ = sents_[i]\n sent_ = bpe.segment(sent_).split()\n sent_ = [dictionary.word2idx[w] if w in dictionary.word2idx else dictionary.word2idx[\"\"] for w in sent_]\n sents[i, :len(sent_)] = sent_\n \n # add and for target output\n targ_ = [dictionary.word2idx[\"\"]] + sent_ + [dictionary.word2idx[\"\"]]\n targs[i, :len(targ_)] = targ_\n \n # parse syntax and convert to tensor\n synt_ = synts_[i]\n synt_ = ParentedTree.fromstring(synt_)\n synt_ = deleaf(synt_)\n synt_ = [dictionary.word2idx[f\"<{w}>\"] for w in synt_ if f\"<{w}>\" in dictionary.word2idx]\n synt_ = [dictionary.word2idx[\"\"]] + synt_ + [dictionary.word2idx[\"\"]]\n synts[i, :len(synt_)] = synt_\n \n sents = torch.from_numpy(sents).cuda()\n synts = torch.from_numpy(synts).cuda()\n targs = torch.from_numpy(targs).cuda()\n \n # forward\n outputs = model(sents, synts, targs)\n \n # calculate loss\n targs_ = targs[:, 1:].contiguous().view(-1)\n outputs_ = outputs.contiguous().view(-1, outputs.size(-1))\n optimizer.zero_grad()\n loss = criterion(outputs_, targs_)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n \n if it % args.log_interval == 0:\n # print current loss\n valid_loss = evaluate(model, valid_data, valid_loader, criterion, dictionary, bpe, args)\n print(\"| ep {:2d}/{} | it {:3d}/{} | {:5.2f} s | loss {:.4f} | g_norm {:.6f} | valid loss {:.4f} |\".format(\n epoch, args.n_epoch, it, n_it, timer.get_time_from_last(), loss.item(), model.grad_norm, valid_loss))\n \n if it % args.gen_interval == 0:\n # generate output to args.output_dir\n generate(epoch, it, model, 
valid_data, valid_loader, dictionary, bpe, args)\n \n if it % args.save_interval == 0:\n # save model to args.model_dir\n torch.save(model.state_dict(), os.path.join(args.model_dir, \"synpg_epoch{:02d}.pt\".format(epoch)))\n \ndef evaluate(model, data, loader, criterion, dictionary, bpe, args):\n model.eval()\n total_loss = 0.0\n max_it = len(loader)\n with torch.no_grad():\n for it, data_idxs in enumerate(loader):\n data_idxs = np.sort(data_idxs.numpy())\n \n # get batch of raw sentences and raw syntax\n sents_ = data[0][data_idxs]\n synts_ = data[1][data_idxs]\n\n batch_size = len(sents_)\n \n # initialize tensors\n sents = np.zeros((batch_size, args.max_sent_len), dtype=np.long) # words without position\n synts = np.zeros((batch_size, args.max_synt_len+2), dtype=np.long) # syntax\n targs = np.zeros((batch_size, args.max_sent_len+2), dtype=np.long) # target output\n\n for i in range(batch_size):\n \n # bpe segment and convert to tensor\n sent_ = sents_[i]\n sent_ = bpe.segment(sent_).split()\n sent_ = [dictionary.word2idx[w] if w in dictionary.word2idx else dictionary.word2idx[\"\"] for w in sent_]\n sents[i, :len(sent_)] = sent_\n \n # add and for target output\n targ_ = [dictionary.word2idx[\"\"]] + sent_ + [dictionary.word2idx[\"\"]]\n targs[i, :len(targ_)] = targ_\n \n # parse syntax and convert to tensor\n synt_ = synts_[i]\n synt_ = ParentedTree.fromstring(synt_)\n synt_ = deleaf(synt_)\n synt_ = [dictionary.word2idx[f\"<{w}>\"] for w in synt_ if f\"<{w}>\" in dictionary.word2idx]\n synt_ = [dictionary.word2idx[\"\"]] + synt_ + [dictionary.word2idx[\"\"]]\n synts[i, :len(synt_)] = synt_\n\n sents = torch.from_numpy(sents).cuda()\n synts = torch.from_numpy(synts).cuda()\n targs = torch.from_numpy(targs).cuda()\n \n # forward\n outputs = model(sents, synts, targs)\n \n # calculate loss\n targs_ = targs[:, 1:].contiguous().view(-1)\n outputs_ = outputs.contiguous().view(-1, outputs.size(-1))\n loss = criterion(outputs_, targs_)\n \n total_loss += loss.item()\n \n return total_loss / max_it\n\ndef generate(epoch, eit, model, data, loader, dictionary, bpe, args, max_it=10):\n model.eval()\n with open(os.path.join(args.output_dir, \"sents_valid_epoch{:02d}_it{:06d}.txt\".format(epoch, eit)), \"w\") as fp:\n with torch.no_grad():\n for it, data_idxs in enumerate(loader):\n if it >= max_it:\n break\n \n data_idxs = np.sort(data_idxs.numpy())\n \n # get batch of raw sentences and raw syntax\n sents_ = data[0][data_idxs]\n synts_ = data[1][data_idxs]\n\n batch_size = len(sents_)\n \n # initialize tensors\n sents = np.zeros((batch_size, args.max_sent_len), dtype=np.long) # words without position\n synts = np.zeros((batch_size, args.max_synt_len+2), dtype=np.long) # syntax\n targs = np.zeros((batch_size, args.max_sent_len+2), dtype=np.long) # target output\n\n for i in range(batch_size):\n \n # bpe segment and convert to tensor\n sent_ = sents_[i]\n sent_ = bpe.segment(sent_).split()\n sent_ = [dictionary.word2idx[w] if w in dictionary.word2idx else dictionary.word2idx[\"\"] for w in sent_]\n sents[i, :len(sent_)] = sent_\n \n # add and for target output\n targ_ = [dictionary.word2idx[\"\"]] + sent_ + [dictionary.word2idx[\"\"]]\n targs[i, :len(targ_)] = targ_\n \n # parse syntax and convert to tensor\n synt_ = synts_[i]\n synt_ = ParentedTree.fromstring(synt_)\n synt_ = deleaf(synt_)\n synt_ = [dictionary.word2idx[f\"<{w}>\"] for w in synt_ if f\"<{w}>\" in dictionary.word2idx]\n synt_ = [dictionary.word2idx[\"\"]] + synt_ + [dictionary.word2idx[\"\"]]\n synts[i, :len(synt_)] = synt_\n \n 
sents = torch.from_numpy(sents).cuda()\n                synts = torch.from_numpy(synts).cuda()\n                targs = torch.from_numpy(targs).cuda()\n                \n                # generate\n                idxs = model.generate(sents, synts, sents.size(1), temp=args.temp)\n                \n                # write output\n                for sent, idx, synt in zip(sents.cpu().numpy(), idxs.cpu().numpy(), synts.cpu().numpy()):\n                    fp.write(synt2str(synt[1:], dictionary)+'\\n')\n                    fp.write(sent2str(sent, dictionary)+'\\n')\n                    fp.write(synt2str(idx, dictionary)+'\\n')\n                    fp.write(\"--\\n\")\n\nprint(\"==== loading data ====\")\n\n# load bpe codes\nbpe_codes = codecs.open(args.bpe_codes_path, encoding='utf-8')\nbpe_vocab = codecs.open(args.bpe_vocab_path, encoding='utf-8')\nbpe_vocab = read_vocabulary(bpe_vocab, args.bpe_vocab_thresh)\nbpe = BPE(bpe_codes, '@@', bpe_vocab, None)\n\n# load dictionary and data\ndictionary = load_dictionary(args.dictionary_path)\ntrain_data = load_data(args.train_data_path)\nvalid_data = load_data(args.valid_data_path)\n\ntrain_idxs = np.arange(len(train_data[0]))\nvalid_idxs = np.arange(len(valid_data[0]))\nprint(f\"number of train examples: {len(train_data[0])}\")\nprint(f\"number of valid examples: {len(valid_data[0])}\")\n\ntrain_loader = DataLoader(train_idxs, batch_size=args.batch_size, shuffle=True)\nvalid_loader = DataLoader(valid_idxs, batch_size=args.batch_size, shuffle=False)\n\n# build model and load initialized glove embedding\nembedding = load_embedding(args.emb_path, dictionary)\nmodel = SynPG(len(dictionary), 300, word_dropout=args.word_dropout)\nmodel.load_embedding(embedding)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\ncriterion = nn.CrossEntropyLoss(ignore_index=dictionary.word2idx[\"\"])\n\nmodel = model.cuda()\ncriterion = criterion.cuda()\n\n# create folders\nmake_path(args.model_dir)\nmake_path(args.output_dir)\n\nprint(\"==== start training ====\")\nfor epoch in range(1, args.n_epoch+1):\n    # training\n    train(epoch, model, train_data, valid_data, train_loader, valid_loader, optimizer, criterion, dictionary, bpe, args)\n    # save model\n    torch.save(model.state_dict(), os.path.join(args.model_dir, \"synpg_epoch{:02d}.pt\".format(epoch)))\n    # shuffle training data\n    train_loader = DataLoader(train_idxs, batch_size=args.batch_size, shuffle=True)\n","sub_path":"train_synpg.py","file_name":"train_synpg.py","file_ext":"py","file_size_in_byte":13167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"159338729","text":"import sys\nimport csv\n\ndef dictC(nfile):\n    file = open(nfile,\"r\")\n\n    dict = {}\n    lst = []\n\n    for k,row in enumerate(file):\n        if k>0:\n            lst = row[:-1].split(',')\n            dict[lst[0]] = lst[1:]\n            #print(lst[1:])\n\n    file.close()\n    return dict\n\n#---------------------------------------------------------------\n\ndef trovaC(dict1, dict2):\n    for e2 in dict2.keys():\n        #print(f\"e2: {e2}\")\n        for e1 in dict1.keys():\n            #print(f\"e1: {e1}\")\n            if e2 == e1:\n                print(dict1[e1],dict2[e2])\n\n#----------------------------------------------------------------\n\ndef main():\n    anomalie = 'Anomalie_drone.csv'\n    volo = 'Volo_drone.csv'\n\n    dictAnomalie = dictC(anomalie)\n    dictVolo = dictC(volo)\n\n    #print(dictAnomalie)\n    #print(dictVolo)\n\n    trovaC(dictVolo, dictAnomalie)\n    \n\nif __name__ == \"__main__\":\n    main()","sub_path":"Python/EsercitazioneVerifica/Es2/Es.py","file_name":"Es.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"77049546","text":"import requests, 
json\n\ndef _startMessage(messageName, processInstance, variables=None):\n\t#invia un messaggio\n\tpost_url = \"http://localhost:8080/engine-rest/message\"\n\t#myobj = { \"processInstanceId\": id, \"messageName\": \"Message_0fnc6rd\"}\n\tmyobj = { \"processInstanceId\": processInstance, \"messageName\": messageName}\n\tif variables != None:\n\t\tmyobj[\"processVariables\"] = variables\n\n\tprint(\"=========== DEBUG ===========\")\n\tprint(\"provo a inviare un messaggio a \", messageName, \"(instance \", processInstance, \")\")\n\n\tx = requests.post(post_url, json = myobj, headers={\"Content-Type\": \"application/json\"})\n\n\tprint(\"Esito messaggio: \", x.text)\n\t\n\treturn x.status_code\n\ndef _intermediateMessage(messageName, processInstance, variables=None):\n\tprint(\"Invio messaggio a process instance:\", processInstance)\n\t#invia un messaggio\n\tpost_url = \"http://localhost:8080/engine-rest/message\"\n\t#myobj = { \"processInstanceId\": id, \"messageName\": \"Message_0fnc6rd\"}\n\tmyobj = { \"processInstanceId\": processInstance, \"messageName\": messageName, \"all\": True}\n\tif variables != None:\n\t\tmyobj[\"processVariables\"] = variables\n\tx = requests.post(post_url, json = myobj, headers={\"Content-Type\": \"application/json\"})\n\n\tprint(x.text)\n\n\ndef sendMessage(messageName, variables=None):\n\t\"\"\"\n\tvariables = {\t\n\t\t\"aVariable\" : {\"value\" : \"aNewValue\", \"type\": \"String\"},\n\t\t\"anotherVariable\" : {\"value\" : true, \"type\": \"Boolean\"}\t\n\t}\n\t\"\"\"\n\n\t#cerca il processo che si chiama \"acmesky\"\n\n\tget_url = \"http://localhost:8080/engine-rest/deployment\"\n\n\tresponse = requests.request(\"GET\", get_url)\n\n\tresponse_j = json.loads(response.text)\n\n\tid = \"\"\n\n\tfor element in response_j:\n\t\tif element[\"name\"] == \"acmesky\":\n\t\t\tid = element[\"id\"]\n\n\tget_url = \"http://localhost:8080/engine-rest/process-instance?deploymentId={}\".format(id)\t\n\tresponse = requests.request(\"GET\", get_url)\n\tresponse_j = json.loads(response.text)\n\n\tstatus = _startMessage(messageName, id, variables)\n\tif status == 400:\n\t\tfor r in response_j:\n\t\t\t_intermediateMessage(messageName, r[\"id\"], variables)\n\n\t","sub_path":"acmesky_source_code/server_python_jolie_e_client_java/soseng-project-prontogram/python_server/simpleCamundaRESTPost.py","file_name":"simpleCamundaRESTPost.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"37205467","text":"import numpy as np\nfrom scene import Scene\nfrom mediator import Keeper\nfrom FlyingVehicles import Aircraft, Rocket\nfrom config import Config\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\n\ndef main():\n Config.load()\n keepper = Keeper()\n scene = Scene(Config.SceneConfig.DeltaTime, keepper, Config.SceneConfig.BlastRadius)\n\n #Primary conditions for rocket\n rX = np.array([Config.RocketConfig.x, Config.RocketConfig.y, Config.RocketConfig.z, \n Config.RocketConfig.vx, Config.RocketConfig.vy, Config.RocketConfig.vz, Config.RocketConfig.FuelMass])\n\n #Primary conditions for attack Aircraft\n aX = np.array([Config.AtackAircraftConfig.x, Config.AtackAircraftConfig.y, Config.AtackAircraftConfig.z, \n Config.AtackAircraftConfig.vx, Config.AtackAircraftConfig.vy, Config.AtackAircraftConfig.vz, Config.AtackAircraftConfig.FuelMass])\n\n #Primary conditions for target Aircraft \n tX = np.array([Config.TargetAircraftConfig.x, 
Config.TargetAircraftConfig.y, Config.TargetAircraftConfig.z, \n Config.TargetAircraftConfig.vx, Config.TargetAircraftConfig.vy, Config.TargetAircraftConfig.vz, Config.TargetAircraftConfig.FuelMass])\n\n #thrustcoff, FuelMassPerSecond, MassWithoutFuel, Vector, mediator \n atackAircraft = Aircraft(Config.AtackAircraftConfig.thrustCoff, Config.AtackAircraftConfig.FuelMassPerSecond,\n Config.AtackAircraftConfig.MassWithoutFuel, aX, keepper) \n\n #:46,47s/AtackAircraft/TargetAircraft/g\n targetAircraft = Aircraft(Config.TargetAircraftConfig.thrustCoff, Config.TargetAircraftConfig.FuelMassPerSecond,\n Config.TargetAircraftConfig.MassWithoutFuel, tX, keepper)\n \n for _ in range(Config.AtackAircraftConfig.rockets):\n atackAircraft.add( Rocket(Config.RocketConfig.thrustCoff, Config.RocketConfig.FuelMassPerSecond,\n Config.RocketConfig.MassWithoutFuel, rX, keepper) )\n \n scene.add(atackAircraft) \n scene.add(targetAircraft)\n scene.add(atackAircraft.dettachRocket())\n \n while not scene.simulate():\n print(\"Target: {}\".format(keepper.X[7:10]))\n print(\"Rocket: {}\".format(keepper.X[14:17]))\n # pass\n\n print(\"The Target is striked\\n\")\n print(\"Last Vector:\")\n print(\"---------------------------------------------------------------------\")\n print(scene.res[-1])\n print(\"---------------------------------------------------------------------\")\n print(\"Target Coordinates:\")\n print(\"---------------------------------------------------------------------\")\n print(keepper.X[7:10]) \n print(\"---------------------------------------------------------------------\")\n print(\"Rocket Coordinates:\")\n print(\"---------------------------------------------------------------------\")\n print(keepper.X[14:17])\n print(\"---------------------------------------------------------------------\")\n print(\"time: {}\\n\".format(scene.T[-1]))\n\n \n T = np.linspace(0, float(scene.T[-1]), scene.count + 1)\n a = 221\n plt.subplot(a)\n plt.plot(scene.res[: , 14], scene.res[:, 15], 'r', scene.res[:, 7], scene.res[:, 8])\n plt.title('Rocket X, Target X')\n\n plt.subplot(222)\n plt.plot(scene.res[:, 14], scene.res[: , 16], 'r', scene.res[:, 7], scene.res[:, 9]) \n plt.title('Rocket Y, Target Y')\n\n plt.subplot(223)\n plt.plot(scene.res[:, 15], scene.res[:, 16], 'r', scene.res[:, 8], scene.res[:, 9])\n plt.title('Rocket Z, Target Z')\n\n mpl.rcParams['legend.fontsize'] = 10\n fig = plt.figure()\n ax = fig.gca(projection = '3d')\n ax.plot(scene.res[:, 14], scene.res[:, 15], scene.res[:, 16])#, scene.res[:, 7], scene.res[:, 8], scene.res[:, 9])\n ax.plot(scene.res[:, 7], scene.res[:, 8], scene.res[:, 9])\n ax.legend()\n\n plt.show()\n \n\nif __name__ == '__main__':\n main()","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"142831917","text":"# https://atcoder.jp/contests/abc079/tasks/abc079_c\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n a,b,c,d=input()\n from itertools import product\n for i,j,k in product(['+','-'],repeat=3):\n s=a+i+b+j+c+k+d\n if(eval(s)==7):\n print(s+'=7')\n return\nresolve()\n","sub_path":"ABC079/c_train_ticket.py","file_name":"c_train_ticket.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"148122035","text":"#Imports\r\nfrom stompclient import 
PublishClient\r\nimport time\r\n\r\n#Method to publish messages\r\ndef send(clicker, channel, choice, status, count, rangeVal):\r\n statusCode = \"\"\r\n client = PublishClient('127.0.0.1', 61613)\r\n client.connect()\r\n #Keep track of whether channel is opened or closed\r\n if status == '0':\r\n statusCode = 'Closed'\r\n else:\r\n statusCode = 'Open'\r\n \r\n try:\r\n if count == 1:\r\n if clicker == \"Professor\":\r\n #Classroom Open/Close\r\n client.send(\"/queue/\" + channel, '-- channel ' + channel + ' is ' + statusCode, extra_headers={'channel': channel, 'status':statusCode, 'count': count, 'clicker':clicker, 'range':rangeVal, 'choice':choice})\r\n else:\r\n #Register clicker\r\n client.send(\"/queue/\" + channel, '-- clicker ' + clicker + ' registered on channel ' + channel, extra_headers={'channel': channel, 'status':statusCode, 'count': count, 'clicker':clicker, 'range':rangeVal, 'choice':choice})\r\n else:\r\n #Queue response in ActiveMQ\r\n client.send(\"/queue/\" + channel, \"received a valid response from clicker \" + clicker, extra_headers={'channel': channel, 'status':statusCode, 'count': count, 'clicker':clicker, 'range':rangeVal, 'choice':choice}) \r\n #Short sleep so that all messages are sent\r\n time.sleep(1.0)\r\n finally:\r\n #Disconnect client once all messeges are queued\r\n client.disconnect()","sub_path":"classroom-clicker/clicker/classroomclicker/MessagePublisher.py","file_name":"MessagePublisher.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269581150","text":"#!/usr/bin/python\n\"\"\"\nProject: INCsim\nDeveloper: Ming Liu\nDescription:\n(1) End host application --> receive packet\n\"\"\"\nimport socket\nimport struct\nimport binascii\n\nclass DataStore(object):\n\tdef __init__(self):\n\t\tself.send_sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)\n\t\tself.send_sock.bind((\"eth0\", 0))\n\t\tself.recv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0801))\n\t\tself.dict = {}\n\n\tdef put(self, key, value):\n\t\tself.dict[key] = value\n\t\n\tdef get(self, key):\n\t\tsrc_addr = \"\\x00\\x04\\x00\\x00\\x00\\x00\"\n\t\tdst_addr = \"\\x00\\x04\\x00\\x00\\x00\\x01\"\n\t\tethertype = \"\\x08\\x01\"\n\n\t\tif self.dict.has_key(key):\n\t\t\tvalue = self.dict.get(key)\n\t\telse:\n\t\t\tvalue = \"00000000\"\n\n\t\tpayload = \"\\x02\" + binascii.unhexlify(key) + binascii.unhexlify(value)\n\t\tself.send_sock.send(dst_addr+src_addr+ethertype+payload)\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tpkt = self.recv_socket.recvfrom(2048)\n\t\t\tethernetHeader = pkt[0][0:14]\n\t\t\tpayload = pkt[0][14:21]\n\t\t\teth_hdr = struct.unpack(\"!6s6s2s\",ethernetHeader)\n\t\t\tpayload_data = struct.unpack(\"!1s2s4s\", payload)\n\n\t\t\tf = open('log_receive.txt', 'a')\n\n\t\t\tif binascii.hexlify(payload_data[0]) == \"00\":\n\t\t\t\tself.put(binascii.hexlify(payload_data[1]), binascii.hexlify(payload_data[2]))\n\t\t\t\tprint >> f, \"Put a packet:\"\n\t\t\telse:\n\t\t\t\tself.get(binascii.hexlify(payload_data[1]))\n\t\t\t\tprint >> f, \"Get a packet:\"\n\n\t\t\t#print >> f, \"Header:\"\n\t\t\t#print >> f, \"\\t\" + binascii.hexlify(eth_hdr[0])\n\t\t\t#print >> f, \"\\t\" + binascii.hexlify(eth_hdr[1])\n\t\t\t#print >> f, \"\\t\" + binascii.hexlify(eth_hdr[2])\n\t\t\t#print >> f, \"Payload:\"\n\t\t\tprint >> f, \"\\tOperation code:\" + binascii.hexlify(payload_data[0])\n\t\t\tprint >> f, \"\\tKey:\" + binascii.hexlify(payload_data[1])\n\t\t\tprint >> f, 
\"\\tValue:\" + binascii.hexlify(payload_data[2])\n\t\t\tf.close()\n\ndef main():\n\tserver = DataStore()\n\tserver.run()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Workloads/version0/host_receive.py","file_name":"host_receive.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276286980","text":"import requests\nimport re\ndef getHtmlText(url):\n headers={\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3861.400 QQBrowser/10.7.4313.400'\n }\n try:\n r=requests.get(uel,headers=headers)\n r.raise_for_status()\n r.encoding=r.apparent_encoding\n return r.text\n except:\n return\"异常1\"\ndef parsePage(html):\n try:\n plt=re.findall(r'(.*)',html)\n return plt\n except:\n print(\"异常2\")\ndef printGoodsList(ilt):\n tplt=\"{:8}\\t{:8}\"\n print(tplt.format(\"序号\",\"价格\"))\n count=0\n for i in ilt:\n count+=1\n print(tplt.format(count,i[0]))\ndef main():\n url='https://search.jd.com/Search?keyword=书包&qrst=1&stock=1&page=3&s=56&click=0'\n ##infoList=[]\n try:\n html=getHtmlText(url)\n plt=parsePage(html)\n except:\n print(\"异常3\")\n printGoodsList(plt)\n \nmain()\n \n \n \n","sub_path":"京东爬虫.py","file_name":"京东爬虫.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46555475","text":"# --- System Libraries ---------------------------------------------------------\nfrom pathlib import Path\nimport datetime\nimport time\nimport json\n\n# --- Project Libraries --------------------------------------------------------\nfrom PacteClient.Admin import Admin\nfrom PacteClient.CorpusManager import CorpusManager\nfrom PacteUtil.QuickConfig import QuickConfig, UserType\n\nSMALL_CORPUS_SIZE = 2\nTESTCORPUS = \"CorpusTest-999999998\"\nTRANSCODEGROUP = \"Transcode task bucket\"\n\n\ndef createSmallCorpus(corpusManager):\n corpusId = corpusManager.getcorpus_id(TESTCORPUS)\n currentTime = datetime.datetime.now().isoformat()\n cptFail = 0\n if corpusId:\n corpusManager.deleteCorpus(corpusId)\n\n # if corpusManager.getSize(corpusId) == SMALL_CORPUS_SIZE:\n # return corpusId\n # else:\n # print(\"Deleting corrupted small sample corpus...\")\n # corpusManager.deleteCorpus(corpusId)\n\n # Create the new corpus\n corpusId = corpusManager.createCorpus(TESTCORPUS, [\"fr-fr\", \"en-en\"])\n\n if corpusId:\n transcodeGroup = corpusManager.createBucket(corpusId, TRANSCODEGROUP)\n while not transcodeGroup and cptFail < 10:\n time.sleep(5)\n transcodeGroup = corpusManager.getGroupId(TRANSCODEGROUP, corpusId)\n cptFail += 1\n\n if not transcodeGroup:\n print(\"Cannot find transcoder group id\")\n return None\n\n # Register schemas\n try:\n with Path(__file__).parent.parent.joinpath(\"PacteClient\").joinpath(\"data\").joinpath(\n CorpusManager.DOCMETA).open(\"r\") as docmeta_file:\n transcodeSchema = json.load(docmeta_file)\n except IOError as e:\n print(e)\n return None\n schemaId = corpusManager.getSchemaId(transcodeSchema[\"schemaType\"])\n if not schemaId:\n schemaId = corpusManager.registerSchema(transcodeSchema)\n corpusManager.copySchemaToGroup(schemaId, corpusId, transcodeGroup)\n\n # Documents and their metadata\n docId = corpusManager.addDocument(corpusId, \"bla bla bla\", \"testExport1\", \"yep1\", \"fr-fr\")\n time.sleep(5)\n annot = dict(document_size=11, source=\"tamere.zip\", file_edit_date=currentTime,\n 
detectedLanguageProb=99.99972436012376,\n                     file_type=\"text/plain; charset=UTF-8\",\n                     _documentID=docId, file_path=\"/\", indexedLanguage=\"fr-fr\", schemaType=\"DOCUMENT_META\",\n                     file_name=\"1.txt\", file_encoding=\"UTF-8\", _corpusID=corpusId, detectedLanguage=\"fr-fr\",\n                     file_size=12, file_creation_date=currentTime, file_extension=\".txt\")\n\n        annotationId = corpusManager.addAnnotation(corpusId, transcodeGroup, annot)\n\n        if not annotationId:\n            print(\"empty annotation 1\")\n\n        docId = corpusManager.addDocument(corpusId, \"bli bli bli\", \"testExport2\", \"yep2\", \"fr-fr\")\n        time.sleep(5)\n        annot = dict(document_size=11, source=\"tamere.zip\", file_edit_date=currentTime,\n                     detectedLanguageProb=99.99972436012376,\n                     file_type=\"text/plain; charset=UTF-8\",\n                     _documentID=docId, file_path=\"/\", indexedLanguage=\"fr-fr\", schemaType=\"DOCUMENT_META\",\n                     file_name=\"2.txt\", file_encoding=\"UTF-8\", _corpusID=corpusId, detectedLanguage=\"fr-fr\",\n                     file_size=12, file_creation_date=currentTime, file_extension=\".txt\")\n\n        annotationId = corpusManager.addAnnotation(corpusId, transcodeGroup, annot)\n\n        if not annotationId:\n            print(\"empty annotation 2\")\n\n        # Groups\n        corpusManager.createBucket(corpusId, \"group1\")\n        corpusManager.createBucket(corpusId, \"group2\")\n        corpusManager.createBucket(corpusId, \"group3\")\n\n        # TODO Add schemas and annotations\n\n    return corpusId\n\n\ndef createTestingUser():\n    QuickConfig.config_file_path = \"config.properties\"\n    config = QuickConfig.fromConfigfile()\n    admin = Admin(config)\n    user = config.getUserCredential(UserType.CustomUser)\n    userId = admin.checkUser(user.username, user.password)\n\n    if not userId:\n        admin.createUser(user.username, user.password, \"TestUser\", \"011\")\n        userId = admin.checkUser(user.username, user.password)\n\n    return userId is not None\n","sub_path":"tests/TestUtil.py","file_name":"TestUtil.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104617210","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport hashlib\n\n''' Implementation of Merkle tree '''\n\n#------------------------------------------------------------------------------\n\n__author__ = 'Ao Song'\n__email__ = 'ao.song@outlook.com'\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.left = None\n        self.right = None\n\n\nclass MerkleTree:\n    def __init__(self):\n        self.__root = None\n        self.__nodeList = []\n        self.__tokenList = []\n\n    def generate_binary_mt_tree(self, nodes, isHashedNode=False):\n        '''\n        - generate a binary hash tree from a node list\n        \n        Input: \n        - nodes: a list of nodes with type Node\n        - isHashedNode: if the node is of type Node with hashed data\n\n        Return: the merkle tree in the form of binary tree\n        '''\n        if not isHashedNode:\n            nodes = [Node(node) for node in nodes]\n\n        roots = []\n\n        l = len(nodes)\n        if l == 0:\n            return\n\n        if l == 1:\n            self.__root = nodes[0]\n            self.calculate_tokens()\n            return\n\n        for i, j in zip(nodes[::2], nodes[1::2]):\n            md5 = hashlib.md5()\n            md5.update(i.data.encode())\n            md5.update(j.data.encode())\n            root = Node(md5.hexdigest())\n            root.left = i\n            root.right = j\n            roots.append(root)\n\n        if l % 2 != 0:\n            roots.append(nodes[-1])\n\n        self.generate_binary_mt_tree(roots, True)\n\n\n    def calculate_tokens(self):\n        '''\n        calculate tokens of the nodes that compose the binary mt tree\n        '''\n        pass\n\n\n    def add_node(self, node, isHashedNode=False):\n        '''\n        - use md5 method;\n        - new node will only be added to the 
right of the tree and \n calculate with the previous root;\n\n Input:\n - node: node to be added in the tree\n - isHashedNode: if the node is of type Node with hashed data\n\n Return: the merkle tree\n '''\n if isHashedNode == False:\n md5 = hashlib.md5()\n md5.update(node.encode())\n node = Node(md5.hexdigest())\n\n if self.__root == None:\n self.__root = node\n self.__tokenList.append([None]) \n else:\n md5 = hashlib.md5() \n md5.update(self.__root.data.encode())\n md5.update(node.data.encode())\n rootData = md5.hexdigest()\n\n leftNode = self.__root\n self.__root = Node(rootData)\n self.__root.left = leftNode\n self.__root.right = node\n\n for token in self.__tokenList:\n if isinstance(token, list):\n token.append(node.data.encode())\n else:\n token = list([token, node.data.encode()])\n\n self.__tokenList.append(leftNode.data)\n\n self.__nodeList.append(node.data)\n\n def get_token(self, nodeHash):\n '''\n - get token list by hash key of node hash\n - the first token of the token list is used at the left while the other\n ones are used at the right\n - if the first token is None, all tokens are used at the right\n '''\n if nodeHash in self.__nodeList:\n i = self.__nodeList.index(nodeHash)\n return self.__tokenList[i]\n else: \n print(\"Node not in the list!\\n\")\n return []\n\n def clear(self):\n self.__root = None\n self.__nodeList = []\n self.__tokenList = []\n\n def get_root(self):\n if self.__root == None:\n return None\n\n return self.__root.data\n\n def get_node_list(self):\n return self.__nodeList\n\n def get_token_list(self):\n return self.__tokenList ","sub_path":"MerkleTree.py","file_name":"MerkleTree.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182152704","text":"'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n\n# Défini les dimensions des images\nimg_rows, img_cols = 28, 28\n\n# Sépare les data en deux jeux de données: test et train\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nprint (K.image_data_format())\n# Redimensione x_train et x_test en 4 dimensions\n# 1. on conserve la 1er dimension \n# 2. 
on ajoute une dimension égale à 1\n# 3 et 4 on utilise les dimensions de l'image definies plus tôt\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n# Converti toutes les données en float32\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n# On divise par 255 (correspondant à la couleur) le poids des données dans les jeux de données x\nx_train /= 255\nx_test /= 255\n# Affiche le nombre de valeur dans le jeu de train et le jeu de test\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# Converti les valeurs des données dans y de integer vers binaire\n# Une liste contien 10 objets, les valeurs sont toutes à 0 sauf pour la position correspondant au chiffre qui sera à 1\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# Défini le model comme séquenciel\nmodel = Sequential()\n\n\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\n# Le model va mainteannt retourner une liste de shape (*, 128)\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\n# Le model va mainteannt retourner une liste de shape (*, 10) correspondant à la variable num_classes (le nombre de chiffre que nous avons de 0 à 9)\nmodel.add(Dense(num_classes, activation='softmax'))\n\n# Compilation de notre modèle\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n# Entrainement de notre modèle sur les jeux de données train et valdiation à partir des jeux de données test\n# Epochs correspond au nombre de passage que l'on souhaite faire\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\n# Calcul le pourcentage de précision de notre modèle\nprint(f\"Test loss: {score[0]*100} %\")\nprint(f\"Test accuracy: {score[1]*100} %\")","sub_path":"CNN/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284896774","text":"__author__ = 'MatthewRowe'\nimport sys\n\n\nuserName = \"\"\n\ndef displayIntroScreen():\n print('\\n')\n print(\"//////////////////////////////////////////////\")\n print(\"//////////////////////////////////////////////\")\n print(\"//////// HANGMAN ////////\")\n print(\"//////////////////////////////////////////////\")\n print(\"//////////////////////////////////////////////\")\n print('\\n')\n\n\ndef getUserInputBeforeGame():\n readyToPlay = False\n while not readyToPlay:\n userInput = input(\"Enter 1 to play and 2 to quit.\")\n if userInput == \"1\":\n readyToPlay = True\n elif userInput == \"2\":\n print(\"\\nBye bye.\\n\")\n sys.exit()\n else:\n print(\"Incorrect input. 
Enter 1 to play and 2 to quit.\")\n print(\"\\nLet's play!\\n\")\n\n\ndef getUserName():\n userNameEntered = False\n userInput = input(\"What's your name?\")\n while not userNameEntered:\n userSelect = input(\"Your name is \" + userInput + \"? Enter 1 for yes, 2 to re-enter your name, and 3 to quit.\")\n if userSelect == \"1\":\n userName = userInput\n print(\"Your name is \" + userInput + \"!\")\n userNameEntered = True\n elif userSelect == \"3\":\n print(\"Goodbye!\")\n sys.exit()\n elif userSelect == \"2\":\n print(\"You must have typed in the wrong name, that's ok.\")\n userInput = input(\"What's your name?\")\n else:\n print(\"That's not a valid entry. Try again.\")\n\n\ndef displayGameBoard():\n print(\"Ok \" + userName + \" here's the gameboard.\")\n","sub_path":"src/scripts/nonOO/functionsIntro.py","file_name":"functionsIntro.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457938609","text":"# def conbainKey(nums,lst):\n# for i in range(nums):\n\n\nnums=int(input())\ndic = {}\nfor i in range(nums):\n data = input().split(' ')\n key = int(data[0])\n value = int(data[1])\n dic[key] = dic.get(key,0) + value\nfor i in sorted(dic):\n print(i,dic.get(i)) \n\n\n\n","sub_path":"nowcoder/nowcoder2.py","file_name":"nowcoder2.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"217198019","text":"#!python3\n# -*- coding: utf-8 -*-\n# @Author : lee \n# @Date : 2020/5/5 13:44\n'项目:空行插入程序'\n'''\n程序从表格第n行开始,插入m个空行;\n'''\nimport openpyxl\nrow_num = int(input('行号:'))\nenter_num = int(input('插入空行:'))\nfilename = input('表格名称:')\nwb = openpyxl.load_workbook(filename)\nsheet = wb.active\n\nwb1 = openpyxl.Workbook()\nsheet1 = wb1.active\nsheet1.title = 'news'\n\nfor i in range(1,row_num):\n for j in range(1,sheet.max_column+1):\n sheet1.cell(row=i,column=j).value = sheet.cell(row=i,column=j).value\n\nfor x in range(row_num,sheet.max_row+1):\n for y in range(1,sheet.max_column+1):\n sheet1.cell(row=x+enter_num,column=y).value = sheet.cell(row=x,column=y).value\n\nwb1.save('new_'+filename)","sub_path":"book_learning/automation_py/ch12_Excel/project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"535146564","text":"\"\"\"\ncoupled symmetric model with from-below coupling\n --top-down is freely parameterized num-channels but from-below and top-down have same spatial extent \n --top-down and bottom-up are combined via convolution to the correct num-channel shape:\n I = ReluConv(concat(top_down, bottom_up))\n --error is compuated as:\n (future_bottom_up - current_I)**2\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport zmq\n#hrm, check out paths\nfrom curiosity.models.model_building_blocks import ConvNetwithBypasses\n\nfrom curiosity.utils.io import recv_array\n\n\n\ndef getEncodeDepth(rng, cfg, slippage=0):\n val = None\n if 'encode_depth' in cfg:\n val = cfg['encode_depth']\n elif 'encode' in cfg:\n val = max(cfg['encode'].keys())\n if val is not None and rng.uniform() > slippage:\n return val\n d = rng.choice([1, 2, 3, 4, 5])\n return d\n\ndef getEncodeConvFilterSize(i, encode_depth, rng, cfg, prev=None, slippage=0):\n val = None\n if 'encode' in cfg and (i in 
cfg['encode']):\n if 'conv' in cfg['encode'][i]:\n if 'filter_size' in cfg['encode'][i]['conv']:\n val = cfg['encode'][i]['conv']['filter_size'] \n if val is not None and rng.uniform() > slippage:\n return val\n L = [1, 3, 5, 7, 9, 11, 13, 15, 23]\n if prev is not None:\n L = [_l for _l in L if _l <= prev]\n return rng.choice(L)\n\ndef getEncodeConvNumFilters(i, encode_depth, rng, cfg, slippage=0):\n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'conv' in cfg['encode'][i]:\n if 'num_filters' in cfg['encode'][i]['conv']:\n val = cfg['encode'][i]['conv']['num_filters']\n if val is not None and rng.uniform() > slippage:\n return val\n L = [3, 48, 96, 128, 256, 128]\n return L[i]\n \ndef getEncodeConvStride(i, encode_depth, rng, cfg, slippage=0):\n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'conv' in cfg['encode'][i]:\n if 'stride' in cfg['encode'][i]['conv']:\n val = cfg['encode'][i]['conv']['stride']\n if val is not None and rng.uniform() > slippage:\n return val\n if encode_depth > 1:\n return 2 if i == 1 else 1\n else:\n return 3 if i == 1 else 1\n\ndef getEncodeDoPool(i, encode_depth, rng, cfg, slippage=0):\n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'do_pool' in cfg['encode'][i]:\n val = cfg['encode'][i]['do_pool']\n elif 'pool' in cfg['encode'][i]:\n val = True\n if val is not None and rng.uniform() > slippage:\n return val\n if i < 3 or i == encode_depth:\n return rng.uniform() < .75\n else:\n return rng.uniform() < .25\n \ndef getEncodePoolFilterSize(i, encode_depth, rng, cfg, slippage=0):\n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'pool' in cfg['encode'][i]:\n if 'filter_size' in cfg['encode'][i]['pool']:\n val = cfg['encode'][i]['pool']['filter_size']\n if val is not None and rng.uniform() > slippage:\n return val\n return rng.choice([2, 3, 4, 5])\n\ndef getEncodePoolStride(i, encode_depth, rng, cfg, slippage=0): \n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'pool' in cfg['encode'][i]:\n if 'stride' in cfg['encode'][i]['pool']:\n val = cfg['encode'][i]['pool']['stride']\n if val is not None and rng.uniform() > slippage:\n return val\n return 2\n\ndef getEncodePoolType(i, encode_depth, rng, cfg, slippage=0):\n val = None\n if 'encode' in cfg and (i in cfg['encode']):\n if 'pool' in cfg['encode'][i]:\n if 'type' in cfg['encode'][i]['pool']:\n val = cfg['encode'][i]['pool']['type']\n if val is not None and rng.uniform() > slippage:\n return val\n return rng.choice(['max', 'avg'])\n\ndef getHiddenDepth(rng, cfg, slippage=0):\n val = None\n if (not rng.uniform() < slippage) and 'hidden_depth' in cfg:\n val = cfg['hidden_depth']\n elif 'hidden' in cfg:\n val = max(cfg['hidden'].keys())\n if val is not None and rng.uniform() > slippage:\n return val\n d = rng.choice([1, 2, 3])\n return d\n\ndef getHiddenNumFeatures(i, hidden_depth, rng, cfg, slippage=0):\n val = None\n if 'hidden' in cfg and (i in cfg['hidden']):\n if 'num_features' in cfg['hidden'][i]:\n val = cfg['hidden'][i]['num_features']\n if val is not None and rng.uniform() > slippage:\n return val\n return 1024\n\ndef getDecodeDepth(rng, cfg, slippage=0):\n val = None\n if 'decode_depth' in cfg:\n val = cfg['decode_depth']\n elif 'decode' in cfg:\n val = max(cfg['decode'].keys())\n if val is not None and rng.uniform() > slippage:\n return val\n d = rng.choice([1, 2, 3])\n return d\n\ndef getDecodeNumFilters(i, decode_depth, rng, cfg, slippage=0):\n val = None\n if 'decode' in cfg and (i in cfg['decode']):\n if 'num_filters' in 
cfg['decode'][i]:\n      val = cfg['decode'][i]['num_filters']\n  if val is not None and rng.uniform() > slippage:\n    return val\n  return 32\n\ndef getDecodeFilterSize(i, decode_depth, rng, cfg, slippage=0):\n  val = None\n  if 'decode' in cfg and (i in cfg['decode']):\n    if 'filter_size' in cfg['decode'][i]:\n      val = cfg['decode'][i]['filter_size']\n  if val is not None and rng.uniform() > slippage:\n    return val\n  return rng.choice([1, 3, 5, 7, 9, 11])\n  \ndef getDecodeFilterSize2(i, decode_depth, rng, cfg, slippage=0):\n  val = None\n  if 'decode' in cfg and (i in cfg['decode']):\n    if 'filter_size2' in cfg['decode'][i]:\n      val = cfg['decode'][i]['filter_size2']\n  if val is not None and rng.uniform() > slippage:\n    return val\n  return rng.choice([1, 3, 5, 7, 9, 11])\n\ndef getDecodeSize(i, decode_depth, init, final, rng, cfg, slippage=0):\n  val = None\n  if 'decode' in cfg and (i in cfg['decode']):\n    if 'size' in cfg['decode'][i]:\n      val = cfg['decode'][i]['size']\n  if val is not None and rng.uniform() > slippage:\n    return val\n  s = np.log2(init)\n  e = np.log2(final)\n  increment = (e - s) / decode_depth\n  l = np.around(np.power(2, np.arange(s, e, increment)))\n  if len(l) < decode_depth + 1:\n    l = np.concatenate([l, [final]])\n  l = l.astype(np.int)\n  return l[i]\n\ndef getDecodeBypass(i, encode_nodes, decode_size, decode_depth, rng, cfg, slippage=0):\n  val = None\n  if 'decode' in cfg and (i in cfg['decode']):\n    if 'bypass' in cfg['decode'][i]:\n      val = cfg['decode'][i]['bypass']\n  #prevent error that can occur here if encode is not large enough due to slippage modification?\n  if val is not None and rng.uniform() > slippage:\n    return val  \n  switch = rng.uniform()  \n  print('sw', switch)\n  if switch < 0.5:\n    sdiffs = [e.get_shape().as_list()[1] - decode_size for e in encode_nodes]\n    return np.abs(sdiffs).argmin()\n  \ndef getFilterSeed(rng, cfg):\n  if 'filter_seed' in cfg:\n    return cfg['filter_seed']\n  else:  \n    return rng.randint(10000)\n\n\ndef model_tfutils_fpd_compatible(inputs, **kwargs):\n  batch_size = inputs['images'].get_shape().as_list()[0]\n  new_inputs = {'current' : inputs['images'], 'actions' : inputs['parsed_actions'], 'future' : inputs['future_images'], 'time' : tf.ones([batch_size, 1])}\n  return model_tfutils(new_inputs, **kwargs)\n\n\ndef model_tfutils(inputs, rng, cfg = {}, train = True, slippage = 0, diff_mode = False, num_classes = 1, **kwargs):\n  '''Model definition, compatible with tfutils.\n\n  inputs should have 'current', 'future', 'actions', 'time' keys. 
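  For orientation, here is a minimal sketch of a feed that satisfies this contract
  (dtypes follow the casts below; the batch size and shapes are illustrative
  assumptions, not taken from this file):

      inputs = {'current': tf.placeholder(tf.uint8, [4, 64, 64, 3]),
                'future': tf.placeholder(tf.uint8, [4, 64, 64, 3]),
                'actions': tf.placeholder(tf.float32, [4, 8]),
                'time': tf.ones([4, 1])}
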
Outputs is a dict with keys 'pred' and 'future'; within each, a dict with keys 'pred<i>' / 'future<i>' for i in 0..encode_depth, to be matched up in the loss.\n  num_classes = 1 is equivalent to the original l2 loss model.\n  '''\n  current_node = inputs['current']\n  future_node = inputs['future']\n  actions_node = inputs['actions']\n  time_node = inputs['time']\n\n  current_node = tf.divide(tf.cast(current_node, tf.float32), 255)\n  future_node = tf.divide(tf.cast(future_node, tf.float32), 255)\n  actions_node = tf.cast(actions_node, tf.float32)\n  print('Actions shape')\n  print(actions_node.get_shape().as_list())\n\n\n  print('Diff mode: ' + str(diff_mode))\n\n#I think this should be taken away from cfg\n  # fseed = getFilterSeed(rng, cfg)\n\n  if rng is None:\n    rng = np.random.RandomState(seed=kwargs['seed'])\n\n  m = ConvNetwithBypasses(**kwargs)\n\n  #encoding\n  encode_depth = getEncodeDepth(rng, cfg, slippage=slippage)\n  print('Encode depth: %d' % encode_depth)\n  cfs0 = None\n\n  encode_nodes_current = [current_node]\n  encode_nodes_future = [future_node]\n  for i in range(1, encode_depth + 1):\n  #not sure this usage ConvNet class creates exactly the params that we want to have, specifically in the 'input' field, but should give us an accurate record of this network's configuration\n    with tf.variable_scope('encode' + str(i)):\n\n      with tf.contrib.framework.arg_scope([m.conv], init='trunc_norm', stddev=.01, bias=0, activation='relu'):\n\n        cfs = getEncodeConvFilterSize(i, encode_depth, rng, cfg, prev=cfs0, slippage=slippage)\n        cfs0 = cfs\n        nf = getEncodeConvNumFilters(i, encode_depth, rng, cfg, slippage=slippage)\n        cs = getEncodeConvStride(i, encode_depth, rng, cfg, slippage=slippage)\n\n        new_encode_node_current = m.conv(nf, cfs, cs, in_layer = encode_nodes_current[i - 1])\n        print('Current encode node shape: ' + str(new_encode_node_current.get_shape().as_list()))\n      with tf.variable_scope('encode' + str(i), reuse = True):\n        new_encode_node_future = m.conv(nf, cfs, cs, in_layer = encode_nodes_future[i - 1], \\\n              init='trunc_norm', stddev=.01, bias=0, activation='relu')\n        print('Future encode node shape: ' + str(new_encode_node_future.get_shape().as_list()))\n      do_pool = getEncodeDoPool(i, encode_depth, rng, cfg, slippage=slippage)\n      if do_pool:\n        pfs = getEncodePoolFilterSize(i, encode_depth, rng, cfg, slippage=slippage)\n        ps = getEncodePoolStride(i, encode_depth, rng, cfg, slippage=slippage)\n        pool_type = getEncodePoolType(i, encode_depth, rng, cfg, slippage=slippage)\n        print('Pool size %d, stride %d' % (pfs, ps))\n        print('Type: ' + pool_type)\n        #just correcting potential discrepancy in descriptor\n        if pool_type == 'max':\n          pool_type = 'maxpool'\n        new_encode_node_current = m.pool(pfs, ps, in_layer = new_encode_node_current, pfunc = pool_type)\n        new_encode_node_future = m.pool(pfs, ps, in_layer = new_encode_node_future, pfunc = pool_type)\n        print('Current encode node shape: ' + str(new_encode_node_current.get_shape().as_list()))\n        print('Future encode node shape: ' + str(new_encode_node_future.get_shape().as_list()))  \n      encode_nodes_current.append(new_encode_node_current)\n      encode_nodes_future.append(new_encode_node_future)\n\n  with tf.variable_scope('addactiontime'):\n    encode_node = encode_nodes_current[-1]\n    enc_shape = encode_node.get_shape().as_list()\n    encode_flat = m.reshape([np.prod(enc_shape[1:])], in_layer = encode_node)\n    print('Flatten to shape %s' % encode_flat.get_shape().as_list())\n    if time_node is not None:\n      encode_flat = m.add_bypass([actions_node, time_node])\n    else:\n      
encode_flat = m.add_bypass(actions_node)\n\n nf0 = encode_flat.get_shape().as_list()[1]\n hidden_depth = getHiddenDepth(rng, cfg, slippage=slippage)\n print('Hidden depth: %d' % hidden_depth)\n hidden = encode_flat\n for i in range(1, hidden_depth + 1):\n with tf.variable_scope('hidden' + str(i)):\n nf = getHiddenNumFeatures(i, hidden_depth, rng, cfg, slippage=slippage)\n #TODO: this can be made nicer once we add more general concat\n hidden = m.fc(nf, init = 'trunc_norm', activation = 'relu', bias = .01, in_layer = hidden, dropout = None)\n print('Hidden shape %s' % hidden.get_shape().as_list())\n nf0 = nf\n\n\n #decode\n ds = encode_nodes_future[encode_depth].get_shape().as_list()[1]\n nf1 = getDecodeNumFilters(0, encode_depth, rng, cfg, slippage=slippage)\n if ds * ds * nf1 != nf0:\n with tf.variable_scope('extra_hidden'):\n hidden = m.fc(ds * ds * nf1, init = 'trunc_norm', activation = None, bias = .01, dropout = None)\n print(\"Linear from %d to %d for input size %d\" % (nf0, ds * ds * nf1, ds))\n decode = m.reshape([ds, ds, nf1])\n print(\"Unflattening to\", decode.get_shape().as_list())\n\n\n\n preds = {}\n for i in range(0, encode_depth + 1):\n with tf.variable_scope('pred' + str(encode_depth - i)):\n pred = m.add_bypass(encode_nodes_current[encode_depth - i])\n print('Shape after bypass %s' % pred.get_shape().as_list())\n nf = encode_nodes_future[encode_depth - i].get_shape().as_list()[-1]\n cfs = getDecodeFilterSize2(i, encode_depth, rng, cfg, slippage = slippage)\n print('Pred conv filter size %d' % cfs)\n if i == encode_depth:\n pred = m.conv(nf * num_classes, cfs, 1, init='trunc_norm', stddev=.1, bias=0, activation=None)\n #making this another dimension, while I *think* conv_2d would not handle this\n if num_classes > 1:\n my_shape = pred.get_shape().as_list()\n my_shape[3] = nf\n my_shape.append(num_classes)\n pred = m.reshape(my_shape[1:])\n else:\n if diff_mode:\n pred = m.conv(nf, cfs, 1, init='trunc_norm', stddev=.1, bias=0, activation=None)\n else:\n pred = m.conv(nf, cfs, 1, init='trunc_norm', stddev=.1, bias=0, activation='relu')\n preds['pred' + str(encode_depth - i)] = pred\n if i != encode_depth:\n with tf.variable_scope('decode' + str(i+1)):\n ds = encode_nodes_future[encode_depth - i - 1].get_shape().as_list()[1]\n decode = m.resize_images(ds, in_layer = decode)\n print('Decode resize %d to shape' % (i + 1), decode.get_shape().as_list())\n cfs = getDecodeFilterSize(i + 1, encode_depth, rng, cfg, slippage=slippage)\n nf1 = getDecodeNumFilters(i + 1, encode_depth, rng, cfg, slippage=slippage)\n decode = m.conv(nf1, cfs, 1, init='trunc_norm', stddev=.1, bias=0, activation='relu')\n print('Decode conv to shape %s' % decode.get_shape().as_list())\n\n enc_string = None\n enc_dict = None\n if diff_mode:\n diffs = [encoded_future - encoded_current for (encoded_current, encoded_future) in zip(encode_nodes_current, encode_nodes_future)]\n encode_nodes_diff_dict = dict(('diff' + str(i), diff) for (i, diff) in enumerate(diffs))\n enc_string = 'diff'\n enc_dict = encode_nodes_diff_dict\n else:\n encode_nodes_future_dict = dict(('future' + str(i), encoded_future) for (i, encoded_future) in enumerate(encode_nodes_future))\n enc_string = 'future'\n enc_dict = encode_nodes_future_dict\n outputs = {'pred' : preds, enc_string: enc_dict}\n\n\n return outputs, m.params\n\n\n\n\n\n\n\ndef diff_loss_per_case_fn(labels, logits, **kwargs):\n '''This allows us to do the diff one while reusing the above code.\n\n Maybe merge with below.'''\n #Changed names of inputs to make compatible with 
tfutils, but this isn't so natural...\n outputs = logits\n inputs = labels\n encode_depth = len(outputs['pred']) - 1\n batch_size = outputs['pred']['pred0'].get_shape().as_list()[0]\n #this just to avoid declaring another placeholder\n tv = outputs['diff']['diff' + str(0)]\n pred = outputs['pred']['pred' + str(0)]\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = tf.nn.l2_loss(pred - tv) / norm\n for i in range(1, encode_depth + 1):\n tv = outputs['diff']['diff' + str(i)]\n pred = outputs['pred']['pred' + str(i)]\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = loss + tf.nn.l2_loss(pred - tv) / norm\n return loss\n\n\ndef loss_per_case_fn(labels, logits, **kwargs):\n #Changed names of inputs to make compatible with tfutils, but this isn't so natural...\n outputs = logits\n inputs = labels\n encode_depth = len(outputs['pred']) - 1\n batch_size = outputs['pred']['pred0'].get_shape().as_list()[0]\n #this just to avoid declaring another tensor\n tv = outputs['future']['future' + str(0)]\n pred = outputs['pred']['pred' + str(0)]\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = tf.nn.l2_loss(pred - tv) / norm\n for i in range(1, encode_depth + 1):\n tv = outputs['future']['future' + str(i)]\n pred = outputs['pred']['pred' + str(i)]\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = loss + tf.nn.l2_loss(pred - tv) / norm\n return loss\n\ndef discretized_loss_fn(labels, logits, num_classes, sigmoid_hiddens = False, **kwargs):\n outputs = logits\n inputs = labels\n encode_depth = len(outputs['pred']) - 1\n tv = outputs['diff']['diff0']\n #get the range to be [0, num_classes-1], then floor it\n tv = tf.cast((num_classes - 1) * (tv + 1) / 2, tf.uint8)\n tv = tf.one_hot(tv, depth = num_classes)\n pred = outputs['pred']['pred0']\n #Not sure whether we should normalize this at all, but I think it's pretty ok as is\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, tv))\n for i in range(1, encode_depth + 1):\n tv = outputs['diff']['diff' + str(i)]\n pred = outputs['pred']['pred' + str(i)]\n if sigmoid_hiddens:\n pred = 2 * tf.nn.sigmoid(pred) - 1\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = loss + tf.nn.l2_loss(pred - tv) / norm\n return loss\n\ndef something_or_nothing_loss_fn(labels, logits, sigmoid_hiddens = False, **kwargs):\n outputs = logits\n inputs = labels\n encode_depth = len(outputs['pred']) - 1\n #we set num_classes = 1 for this, keeping parameters down...this is probably not that important\n tv = outputs['diff']['diff0']\n tv = tf.cast(tf.ceil(tf.abs(tv)), 'uint8')\n tv = tf.one_hot(tv, depth = 2)\n pred = outputs['pred']['pred0']\n my_shape = pred.get_shape().as_list()\n my_shape.append(1)\n pred = tf.reshape(pred, my_shape)\n pred = tf.concat(4, [tf.zeros(my_shape), pred])\n print('before loss shapes')\n print(pred.get_shape().as_list())\n print(tv.get_shape().as_list())\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, tv))\n for i in range(1, encode_depth + 1):\n tv = outputs['diff']['diff' + str(i)]\n pred = outputs['pred']['pred' + str(i)]\n if sigmoid_hiddens:\n pred = 2 * tf.nn.sigmoid(pred) - 1\n my_shape = tv.get_shape().as_list()\n norm = (my_shape[1]**2) * my_shape[0] * my_shape[-1]\n loss = loss + tf.nn.l2_loss(pred - tv) / norm\n return loss\n\ndef 
loss_agg_for_validation(labels, logits, **kwargs):\n #kind of a hack, just getting a validation score like our loss for this test\n return {'minibatch_loss' : tf.reduce_mean(loss_per_case_fn(labels, logits, **kwargs))}\n\n\n\n","sub_path":"curiosity/models/future_pred_symmetric_coupled_with_below.py","file_name":"future_pred_symmetric_coupled_with_below.py","file_ext":"py","file_size_in_byte":18533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"491007350","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 7 12:11:55 2020\r\n\r\n\r\nCompute area weighted mean for areas in spatial join\r\n\r\n\r\n\r\n@author: aripekkj\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport geopandas as gpd\r\n\r\n\r\n# filepath\r\nfp = r'E:\\LocalData\\aripekkj\\ProtectedAreas\\Madagascar\\INSTAT\\mdg_adm1_region_df.shp'\r\nfp2 = r'E:\\LocalData\\aripekkj\\ProtectedAreas\\Madagascar\\WDPA\\WDPA_June2020_Mada_final_PAs2.shp'\r\nfp3 = r'E:\\LocalData\\aripekkj\\ProtectedAreas\\Madagascar\\sjoin\\WDPA_fire_w_region_df_pop_acc_means_finalPAs.shp'\r\nout_fp = r'E:\\LocalData\\aripekkj\\ProtectedAreas\\Madagascar\\INSTAT\\mdg_adm1_region_df_w_spatial_join.shp' \r\n\r\n# read file\r\nregion = gpd.read_file(fp)\r\npa = gpd.read_file(fp2)\r\njoin_pa = gpd.read_file(fp3)\r\n\r\n# overlay region by wdpa. result has attributes from both layers and areas where single PA polygon intersects over many regions are individual polygons\r\nregion_ol = gpd.overlay(region, pa) \r\n\r\n# function for reprojecting\r\ndef reProject(shapefile, epsg_code):\r\n region_copy = shapefile.copy()\r\n region_reproj = region_copy.to_crs(epsg=epsg_code)\r\n return region_reproj\r\n\r\n# copy and transform GeoDataFrame to cartesian system to get area calculations in meters\r\nregion_utm = reProject(region_ol, 32738)\r\npa_utm = reProject(pa, 32738)\r\n\r\n# compute area in square kilometers\r\nregion_utm['area_km2'] = region_utm['geometry'].area / 10**6 \r\npa_utm['area_km2'] = pa_utm['geometry'].area / 10**6\r\n\r\n# empty column for area proportion. This will be used as weight\r\nregion_utm['area_prop'] = \"\"\r\n\r\n# loop through wdpa_pid in region_utm DataFrame and select corresponding PA from WDPA layer. 
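# A toy worked example of the weighting implemented below (all numbers are
# illustrative, not taken from the data): if a PA of 100 km2 is split by the
# overlay into regional pieces of 60 km2 and 40 km2, their area_prop weights
# are 0.6 and 0.4, and a per-region value v is aggregated as 0.6*v1 + 0.4*v2
# once the per-piece products are summed per WDPA_PID:
#   pieces = [(60.0, 2.0), (40.0, 5.0)]                  # (area_km2, value)
#   total = sum(a for a, _ in pieces)                    # 100.0
#   wmean = sum((a / total) * v for a, v in pieces)      # 0.6*2.0 + 0.4*5.0 = 3.2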
Then compute the percentage of the whole PA that each region piece covers\r\nfor idx, row in region_utm.iterrows():\r\n    # select row from WDPA\r\n    sel = pa_utm[pa_utm['WDPA_PID'] == row['WDPA_PID']]\r\n    region_area_prop = row['area_km2'] / sel['area_km2']\r\n    # use .loc to avoid chained-indexing assignment (SettingWithCopyWarning)\r\n    region_utm.loc[idx, 'area_prop'] = region_area_prop.iloc[0]\r\n    \r\nregion_utm['area_prop']\r\n\r\n# compute weighted mean for INSTAT columns\r\nregion_utm['Econ_det_w'] = region_utm['Econ_deter'] * region_utm['area_prop']\r\nregion_utm['Nr_of_HH_w'] = region_utm['Nr_of_HH'] * region_utm['area_prop']\r\nregion_utm['PoorFood_w'] = region_utm['Poor_food_'] * region_utm['area_prop']\r\nregion_utm['noFoodAl_w'] = region_utm['no_food_al'] * region_utm['area_prop']\r\nregion_utm['FoodSecA_w'] = region_utm['food_sec_a'] * region_utm['area_prop']\r\n\r\n\r\n# function to count sum and group them by WDPA_PID\r\ndef groupBy(df, column_name):\r\n    \r\n    # group by WDPA_PID\r\n    df_group = df.groupby('WDPA_PID')[column_name].sum()\r\n    \r\n    # convert Series to DataFrame\r\n    df_out = pd.Series.to_frame(df_group)\r\n    # reset index\r\n    df_out = df_out.reset_index()\r\n    \r\n    return df_out\r\n\r\n# group by\r\necon_det_w_sum = groupBy(region_utm, 'Econ_det_w')\r\nnrHH_w = groupBy(region_utm, 'Nr_of_HH_w')\r\npoorfood_w = groupBy(region_utm, 'PoorFood_w')\r\nnoFood_w = groupBy(region_utm, 'noFoodAl_w')\r\nFoodSecA_W = groupBy(region_utm, 'FoodSecA_w')\r\n\r\n# join list\r\njoin_list = [econ_det_w_sum, nrHH_w, poorfood_w, noFood_w, FoodSecA_W]\r\n\r\n# join grouped dataframes by wdpa id \r\nfor i in join_list:\r\n    \r\n    join_pa = pd.merge(join_pa, i, how='outer', on='WDPA_PID')\r\n\r\n# save to shapefile\r\njoin_pa.to_file(out_fp, driver='ESRI Shapefile')\r\n\r\n\r\n\r\n# to find certain protected area for double checking results\r\ntest = region_utm[region_utm['WDPA_PID'] == '555548845']\r\nprint(test[['Econ_deter', 'area_km2', 'area_prop', 'Econ_det_w']])\r\nprint(sum(test['Econ_deter']) / len(test['Econ_deter']))\r\nprint(sum(test['Econ_det_w']))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Python/weightedAreaSpatialJoin.py","file_name":"weightedAreaSpatialJoin.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"498285405","text":"import os, sys, time\nimport numpy as np\nfrom astropy.io import fits\nfrom vis_sample import vis_sample\nfrom vis_sample.file_handling import import_data_uvfits\nimport matplotlib.pyplot as plt\n\n# parse and package the UNWINDOWED DATA\ndata_set = 'simp3_std_medv_medr_STARTHIV_noiseless'\ndata_file = 'fake_data/sim_uvfits/'+data_set+'.uvfits'\nraw_vis = import_data_uvfits(data_file)\n\n# extract the proper velocities from the data file\ndat = fits.open(data_file)\nhdr = dat[0].header\nfreq0 = hdr['CRVAL4']\nindx0 = hdr['CRPIX4']\nnchan = hdr['NAXIS4']\ndfreq = hdr['CDELT4']\nfreqs = freq0 + (np.arange(nchan) - indx0 + 1) * dfreq\nraw_vis.freqs = freqs\nprint(raw_vis.VV.shape, raw_vis.freqs.shape)\n\n\n# parse and package the CASA-WINDOWED DATA\ndata_set = 'simp3_std_medv_medr_STARTHIV_noiseless.hann'\ndata_file = 'fake_data/sim_uvfits/'+data_set+'.uvfits'\ncasa_vis = import_data_uvfits(data_file)\n\n# extract the proper velocities from the data file\ndat = fits.open(data_file)\nhdr = dat[0].header\nfreq0 = hdr['CRVAL4']\nindx0 = hdr['CRPIX4']\nnchan = hdr['NAXIS4']\ndfreq = hdr['CDELT4']\nfreqs = freq0 + (np.arange(nchan) - indx0 + 1) * dfreq\ncasa_vis.freqs = 
freqs\nprint(casa_vis.VV.shape, casa_vis.freqs.shape)\n\n\n# plot the unwindowed versus the casa-windowed data\nidx = 1500\nplt.plot(raw_vis.freqs / 1e9, raw_vis.VV.real[:,idx], 'oC0')\nplt.plot(casa_vis.freqs / 1e9, casa_vis.VV.real[:,idx], 'oC1', alpha=0.8, \n markersize=2.5)\n\n#for i in range(len(casa_vis.freqs)):\n# print(casa_vis.VV[i,idx], raw_vis.VV[i,idx])\n\n\nfrom scipy.signal import convolve\n\nwindow = np.array([0.0, 0.25, 0.5, 0.25, 0.0])\nmyhann = convolve(raw_vis.VV[:,idx], window, mode='same')\n\nplt.plot(raw_vis.freqs / 1e9, myhann.real, 'oC2', alpha=0.6, markersize=2.3)\n\nplt.show()\nplt.clf()\n\n\ndiff_hanns = (myhann.real - casa_vis.VV.real[:,idx]) / myhann.real\ndiff_raw = (myhann.real - raw_vis.VV.real[:,idx]) / myhann.real\n\nplt.plot(raw_vis.freqs[10:-10] / 1e9, diff_hanns[10:-10], 'oC0')\nplt.show()\n\n\n\n# how to do this quickly on all visibilities?\nfrom scipy.ndimage import convolve1d\n\nt0 = time.time()\ntesthann = convolve1d(raw_vis.VV.real, window, axis=0, mode='nearest')\nprint(time.time()-t0)\n\nprint(testhann.shape)\n\ndiff_21 = (myhann.real - testhann[:,idx]) / myhann.real\nplt.plot(raw_vis.freqs[10:-10] / 1e9, diff_21[10:-10], 'oC0')\nplt.show()\n","sub_path":"fit_Mdyn/vet_windowing.py","file_name":"vet_windowing.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"567117388","text":"import os\nimport sys\n\nfrom flask import current_app\nfrom flask_pymongo import MongoClient\nfrom eve.tests import TestMinimal\nfrom run import create_app\n\nsys.path.insert(0, os.path.abspath('../db'))\n\nfrom db.test import MONGO_HOST, MONGO_PORT, MONGO_DBNAME\n\nSETTINGS_PATH = os.path.join(os.path.dirname('..'), 'db', 'test.py')\n\nclass ServiceTestBase(TestMinimal):\n\n def setUp(self, settings_file=None, url_converters=None):\n\n self.this_directory = os.path.dirname(os.path.realpath(__file__))\n\n if settings_file is None:\n settings_file = SETTINGS_PATH\n\n self.connection = None\n self.setupDB()\n\n self.settings_file = settings_file\n self.app = create_app(SETTINGS_PATH)\n self.app.testing = True\n\n self.test_client = self.app.test_client()\n\n self.domain = self.app.config['DOMAIN']\n\n def setupDB(self):\n self.connection = MongoClient(MONGO_HOST, MONGO_PORT)\n\n def tearDown(self):\n with self.app.app_context():\n current_app.data.driver.db.materials.remove({})\n current_app.data.driver.db.containers.remove({})\n del self.app\n\ndef valid_material_params():\n return {\n \"material_type\": \"blood\",\n \"supplier_name\": \"my supplier name 1\",\n \"donor_id\": \"my donor id 1\",\n \"gender\": \"female\",\n \"scientific_name\": \"Homo sapiens\",\n \"phenotype\": \"eye colour\"\n }","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"301212547","text":"import numpy as np\nimport pandas as pd\n\nclass Perceptron(object):\n def __init__(self, eta = 0.01, n_iter = 100, random_state = 1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n\n def fit(self, X, y):\n print(\"training begin !!\")\n print(\"start to initialize the weight ...\")\n rgen = np.random.RandomState(self.random_state)\n self.weight = rgen.normal(loc = 0.0, scale = 0.01, size = X.shape[1] + 1)\n self.error = []\n print(\"weight init finish, begin to train ...\")\n for _ in range(self.n_iter):\n errors = 0\n for xi, target in 
zip(X, y):\n                update = target - self.predict(xi)\n                # accumulate the perceptron update (w <- w + eta*(y - y_hat)*x) instead of overwriting the weights\n                self.weight[1:] += self.eta * update * xi\n                self.weight[0] += self.eta * update\n                errors += bool(update)\n            print(\"round %d: %d errors\" % (_, errors))\n            self.error.append(errors)\n        print(\"training finished !!\")\n        return self\n\n    def predict(self, xi):\n        # net input: weighted sum of the features plus the bias term\n        dot_product = np.dot(xi, self.weight[1:]) + self.weight[0]\n        if dot_product >= 0:\n            return 1\n        else:\n            return -1\n\n\nif __name__ == \"__main__\":\n    print(\"start to read data from web ...\")\n    df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header = None)\n    df.head()\n    print(\"data reading finished ...\")\n    y = df.iloc[0:100, 4].values\n    y = np.where(y == 'Iris-setosa', -1, 1)\n    X = df.iloc[0:100, [0, 2]].values\n    print(\"data unpack finished\")\n    net = Perceptron()\n    net.fit(X, y)","sub_path":"comp 3314/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"495233911","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\na = 0.247\nZ = 14\nA = 28\n\nER_keV = np.logspace(-0.5, 2, 500);\neps = 11.5*ER_keV*Z**(-7./3.)\ng = 3.*eps**0.15 + 0.7*eps**0.6 + eps\nkappa = 0.133*Z**(2./3.)/np.sqrt(A)\nQF = kappa * g / (1.+kappa*g)\n\nyc = 1./(1./(a*ER_keV) + 1./QF)\n\nplt.plot(ER_keV, yc*ER_keV, linewidth=2)\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.show()\n","sub_path":"util/plotIonizationModel.py","file_name":"plotIonizationModel.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"304273313","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.views.generic.simple import direct_to_template\n# imports needed by the register URL below (django-registration)\nfrom registration.backends.default.views import RegistrationView\nfrom registration.forms import RegistrationFormUniqueEmail\nfrom .sitemaps import StaticSitemap\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nsitemaps = dict(\n    static = StaticSitemap,\n)\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'cowcloud.views.home', name='home'),\n    # url(r'^cowcloud/', include('cowcloud.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    (r'^forum/', include('pybb.urls', namespace='pybb')),\n    #(r'^files/', include('files.urls')),\n    url(r'^upload/', include('fileupload.urls')),\n    (r'^pages/', include('django.contrib.flatpages.urls')),\n    (r'^$', include('files.urls')),\n    (r'^accounts/', include('registration.backends.default.urls')),\n    url(r'^register/$', RegistrationView.as_view(form_class=RegistrationFormUniqueEmail), name='registration_register'),\n    (r'^accounts/$', 'django.views.generic.simple.direct_to_template', {'template': 'accounts.html'}, 'accounts'),\n\n    (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/media/img/favicon.ico'}),\n    # NOTE: the earlier (r'^$', include('files.urls')) entry can shadow this root pattern\n    (r'^$', 'django.views.generic.simple.direct_to_template', {'template': 'home.html'}, 'index'),\n    (r'^plans/$', 'django.views.generic.simple.direct_to_template', {'template': 'plans.html'}, 'plans'),\n    (r'^terms/$', 'django.views.generic.simple.direct_to_template', {'template': 'terms.html'}, 'terms'),\n    (r'^faq/$', 'django.views.generic.simple.direct_to_template', {'template': 'faq.html'}, 'faq'),\n    #(r'^contact/$', 
'django.views.generic.simple.direct_to_template', {'template':'contact.html'}, 'contact' ),\n #url(r'^$', TemplateView.as_view(template_name='home.html')),\n\n #(r'^moneybookers/status_url/', include('moneybookers.urls')),\n #(r'^moneybookers/cancel/', 'MoneybookersCancel'),\n #(r'^moneybookers/ok/', 'MoneybookersOk'),\n #(r'^order/$', 'View_With_Order'),\n\n url(r\"^r/\", include(\"anafero.urls\")),\n (r'^contact/', include('contact_form.urls')),\n #url(r'', include('webmaster_verification.urls')),\n\t(r'^sitemap\\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),\n\t(r'^robots\\.txt$', direct_to_template, {'template': 'robots.txt', 'mimetype': 'text/plain'}),\n)\n\nif settings.USE_SAML2:\n urlpatterns += patterns('',\n (r'^saml2/', include('djangosaml2.urls')),\n (r'^idp/', include('saml2idp.urls')),\n (r'^sp/', include('saml2sp.urls')),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^storage/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STORAGE_ROOT}),\n\t (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n\t (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT, 'show_indexes': True}),\n)\n","sub_path":"cowcloud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517852955","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport asyncio\nimport threading\n\n\n# asyncio: syntax for 3.5\nasync def hello(name):\n print('hello, %s. %s' % (name, threading.current_thread().name))\n # call another coroutine\n await asyncio.sleep(1)\n print('hello, %s. %s' % (name, threading.current_thread().name))\n\n\nif __name__ == '__main__':\n # achieve the EventLoop object\n loop = asyncio.get_event_loop()\n # execute coroutines\n tasks = [hello('jack'), hello('king')]\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n","sub_path":"asynchronous_io/py_68.py","file_name":"py_68.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"355623116","text":"from minio import Minio\nfrom checksum import calculate_checksum\nfrom parseYaml import parse_yaml\nfrom connectCouchdb import connect_couchdb,addFunctionIfNotExist,addUniqueChecksum\nfrom thumbnail import create_thumbnail\n\ndef connect_minio():\n mc = Minio('localhost:9000',\n access_key='DCJSAEHVFETULK44U89V',\n secret_key='ppYczqLJvFO1QhWXQoK73SRCTaVv8bA+ZcFaligU',\n secure=False)\n\n return mc\n\ndef getObject(mc,fromkafka,bucket):\n\n data = mc.get_object(bucket, fromkafka)\n obj = \"testImg\"\n with open(obj, 'wb') as file_data:\n for d in data.stream(32 * 1024):\n file_data.write(d)\n return obj\n\ndef main():\n\n function_name = parse_yaml()\n function_id = calculate_checksum(function_name)\n print(\"Function id : \"+function_id)\n\n '''data from kafka event, which is yet to implement'''\n fromkafka = \"a.jpg\"\n bucket = \"input\"\n\n '''connect couchdb'''\n db = connect_couchdb()\n\n '''query if function id exists in the couchdb. 
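    One plausible shape for that helper, sketched against the python-couchdb API
    (the document fields here are assumptions, not taken from this repo):

        def addFunctionIfNotExist(db, function_id):
            # couchdb.client.Database supports `in` checks and dict-style writes
            if function_id not in db:
                db[function_id] = {'type': 'function', 'checksums': []}
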
If not, create a new one'''\n    addFunctionIfNotExist(db,function_id)\n\n    '''Connecting minio'''\n    mc = connect_minio()\n\n    '''fetch data from the minio'''\n    obj = getObject(mc,fromkafka,bucket)\n\n    '''Calculate checksum'''\n    img_checksum = calculate_checksum(obj)\n    print(\"Image Checksum: \"+img_checksum)\n\n    '''Adding checksum in couchdb'''\n    #addUniqueChecksum(db,img_checksum)\n\n    '''call to openwhisk'''\n    create_thumbnail(fromkafka)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"python/connectMinio.py","file_name":"connectMinio.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"651465927","text":"# Base\nAUTHOR = \"Mateusz Jagiełło\"\nSITENAME = \"sigo's blog\"\nSITEURL = ''\n\n# l10n\nDEFAULT_DATE_FORMAT = '%H:%I:%S %d.%m.%Y'\nDEFAULT_LANG = 'pl'\nTIMEZONE = 'Europe/Warsaw'\n\n# Directories\nDELETE_OUTPUT_DIRECTORY = True\nPATH = 'content/'\nTHEME = ''\nTHEME_STATIC_DIR = ''\n\n# Others\nDEFAULT_CATEGORY = \"pozostałe\"\nRELATIVE_URLS = True\nWITH_FUTURE_DATES = False # If disabled, content with dates in the future will get a default status of draft\n\n# URLs\nARCHIVES_SAVE_AS = ''\nARTICLE_SAVE_AS = '{slug}.html'\nARTICLE_URL = '{slug}.html'\nAUTHOR_SAVE_AS = ''\nAUTHORS_SAVE_AS = ''\nCATEGORIES_SAVE_AS = ''\nCATEGORY_SAVE_AS = '{slug}/index.html'\nCATEGORY_URL = '{slug}/'\nDRAFT_SAVE_AS = 'szkice/{slug}.html'\nDRAFT_URL = 'szkice/{slug}.html'\nPAGE_SAVE_AS = '{slug}.html'\nPAGE_URL = '{slug}.html'\nTAG_SAVE_AS = ''\nTAGS_SAVE_AS = ''\n\n# Feeds\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\nCATEGORY_FEED_ATOM = None\nFEED_ALL_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\n# Pagination\nDEFAULT_PAGINATION = 15\nPAGINATION_PATTERNS = (\n    (1, '{base_name}/', '{base_name}/index.html'),\n    (2, '{base_name}/{number}/', '{base_name}/{number}/index.html')\n)\n\n# Extra files\nSTATIC_PATHS = [\n    'extra/CNAME'\n]\n\nEXTRA_PATH_METADATA = {\n    'extra/CNAME': {\n        'path': 'CNAME'\n    }\n}\n\n# Custom\nLIVERELOAD = True\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"309931339","text":"import numpy as np\n\ndef unit_vector(vector):\n    return vector / np.linalg.norm(vector)\n\ndef angle_between(v1, v2):\n    v1_u = unit_vector(v1)\n    v2_u = unit_vector(v2)\n    sf = 1\n    if v2_u[0] > 0:\n        sf = -1\n    return sf*np.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))","sub_path":"utils/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"422957407","text":"T = int(input()) # Testcase\n\n# pre-store the counts of 0s and 1s printed for fibonacci(0, 1, 2)\nzero = [1, 0, 1]\none = [0, 1, 1]\n\ndef dp(N):\n    # if the desired N reaches the current size of zero (or one), i.e. is 3 or larger\n    if len(zero) <= N:\n        for i in range(len(zero), N+1):\n            # append the sum of the i-1 and i-2 values\n            zero.append(zero[i-1]+zero[i-2]) \n            one.append(one[i-1]+one[i-2])\n    print(f'{zero[N]} {one[N]}')\n\nwhile T:\n    N = int(input())\n    dp(N)\n    T -= 1\n","sub_path":"Algorithm/JAEHYEON/1003.py","file_name":"1003.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"85539283","text":"\"\"\"\n2020, day 7\n\"\"\"\n\nimport re\n\ndef makefilter(a_type):\n    def myfilter(x):\n        return a_type in x[1]\n    return myfilter\n\ndef look_for_bag(rules, 
bag):\n next_trgt_list = []\n contains_target = []\n next_target = bag\n while next_target is not None:\n f = makefilter(next_target)\n contain_target_loop = [x for x in list(filter(f, rules.items()))]\n contain_target_loop = [x[0] for x in contain_target_loop if x[0] not in contains_target]\n contains_target += [x for x in contain_target_loop]\n next_trgt_list += [x for x in contain_target_loop]\n try:\n next_target = next_trgt_list.pop(0)\n except IndexError:\n next_target = None\n return contains_target\n\ndef search_inside_bags(rules, trgt):\n my_contents = rules[trgt]\n tot = 1\n for k, v in my_contents.items():\n tot += v * search_inside_bags(rules, k)\n return tot\n\n\ndef prep(content):\n # put all rules in dictionary\n rulebook = {}\n regex = r'^([a-z]* [a-z]*) bags contain (.*)$'\n regex_empty = r'^([a-z]* [a-z]*) bags contain no other bags.$'\n for c in content:\n m = re.match(regex_empty, c)\n if m is None:\n m = re.match(regex, c)\n rulebook[m.group(1)] = {}\n regex_inner = r'(\\d [a-z]* [a-z]*) bags?'\n fa = re.findall(regex_inner, c.split('bags contain ')[1])\n for ff in fa:\n cnt = int(ff.split(' ')[0])\n name = ' '.join(ff.split(' ')[1:])\n rulebook[m.group(1)][name] = cnt\n else:\n rulebook[m.group(1)] = {}\n return rulebook\n\ndef part1(rulebook):\n target = 'shiny gold'\n contain_shiny_gold = look_for_bag(rulebook, target)\n print(f'Part 1: {len(contain_shiny_gold)} bags can contain a {target} bag.')\n\ndef part2(rulebook):\n target = 'shiny gold'\n tot = search_inside_bags(rulebook, target) - 1 # dont count the shiny gold one!!!\n print(f'Part 2: One {target} bag has to contain {tot} other bags!')\n","sub_path":"2020/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"474833796","text":"import logging, json\nfrom xml.sax.saxutils import escape\nfrom io import TextIOWrapper\n\nfrom .rest_api import RestAPI\nfrom .dataset import DataSet\nfrom .tags_man import TagsManager\nfrom .zone import FilterZoneH\nfrom app.config.a_config import AnfisaConfig\nfrom app.filter.condition import ConditionMaker\nfrom app.filter.cond_op import CondOpEnv\nfrom app.search.index import Index\n\n#===============================================\nclass Workspace(DataSet):\n def __init__(self, data_vault, dataset_info, dataset_path):\n DataSet.__init__(self, data_vault, dataset_info, dataset_path)\n self.mTabRecRand = []\n self.mTabRecKey = []\n self.mTabRecColor = []\n self.mTabRecLabel = []\n\n self.mIndex = Index(self)\n self._loadPData()\n self.mTagsMan = TagsManager(self,\n AnfisaConfig.configOption(\"check.tags\"))\n\n self.mIndex.setup()\n for filter_name, cond_seq, time_label in \\\n self.getMongoAgent().getFilters():\n if self.mIndex.goodOpFilterName(filter_name):\n if not self.mIndex.cacheFilter(filter_name,\n ConditionMaker.upgradeOldFormatSeq(cond_seq),\n time_label):\n logging.error(\"Filter %s for ws=%s failed\" %\n (filter_name, self.getName()))\n self.mZoneHandlers = []\n for zone_title, unit_name in AnfisaConfig.configOption(\"zones\"):\n if (unit_name == \"_tags\"):\n zone_h = self.mTagsMan\n zone_h._setTitle(zone_title)\n else:\n unit = self.mIndex.getUnit(unit_name)\n if (not unit):\n continue\n zone_h = FilterZoneH(self, zone_title, unit)\n self.mZoneHandlers.append(zone_h)\n\n self._setAspectHitGroup(\n *AnfisaConfig.configOption(\"transcript.view.setup\"))\n\n def _loadPData(self):\n with self._openPData() as inp:\n pdata_inp = TextIOWrapper(inp,\n 
encoding = \"utf-8\", line_buffering = True)\n for line in pdata_inp:\n pre_data = json.loads(line.strip())\n for key, tab in (\n (\"_rand\", self.mTabRecRand),\n (\"_key\", self.mTabRecKey),\n (\"_color\", self.mTabRecColor),\n (\"_label\", self.mTabRecLabel)):\n tab.append(pre_data.get(key))\n assert len(self.mTabRecRand) == self.getTotal()\n\n def getIndex(self):\n return self.mIndex\n\n def getTagsMan(self):\n return self.mTagsMan\n\n def iterZones(self):\n return iter(self.mZoneHandlers)\n\n def getZone(self, name):\n for zone_h in self.mZoneHandlers:\n if zone_h.getName() == name:\n return zone_h\n return None\n\n def getLastAspectID(self):\n return AnfisaConfig.configOption(\"aspect.tags.name\")\n\n def getMongoRecData(self, key):\n return self.getMongoAgent().getRecData(key)\n\n def setMongoRecData(self, key, data, prev_data = False):\n self.getMongoAgent().setRecData(key, data, prev_data)\n\n def _reportListKeys(self, rec_no_seq, rec_it_map_seq):\n marked_set = self.mTagsMan.getMarkedSet()\n ret = []\n for idx, rec_no in enumerate(rec_no_seq):\n ret.append([rec_no, escape(self.mTabRecLabel[rec_no]),\n AnfisaConfig.normalizeColorCode(self.mTabRecColor[rec_no]),\n rec_no in marked_set,\n rec_it_map_seq[idx].to01()])\n return ret\n\n def reportList(self, rec_no_seq, rec_it_map_seq,\n counts_transctipts, random_mode):\n rep = {\n \"workspace\": self.getName(),\n \"total\": self.getTotal(),\n \"transcripts\": counts_transctipts,\n \"filtered\": len(rec_no_seq)}\n if (random_mode and len(rec_no_seq) >\n AnfisaConfig.configOption(\"rand.min.size\")):\n sheet = [(self.mTabRecRand[rec_no], idx)\n for idx, rec_no in enumerate(rec_no_seq)]\n sheet.sort()\n del sheet[AnfisaConfig.configOption(\"rand.sample.size\"):]\n rec_no_seq = [rec_no_seq[idx] for _, idx in sheet]\n rec_it_map_seq = [rec_it_map_seq[idx] for _, idx in sheet]\n rep[\"list-mode\"] = \"samples\"\n else:\n rep[\"list-mode\"] = \"complete\"\n rep[\"records\"] = self._reportListKeys(rec_no_seq, rec_it_map_seq)\n return rep\n\n def getRecKey(self, rec_no):\n return self.mTabRecKey[rec_no]\n\n def iterRecKeys(self):\n return enumerate(self.mTabRecKey)\n\n def filterOperation(self, instr, filter_name, cond_seq):\n op, q, flt_name = instr.partition('/')\n if self.mIndex.hasStdFilter(flt_name):\n return filter_name\n with self:\n if op == \"UPDATE\":\n if cond_seq:\n cond_seq = ConditionMaker.upgradeOldFormatSeq(cond_seq)\n time_label = self.getMongoAgent().setFilter(flt_name, cond_seq)\n self.mIndex.cacheFilter(flt_name, cond_seq, time_label)\n filter_name = flt_name\n elif op == \"DELETE\":\n self.getMongoAgent().dropFilter(flt_name)\n self.mIndex.dropFilter(flt_name)\n flt_name = None\n else:\n assert False\n return filter_name\n\n #===============================================\n def _prepareContext(self, rq_args):\n if \"ctx\" in rq_args:\n return json.loads(rq_args[\"ctx\"])\n return dict()\n\n def _prepareConditions(self, rq_args, with_comp = True):\n #comp_data = (json.loads(rq_args[\"compiled\"])\n # if with_comp and \"compiled\" in rq_args else None)\n comp_data = None\n op_cond = CondOpEnv(self.mIndex.getCondEnv(), comp_data,\n json.loads(rq_args[\"conditions\"])\n if \"conditions\" in rq_args else ConditionMaker.condAll())\n return op_cond, op_cond.getResult()\n\n #===============================================\n @RestAPI.ws_request\n def rq__list(self, rq_args):\n modes = rq_args.get(\"m\", \"\").upper()\n if \"filter\" in rq_args:\n condition = self.mIndex.getFilterOpEnv(\n rq_args[\"filter\"]).getResult()\n assert 
\"conditions\" not in rq_args\n else:\n _, condition = self._prepareConditions(rq_args)\n if \"zone\" in rq_args:\n zone_name, variants = json.loads(rq_args[\"zone\"])\n zone_f = self.getZone(zone_name).getRestrictF(variants)\n else:\n zone_f = None\n rec_no_seq, rec_it_map_seq = [], []\n count_transctipts = 0\n for rec_no, rec_it_map in condition.iterSelection():\n if zone_f is not None and not zone_f(rec_no):\n continue\n rec_no_seq.append(rec_no)\n rec_it_map_seq.append(rec_it_map)\n count_transctipts += rec_it_map.count()\n return self.reportList(rec_no_seq, rec_it_map_seq,\n [count_transctipts, condition.getCondEnv().getTotalCount()],\n 'S' in modes)\n\n #===============================================\n @RestAPI.ws_request\n def rq__stat(self, rq_args):\n modes = rq_args.get(\"m\", \"\").upper()\n if \"instr\" in rq_args:\n op_env, _ = self._prepareConditions(rq_args, False)\n filter_name = self.filterOperation(rq_args[\"instr\"],\n rq_args.get(\"filter\"), op_env.getCondSeq())\n if filter_name is not None:\n op_env = self.mIndex.getFilterOpEnv(filter_name)\n elif \"filter\" in rq_args:\n assert \"conditions\" not in rq_args\n op_env = self.mIndex.getFilterOpEnv(rq_args[\"filter\"])\n else:\n op_env, _ = self._prepareConditions(rq_args)\n repr_context = self._prepareContext(rq_args)\n return self.mIndex.makeStatReport(op_env, 'R' in modes, repr_context)\n\n #===============================================\n @RestAPI.ws_request\n def rq__statunits(self, rq_args):\n _, condition = self._prepareConditions(rq_args)\n repr_context = self._prepareContext(rq_args)\n return {\n \"units\": [self.mIndex.makeUnitStatReport(\n unit_name, condition, repr_context)\n for unit_name in json.loads(rq_args[\"units\"])]}\n\n #===============================================\n @RestAPI.ws_request\n def rq__tags(self, rq_args):\n modes = rq_args.get(\"m\", \"\").upper()\n rec_no = int(rq_args.get(\"rec\"))\n if rq_args.get(\"tags\") is not None:\n tags_to_update = json.loads(rq_args.get(\"tags\"))\n with self:\n self.mTagsMan.updateRec(rec_no, tags_to_update)\n rep = self.mTagsMan.makeRecReport(rec_no)\n rep[\"filters\"] = self.mIndex.getRecFilters(rec_no, 'R' in modes)\n rep[\"tags-version\"] = self.mTagsMan.getIntVersion()\n return rep\n\n #===============================================\n @RestAPI.ws_request\n def rq__zone_list(self, rq_args):\n zone = rq_args.get(\"zone\")\n if zone is not None:\n return self.getZone(zone).makeValuesReport()\n return [[zone_h.getName(), zone_h.getTitle()]\n for zone_h in self.mZoneHandlers]\n\n #===============================================\n @RestAPI.ws_request\n def rq__rules_data(self, rq_args):\n modes = rq_args.get(\"m\", \"\").upper()\n return self.mIndex.getRulesUnit().getJSonData('R' in modes)\n\n #===============================================\n @RestAPI.ws_request\n def rq__rules_modify(self, rq_args):\n modes = rq_args.get(\"m\", \"\").upper()\n item = rq_args.get(\"it\")\n content = rq_args.get(\"cnt\")\n with self:\n return self.mIndex.getRulesUnit().modifyRulesData(\n 'R' in modes, item, content)\n\n #===============================================\n @RestAPI.ws_request\n def rq__tag_select(self, rq_args):\n return self.mTagsMan.reportSelectTag(\n rq_args.get(\"tag\"))\n\n #===============================================\n @RestAPI.ws_request\n def rq__export(self, rq_args):\n if \"filter\" in rq_args:\n condition = self.mIndex.getFilterOpEnv(\n rq_args[\"filter\"]).getResult()\n assert \"conditions\" not in rq_args\n else:\n _, condition = 
self._prepareConditions(rq_args)\n if \"zone\" in rq_args:\n zone_name, variants = json.loads(rq_args[\"zone\"])\n zone_f = self.getZone(zone_name).getRestrictF(variants)\n else:\n zone_f = None\n rec_no_seq = []\n for rec_no, _ in condition.iterSelection():\n if zone_f is not None and not zone_f(rec_no):\n continue\n rec_no_seq.append(rec_no)\n fname = self.getApp().makeExcelExport(\n self.getName(), self, rec_no_seq, self.mTagsMan)\n return {\"kind\": \"excel\", \"fname\": fname}\n\n #===============================================\n @RestAPI.ws_request\n def rq__vsetup(self, rq_args):\n return self.getViewSetupReport()\n\n","sub_path":"app/model/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182330590","text":"from sympy import *\nimport numpy as np\nimport math\nimport sympy\nfrom sympy.abc import n\nimport pandas as pd\nfrom scipy.optimize import fmin\nimport math\n\n\ndef switching(p,b):\n\tif b<1:\n\t\treturn 1-p\n\telse:\n\t\treturn p\n\ndef binary(o,b):\n\tif b<1:\n\t\treturn -1\n\telse:\n\t\treturn o\n\ndef reverseKelly(payout, kelly):\n\treturn( ((kelly * payout) + 1)/(payout + 1) )\n\ndef run(ar,bp):\t\n\tarray = [reverseKelly(ar[i],bp[i]) for i in range(len(ar))] #probability that the stock will go up\n\tarrayP = [i - 1 for i in ar]\n\t#print(array, arrayP, 'Check your inputs')\n\tk = len(array) - 1\n\n\tmaximum = 2**len(array)\n\tlength = len(list(str(bin(maximum))[2:])) - 1\n\targ = '0'+str(length)+'b'\n\tsuccess = []\n\tfor i in range(0,maximum):\n\t\tsuccess += [[int(i) for i in list(str(format(i, arg)))]]\n\tb = np.array(success)\n\n\tb[b<1]=-1\n\n\tleading = []\n\tfor i in range(len(b)):\n\t\tleading += [np.prod([switching(array[j],b[i][j]) for j in range(len(b[i]))])]\n\n\tchunk = []\n\tfor i in range(len(b)):\n\t\tchunk += [[binary(arrayP[j],b[i][j]) for j in range(len(b[i]))]]\n\n\tfor one in range(len(chunk)):\n\t\tequation = []\n\t\tfor two in range(len(chunk[one])):\n\t\t\tequation += [leading[one]*chunk[one][two]*Indexed('f',two)]\n\n\n\tp = array\n\tk = len(p)-1\n\tkp = len(p)**2 -1\n\tcrit = symbols(\"crit\")\n\tp, i = symbols(\"p i\", positive = True)\n\ty, i = symbols(\"y i\")\n\tf, i = symbols(\"f i\", positive = True, real = True)\n\tb, i = symbols(\"b i\", positive = True)\n\n\ts = 1 + Sum(Indexed('f',i)*Indexed('b',i),(i,0,k)).doit()\n\tsl = sympy.log(s)\n\tslp = 0\n\n\n\tfor j in range(len(chunk)):\n\t\tslp += leading[j]*sl.subs({Indexed('b',i): chunk[j][i] for i in range(len(array))})\n\n\t#print(slp)\n\n\tsystem = []\n\tfor i in range(len(array)):\n\t\tsystem += [sympy.diff(slp, Indexed('f',i))]\n\n\t#print(system)\n\n\ts = -1*slp\n\tfx = lambdify(f, s)\n\n\tcounter = 0\n\tfor i in range(10):\n\t\ttry:\n\t\t\tcounter += 1\n\t\t\tfinished = (fmin(fx,[i/10]*len(array)))\n\t\t\n\t\t\tif counter == 2:\n\t\t\t\t#print(finished)\n\t\t\t\treturn finished/2\n\t\texcept:\n\t\t\tprint('None found')\n","sub_path":"Manat_Methods/Analysis/simultaneousKelly.py","file_name":"simultaneousKelly.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145270089","text":"import os\nimport theano\nimport numpy as np\nfrom pylearn2.train_extensions import TrainExtension\nfrom pylearn2.gui import get_weights_report, patch_viewer\n\nclass VisualizeWeights(TrainExtension):\n def __init__(self, dir, base, format='png'):\n self.dir = dir\n self.base 
= base\n self.format = format\n\n def on_monitor(self, model, dataset, algorithm):\n model = algorithm.model\n epoch = algorithm.monitor.get_epochs_seen()\n fn = self.base + str(epoch) + '.' + self.format\n outfn = os.path.join(self.dir, fn)\n pv = get_weights_report.get_weights_report(model=model)\n pv.save(outfn)\n\nclass VisualizeReconstructions(TrainExtension):\n def __init__(self, dir, base, format='png'):\n self.dir = dir\n self.base = base\n self.format = format\n\n def on_monitor(self, model, dataset, algorithm):\n model = algorithm.model\n epoch = algorithm.monitor.get_epochs_seen()\n fn = self.base + str(epoch) + '.' + self.format\n outfn = os.path.join(self.dir, fn)\n\n # Display (patch, recon patch, diff image)\n ndata = 100\n x = theano.tensor.matrix('x')\n reconX = model.decode(model.encode(x)).eval({x: dataset.X[:ndata,:]})\n\n pv = patch_viewer.PatchViewer(grid_shape=(ndata,3), patch_shape=(12,12), is_color=0)\n for i in xrange(ndata):\n pv.add_patch(np.reshape(dataset.X[i,:], (12,12)), rescale=True)\n pv.add_patch(np.reshape(reconX[i,:], (12,12)), rescale=True)\n pv.add_patch(np.reshape(dataset.X[i,:] - reconX[i,:], (12,12)), rescale=True)#, act=None)\n pv.save(outfn)\n","sub_path":"pylearn2/train_extensions/visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"247704601","text":"# Write a Python program that prints the string without the characters located at even indices.\n# If the string is empty or only has one character, print it intact.\n\nstring = 'Programming'\n\nnew_string = ''\n\nfor i in range(len(string)):\n if i % 2 != 0:\n new_string += string[i]\n\nprint(new_string)\n\n# option 2\n# for i in range(1,len(string),2):\n","sub_path":"Exercise 63. Remove Characters at Even Indices.py","file_name":"Exercise 63. 
Remove Characters at Even Indices.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515364265","text":"nums = [1, 2, 3, 4, 5]\n\nfor _ in nums:\n if _ == 3:\n print('Founded!')\n # break\n continue\n print(_)\nfor num in nums:\n for letter in 'abcdefg':\n print(num, letter, sep='/////')\nfor i in range(1, 10):\n print(i)\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"89372887","text":"import logging\nfrom dae.query_variants.sql.schema2.base_query_builder import \\\n BaseQueryBuilder, Dialect\n\nlogger = logging.getLogger(__name__)\n\n\nclass SummaryQueryBuilder(BaseQueryBuilder):\n \"\"\"Build queries related to summary variants.\"\"\"\n\n def __init__(\n self,\n dialect: Dialect,\n db,\n family_variant_table,\n summary_allele_table,\n pedigree_table,\n family_variant_schema,\n summary_allele_schema,\n table_properties,\n pedigree_schema,\n pedigree_df,\n gene_models=None,\n ):\n # pylint: disable=too-many-arguments\n super().__init__(\n dialect,\n db,\n None, # family_variant_table,\n summary_allele_table,\n pedigree_table,\n None, # family_variant_schema,\n summary_allele_schema,\n table_properties,\n pedigree_schema,\n pedigree_df,\n gene_models=gene_models,\n )\n\n def _query_columns(self):\n return [\n \"sa.bucket_index\",\n \"sa.summary_index\",\n \"sa.summary_variant_data\",\n ]\n\n def _build_from(self):\n summary_table_name = self.dialect.build_table_name(\n self.summary_allele_table, self.db)\n from_clause = f\"\\n FROM\\n {summary_table_name} AS sa\"\n self._add_to_product(from_clause)\n\n def _build_join(self, genes=None, effect_types=None):\n if genes is not None or effect_types is not None:\n self._add_to_product(\n self.dialect.build_array_join(\"sa.effect_gene\", \"eg\"))\n\n def _build_group_by(self):\n pass\n\n def _build_having(self, **kwargs):\n pass\n\n def _build_where(\n self,\n regions=None,\n genes=None,\n effect_types=None,\n family_ids=None,\n person_ids=None,\n inheritance=None,\n roles=None,\n sexes=None,\n variant_type=None,\n real_attr_filter=None,\n ultra_rare=None,\n frequency_filter=None,\n return_reference=None,\n return_unknown=None,\n **_kwargs,\n ):\n # pylint: disable=too-many-arguments,too-many-locals\n if self.summary_allele_table:\n inheritance = None\n where_clause = self._build_where_string(\n regions=regions,\n genes=genes,\n effect_types=effect_types,\n family_ids=family_ids,\n person_ids=person_ids,\n inheritance=inheritance,\n roles=roles,\n sexes=sexes,\n variant_type=variant_type,\n real_attr_filter=real_attr_filter,\n ultra_rare=ultra_rare,\n frequency_filter=frequency_filter,\n return_reference=return_reference,\n return_unknown=return_unknown,\n )\n self._add_to_product(where_clause)\n","sub_path":"dae/dae/query_variants/sql/schema2/summary_builder.py","file_name":"summary_builder.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"351940505","text":"import requests\r\n\r\napi_key = 'asdf1234asdf1234asdf1234'\r\n\r\nq = 'cisco.com'\r\n\r\nurl = 'https://panacea.threatgrid.com/api/v2/search/submissions?q={}&api_key={}'.format(q, api_key)\r\n\r\nr = 
requests.get(url)\r\n\r\nprint(r.json())\r\n","sub_path":"04_submission_search.py","file_name":"04_submission_search.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433198854","text":"import json\nimport os\nimport argparse\n\nmarkdown_code = str()\n\nframework_list = ['caffe', 'cntk', 'coreml', 'darknet', 'mxnet', 'pytorch', 'tensorflow'] # Haven't added 'keras' yet\nframe_model_map = {\n 'caffe': {'architecture':'prototxt', 'weights':'caffemodel'},\n 'cntk': {'architecture':'model'},\n 'coreml': {'architecture':'mlmodel'},\n 'darknet': {'architecture':'cfg', 'weights':'weights'},\n 'mxnet': {'architecture':'json', 'weights':'params'},\n 'pytorch': {'architecture':'pth'},\n 'tensorflow': {'architecture':'tgz'}\n} # Haven't add 'keras' yet\ndataset_list = ['imagenet', 'imagenet11k', 'Pascal VOC', 'grocery100']\n\ndef add_code(code):\n global markdown_code\n markdown_code += code\n\ndef add_header(level, code):\n add_code(\"#\" * level + \" \" + code + '\\n\\n')\n\ndef draw_line(num):\n add_code(\"| \" * num + \"|\\n\")\n add_code((\"|-\" * num + \"|\\n\"))\n\ndef save_code(filepath):\n with open(filepath, 'w') as f:\n f.write(markdown_code)\n print(\"Markdown generate succeeded!\")\n\ndef LoadJson(json_path):\n with open(json_path, encoding='utf-8') as f:\n data = json.load(f)\n return data\n\ndef RegenerateJsonByDataset(data):\n new_data = {}\n new_data['dataset'] = {}\n for i in range(len(dataset_list)):\n new_data['dataset'][dataset_list[i]] = []\n for mo in data['models']:\n ds = mo['dataset']\n item = {}\n item['name'] = mo['name']\n item['framework'] = mo['framework']\n item['source'] = mo['source']\n item['link'] = mo['link']\n item['version'] = \"\"\n new_data['dataset'][ds].append(item)\n\n # with open('modelmapbydataset.json', 'w') as outfile:\n # json.dump(new_data, outfile)\n return new_data\n\ndef GenerateModelBlock_v2(model):\n link = model['link']\n framework = model['framework']\n\n # generate makedown script\n add_code('''|{}
Framework: {}<br>Download: '''.format(\n        model['name'],\n        model['framework']\n    ))\n    for k in link.keys():\n        if link[k]:\n            add_code(\"[{}]({}) \".format(\n                frame_model_map[framework][k],\n                link[k]\n            ))\n    add_code(\"<br>Source: \")\n    if (model['source']!=\"\"):\n        add_code(\"[Link]({})\".format(model['source']))\n    add_code(\"
\")\n\ndef DrawTableBlock(data, dataset_name):\n colnum = 3\n add_header(3, dataset_name)\n draw_line(colnum)\n models = data['dataset'][dataset_name]\n num = 0\n for i in range(len(models)):\n if ((models[i]['framework']!='keras') and (models[i]['link']['architecture']!=\"\")):\n GenerateModelBlock_v2(models[i])\n num += 1\n if num % colnum == 0:\n add_code(\"\\n\")\n add_code(\"\\n\")\n\ndef GenerateModelsList_v2(data):\n\n add_header(1, \"Model Collection\")\n\n # add Image Classification\n add_header(2, \"Image Classification\")\n for ds_name in ['imagenet', 'imagenet11k']:\n DrawTableBlock(data, ds_name)\n\n # add Object Detection\n add_header(2, \"Object Detection\")\n for ds_name in ['Pascal VOC', 'grocery100']:\n DrawTableBlock(data, ds_name)\n\n add_code(\"\\n\")\n\ndef GenerateIntroductionAndTutorial():\n # MMdnn introduction\n add_header(1, \"Introduction\")\n text_intro='''This is a collection of pre-trained models in different deep learning frameworks.\\n\nYou can download the model you want by simply click the download link.\\n\nWith the download model, you can convert them to different frameworks.\\n\nNext session show an example to show you how to convert pre-trained model between frameworks.\\n\\n'''\n add_code(text_intro)\n\n # steps for model conversion\n add_header(2, \"Steps to Convert Model\")\n text_example='''**Example: Convert vgg19 model from Tensorflow to CNTK**\\n\n1. Install the stable version of MMdnn\n ```bash\n pip install mmdnn\n ```\n2. Download Tensorflow pre-trained model\n - [x] **Method 1:** Directly download from below model collection\n - [x] **Method 2:** Use command line\n ```bash\n $ mmdownload -f tensorflow -n vgg19\n\n Downloading file [./vgg_19_2016_08_28.tar.gz] from [http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz]\n progress: 520592.0 KB downloaded, 100%\n Model saved in file: ./imagenet_vgg19.ckpt\n ```\n **NOTICE:** _the model name after the **'-n'** argument must be the models appearence in the below model collection._\n\n3. Convert model architecture(*.ckpt.meta) and weights(.ckpt) from Tensorflow to IR\n ```bash\n $ mmtoir -f tensorflow -d vgg19 -n imagenet_vgg19.ckpt.meta -w imagenet_vgg19.ckpt --dstNodeName MMdnn_Output\n\n Parse file [imagenet_vgg19.ckpt.meta] with binary format successfully.\n Tensorflow model file [imagenet_vgg19.ckpt.meta] loaded successfully.\n Tensorflow checkpoint file [imagenet_vgg19.ckpt] loaded successfully. [38] variables loaded.\n IR network structure is saved as [vgg19.json].\n IR network structure is saved as [vgg19.pb].\n IR weights are saved as [vgg19.npy].\n ```\n4. Convert models from IR to PyTorch code snippet and weights\n ```bash\n $ mmtocode -f pytorch -n vgg19.pb --IRWeightPath vgg19.npy --dstModelPath pytorch_vgg19.py -dw pytorch_vgg19.npy\n\n Parse file [vgg19.pb] with binary format successfully.\n Target network code snippet is saved as [pytorch_vgg19.py].\n Target weights are saved as [pytorch_vgg19.npy].\n ```\n5. 
Generate PyTorch model from code snippet file and weight file\n ```bash\n $ mmtomodel -f pytorch -in pytorch_vgg19.py -iw pytorch_vgg19.npy --o pytorch_vgg19.pth\n\n PyTorch model file is saved as [pytorch_vgg19.pth], generated by [pytorch_vgg19.py] and [pytorch_vgg19.npy].\n Notice that you may need [pytorch_vgg19.py] to load the model back.\n ```\n'''\n add_code(text_example)\n add_code(\"\\n\\n\")\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file', type=str, default=\"modelmap2.json\", help=\"the path of json file\")\n parser.add_argument('-d', '--distFile', type=str, default=\"Collection_v2.md\", help=\"the path of the readme file\")\n args = parser.parse_args()\n\n # Generate model converter description\n GenerateIntroductionAndTutorial()\n\n # Generate models list\n data = LoadJson(args.file)\n new_data = RegenerateJsonByDataset(data)\n GenerateModelsList_v2(new_data)\n save_code(args.distFile)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mmdnn/models/GenerateMdByDataset.py","file_name":"GenerateMdByDataset.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651622619","text":"# Sprite objects\n\nimport sys\nfrom pygame import sprite, image, transform\nfrom PIL import Image\ntry:\n import numpy as np\n from hueshift import shift_hue\n HAVE_NUMPY = True\nexcept ImportError:\n sys.stderr.write(\"Warning: numpy not available. The game will function without it, but you may experience poorer performace.\\n\")\n sys.stderr.flush()\n from colorsys import rgb_to_hsv, hsv_to_rgb\n HAVE_NUMPY = False\n\nimport time\n\nclass SpriteError(Exception):\n def __init__(self,msg):\n sys.stderr.write(msg+\"\\n\")\n sys.stderr.flush()\n\nclass Spinner(sprite.Sprite):\n def __init__(self, imagepath, size=None):\n super().__init__()\n\n## if not image.get_extended():\n## raise SpriteError(\"Fatal: extended image support not available. 
Canot open sprite.\")\n self.size = size\n self.load_image(imagepath)\n \n # how much the spinner has been rotated by\n self.angle = 0\n self.hueshift = 0\n\n self.times_taken = []\n\n def load_image(self, imagepath):\n # get original image as 8-bit since LUT operations are faster\n # than bitmap operations\n im = Image.open(imagepath)\n self.original_image = image.fromstring(im.tobytes(), im.size, \"P\")\n\n # create the palette\n palette = []\n rgb = []\n for val in im.getpalette():\n rgb.append(val)\n if len(rgb) == 3:\n palette.append(rgb + [255])\n rgb = []\n## print (palette[0])\n self.original_image.set_palette(palette)\n self.original_image.set_colorkey([255,255,255])\n \n self.original_rect = self.original_image.get_rect()\n\n self.base_image = transform.scale(self.original_image, self.size)\n self.image = self.base_image\n self.rect = self.original_rect\n\n\n def set_centre_pos(self, pos):\n self.original_rect.center = (pos[0], pos[1])\n\n def rotate(self, angle):\n self.angle += angle\n self.image = transform.rotate(self.base_image, self.angle)\n self.rect = self.image.get_rect(center=self.original_rect.center)\n\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n\n def set_hueshift(self, hueshift):\n \"\"\"Sets the hueshift of the image via a LUT operation\"\"\"\n self.hueshift = hueshift\n p = self.original_image.get_palette()\n if HAVE_NUMPY:\n p2 = shift_hue(np.array(p), self.hueshift)\n else:\n hsv = [list(rgb_to_hsv(*val)) for val in p]\n for i in range(len(hsv)):\n hsv[i][0] = (hsv[i][0]+self.hueshift)%1.0\n p2 = [hsv_to_rgb(*val) for val in hsv]\n self.base_image.set_palette(p2)\n","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"418220883","text":"import os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport torch\ntorch.manual_seed(2019)\ntorch.cuda.manual_seed_all(2019)\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport argparse\n\nimport time\nimport pdb\n\ntry:\n from .jaad import JAADDataset\n from .jaad_loc import JAADLocDataset\n from .stip import STIPDataset\nexcept:\n from jaad import JAADDataset\n from jaad_loc import JAADLocDataset\n from stip import STIPDataset\n\ndef jaad_collate(batch):\n # each item in batch: a tuple of \n # 1. ped_crops: (30, 224, 224, 3)\n # 2. masks: list of len 30: each = dict of ndarrays: (n_obj, 1080, 1920, 1)\n # 3. 
GT_act: binary ndarray: (30, 9)\n ped_crops = []\n masks = []\n GT_act, GT_bbox, GT_pose = [], [], []\n obj_bbox, obj_cls = [], []\n fids = []\n img_paths = []\n for each in batch:\n ped_crops += each['ped_crops'],\n masks += each['all_masks'],\n GT_act += each['GT_act'],\n GT_bbox += each['GT_bbox'],\n GT_pose += each['GT_pose'],\n obj_bbox += each['obj_bbox'],\n obj_cls += each['obj_cls'],\n obj_bbox_l += each['obj_bbox_l'],\n obj_cls_l += each['obj_cls_l'],\n obj_bbox_r += each['obj_bbox_r'],\n obj_cls_r += each['obj_cls_r'],\n fids += each['fids'],\n img_paths += each['img_paths'],\n ped_crops = torch.stack(ped_crops)\n GT_act = torch.stack(GT_act)\n GT_bbox = torch.stack(GT_bbox)\n GT_pose = torch.stack(GT_pose)\n fids = torch.stack(fids)\n ret = {\n 'ped_crops': ped_crops,\n 'all_masks': masks,\n 'GT_act': GT_act,\n 'GT_bbox': GT_bbox,\n 'GT_pose': GT_pose,\n 'obj_cls': obj_cls,\n 'obj_bbox': obj_bbox,\n 'fids': fids,\n 'img_paths': img_paths,\n }\n if 'frames' in batch[0]:\n ret['frames'] = torch.stack([each['frames'] for each in batch], 0)\n ret['GT_driver_act'] = torch.stack([each['GT_driver_act'] for each in batch], 0)\n\n return ret\n\n\ndef jaad_loc_collate(batch):\n # each item in batch: a tuple of \n # 1. ped_crops: (30, 224, 224, 3)\n # 2. masks: list of len 30: each = dict of ndarrays: (n_obj, 1080, 1920, 1)\n # 3. GT_act: binary ndarray: (30, 9)\n ped_crops = []\n masks = []\n GT_act, GT_ped_bbox = [], []\n obj_bbox, obj_cls = [], []\n fids = []\n for each in batch:\n ped_crops += each['ped_crops'],\n masks += each['all_masks'],\n GT_act += each['GT_act'],\n GT_ped_bbox += each['GT_ped_bbox'],\n obj_bbox += each['obj_bbox'],\n obj_cls += each['obj_cls'],\n fids += each['fids'],\n GT_act = torch.stack(GT_act)\n fids = torch.stack(fids)\n ret = {\n 'ped_crops': ped_crops,\n 'all_masks': masks,\n 'GT_act': GT_act,\n 'GT_ped_bbox': GT_ped_bbox,\n 'obj_cls': obj_cls,\n 'obj_bbox': obj_bbox,\n 'fids': fids,\n }\n if 'frames' in batch[0]:\n ret['frames'] = torch.stack([each['frames'] for each in batch], 0)\n return ret\n\n\ndef stip_collate(batch):\n # each item in batch: a tuple of \n # 1. ped_crops: (30, 224, 224, 3)\n # 2. masks: list of len 30: each = dict of ndarrays: (n_obj, 1080, 1920, 1)\n # 3. 
GT_act: binary ndarray: (30, 9)\n ped_crops = []\n masks = []\n GT_act, GT_bbox, GT_pose = [], [], []\n obj_bbox, obj_cls = [], []\n fids = []\n img_paths = []\n for each in batch:\n ped_crops += each['ped_crops'],\n masks += each['all_masks'],\n GT_act += each['GT_act'],\n GT_bbox += each['GT_bbox'],\n # GT_pose += each['GT_pose'],\n obj_bbox += each['obj_bbox'],\n obj_cls += each['obj_cls'],\n fids += each['fids'],\n img_paths += each['img_paths'],\n ped_crops = torch.stack(ped_crops)\n GT_act = torch.stack(GT_act)\n GT_bbox = torch.stack(GT_bbox)\n if len(GT_pose):\n GT_pose = torch.stack(GT_pose)\n fids = torch.stack(fids)\n ret = {\n 'ped_crops': ped_crops,\n 'all_masks': masks,\n 'GT_act': GT_act,\n 'GT_bbox': GT_bbox,\n 'obj_cls': obj_cls,\n 'obj_bbox': obj_bbox,\n 'fids': fids,\n 'img_paths': img_paths,\n }\n if len(GT_pose):\n ret['GT_pose'] = GT_pose\n if 'frames' in batch[0]:\n ret['frames'] = torch.stack([each['frames'] for each in batch], 0)\n ret['GT_driver_act'] = torch.stack([each['GT_driver_act'] for each in batch], 0)\n\n return ret\n\n\n\ndef get_data_loader(opt):\n if opt.dset_name.lower() == 'jaad':\n dset = JAADDataset(opt)\n print('Built JAADDataset.')\n collate_fn = jaad_collate\n\n elif opt.dset_name.lower() == 'jaad_loc':\n dset = JAADLocDataset(opt)\n print('Built JAADLocDataset.')\n collate_fn = jaad_loc_collate\n\n elif opt.dset_name.lower() == 'stip':\n dset = STIPDataset(opt)\n print('Built STIPDataset')\n collate_fn = stip_collate\n\n else:\n raise NotImplementedError('Sorry but we currently only support JAAD. ^ ^b')\n\n dloader = data.DataLoader(dset,\n batch_size=opt.batch_size,\n shuffle=opt.is_train,\n num_workers=opt.n_workers,\n pin_memory=True,\n collate_fn=collate_fn,\n )\n\n return dloader\n\n\ndef cache_all_objs(opt, cache_dir_name):\n opt.is_train = False\n\n opt.collapse_cls = 1\n cache_dir_root = '/sailhome/ajarno/STR-PIP/datasets/cache/'\n cache_dir = os.path.join(cache_dir_root, cache_dir_name)\n os.makedirs(cache_dir, exist_ok=True)\n opt.save_cache_format = os.path.join(cache_dir, opt.split, 'ped{}_fid{}.pkl')\n os.makedirs(os.path.dirname(opt.save_cache_format), exist_ok=True)\n\n dset = JAADDataset(opt)\n dloader = data.DataLoader(dset,\n batch_size=1,\n shuffle=False,\n num_workers=0,\n collate_fn=jaad_collate)\n\n t_start = time.time()\n for i,each in enumerate(dloader):\n if i%50 == 0 and i:\n print('{}: avg time: {:.3f}'.format(i, (time.time()-t_start) / 50))\n t_start = time.time()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dset-name', type=str, default='JAAD_loc')\n parser.add_argument('--annot-ped-format', type=str, default='/sailhome/ajarno/STR-PIP/datasets/annot_{}_ped.pkl')\n parser.add_argument('--annot-loc-format', type=str, default='/sailhome/ajarno/STR-PIP/datasets/annot_{}_loc.pkl')\n parser.add_argument('--is-train', type=int, default=1)\n parser.add_argument('--split', type=str, default='train')\n parser.add_argument('--seq-len', type=int, default=30)\n parser.add_argument('--ped-crop-size', type=tuple, default=(224, 224))\n parser.add_argument('--mask-size', type=tuple, default=(224, 224))\n parser.add_argument('--collapse-cls', type=int, default=0,\n help='Whether to merge the classes. 
If 1 then each item in masks is a dict keyed by cls, otherwise a list.')\n parser.add_argument('--img-path-format', type=str,\n default='/sailhome/ajarno/STR-PIP/datasets/JAAD_dataset/JAAD_clip_images/video_{:04d}.mp4/{:d}.jpg')\n parser.add_argument('--fsegm-format', type=str,\n default='/sailhome/ajarno/STR-PIP/datasets/JAAD_instance_segm/video_{:04d}/{:08d}_segm.npy')\n parser.add_argument('--save-cache-format', type=str, default='')\n parser.add_argument('--cache-format', type=str, default='')\n parser.add_argument('--batch-size', type=int, default=4)\n parser.add_argument('--n-workers', type=int, default=0)\n # added to test loader\n parser.add_argument('--rand-test', type=int, default=1)\n parser.add_argument('--predict', type=int, default=0)\n parser.add_argument('--predict-k', type=int, default=0)\n parser.add_argument('--combine-method', type=str, default='none')\n parser.add_argument('--load-cache', type=str, default='masks')\n parser.add_argument('--cache-obj-bbox-format', type=str,\n default='/sailhome/ajarno/STR-PIP/datasets/cache/obj_bbox_merged/vid{:08d}.pkl')\n\n opt = parser.parse_args()\n opt.save_cache_format = '/sailhome/ajarno/STR-PIP/datasets/cache/jaad_loc/{}/vid{}_fid{}.pkl'\n opt.cache_format = opt.save_cache_format\n opt.seq_len = 1\n opt.split = 'test'\n\n if True:\n # test dloader\n dloader = get_data_loader(opt)\n\n # for i,eg in enumerate(dloader):\n # if i%100 == 0:\n # print(i)\n # sys.stdout.flush()\n\n for i,vid in enumerate(dloader.dataset.vids):\n print('vid:', vid)\n annot = dloader.dataset.annots[vid]\n n_frames = len(annot['act'])\n for fid in range(n_frames):\n fcache = opt.cache_format.format(opt.split, vid, fid+1)\n if os.path.exists(fcache):\n continue\n dloader.dataset.__getitem__(i, fid_start=fid)\n","sub_path":"data/get_data_loader.py","file_name":"get_data_loader.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200856100","text":"import asyncio\nimport json\nimport logging\nimport websockets\n\nimport aiohttp\nfrom aiohttp import web\nfrom datetime import datetime\n\n\nlogging.basicConfig()\n\nSTATE = dict()\nUSERS = set()\n\nORDER = ['x', 'y', 'z', 'trigger']\n\n\nclass Game:\n \n def __init__(self):\n self.before = datetime.now()\n\n self.FIELD = [0, 0, 0, 0, 0, 0, 0, 0] \n self.SPEED = (10**6 - 10000) \n self.DIRECTIONS = {\"LEFT\": -1, \"RIGHT\": 1} \n self.BALL_POSITION = 0\n\n self.DIRECTION = \"RIGHT\"\n \n self.trigger_state = 0\n\n def main_loop(self, trigger_state):\n # print(trigger_state, type(trigger_state))\n if trigger_state is not None:\n trigger_state = int(trigger_state)\n \n now = datetime.now()\n td = now - self.before\n td = td.seconds * 10**6 + td.microseconds\n\n if td > self.SPEED:\n try:\n self.before = now\n\n NEW_BALL_POSITION = self.BALL_POSITION + self.DIRECTIONS[self.DIRECTION]\n if NEW_BALL_POSITION == 0 or NEW_BALL_POSITION == 7:\n if trigger_state == 1: \n if self.DIRECTION == \"RIGHT\":\n self.DIRECTION = \"LEFT\"\n else:\n self.DIRECTION = \"RIGHT\"\n if NEW_BALL_POSITION == -1 or NEW_BALL_POSITION == 8:\n print(\"game over\")\n raise \n\n self.FIELD[NEW_BALL_POSITION] = 1\n self.FIELD[self.BALL_POSITION] = 0\n self.BALL_POSITION = NEW_BALL_POSITION\n print(self.FIELD)\n\n\n except IndexError:\n if self.DIRECTION == \"RIGHT\":\n self.DIRECTION = \"LEFT\"\n else:\n self.DIRECTION = \"RIGHT\"\n\n\n\ngame = Game()\n\n\nasync def notify_users():\n if USERS: # asyncio.wait doesn't accept an empty list\n 
serialized_state = ';'.join(str(STATE.get(i, 0)) for i in ORDER)\n # print(serialized_state)\n trigger_state = STATE.get(ORDER[-1], 0)\n game.main_loop(trigger_state)\n await asyncio.wait([user.send_str(serialized_state) for user in USERS])\n\n\nasync def websocket_handler(request):\n websocket = web.WebSocketResponse()\n print('connected')\n await websocket.prepare(request)\n\n USERS.add(websocket)\n try:\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n # print(msg.data)\n STATE.update(json.loads(msg.data))\n await notify_users()\n finally:\n USERS.remove(websocket)\n\n return websocket\n\n\nasync def hello(request):\n with open('./index.html') as file:\n html_data = file.read()\n\n return web.Response(text=html_data, content_type='text/html')\n\n\napp = web.Application()\napp.add_routes([\n web.get('/', hello),\n web.get('/ws', websocket_handler)\n])\n\nweb.run_app(app)\n","sub_path":"streamer.py","file_name":"streamer.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"456757583","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\n\ndef load_avg_nlls(nll_dir, dataset):\n return np.load(nll_dir + '/' + dataset + '_avg_graph_nlls.npy')\n\ndef plot_nlls(nlls, title, xlabel):\n \"\"\"\n Plot the nll distribution for a set of graphs\n\n Parameters:\n - nlls: list of nlls calculates\n \"\"\"\n fig, ax = plt.subplots()\n # Plot the two distributions side by side\n sns.distplot(nlls, ax=ax, kde=True)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n \n return fig, ax\n\n\ndef compare_dist(nlls_list, labels, title):\n \"\"\"\n Plot multiple graph distributions\n\n Parameters:\n - nlls_list: list of nll arrays for the different graph classes\n we are comparing\n - lables: the labels that will be used in the legend\n - Examples: DD_1 train (normal), DD_1 (normal), DD_2 (anom)\n \"\"\"\n fig, ax = plt.subplots()\n for i in range(len(nlls_list)):\n sns.distplot(nlls_list[i], ax=ax, kde=True, label=labels[i])\n ax.legend()\n ax.set_xlabel(\"Negative Log Likelihood\")\n ax.set_title(title)\n return fig, ax\n\ndef anomally_detection_score(nlls, labels, threshold):\n \"\"\"\n Given nll predictions for a set of graphs, classify\n the graphs into nomral and anomalous graph classes\n based on the nll threshold\n \"\"\"\n pred_labels = np.zeros(nlls.shape[0])\n\n for i in range(nlls.shape[0]):\n # Label 1 is anomalous\n if nlls[i] > threshold:\n pred_labels[i] = 1\n \n\n # Compute the accuracy\n print (np.sum(np.abs(pred_labels - labels)))\n # Gives the accuracy as 1 - num_incorrect / numa\n accuracy = 1 - np.sum(np.abs(pred_labels - labels)) / labels.shape[0]\n return accuracy\n\n ","sub_path":"eval_nll.py","file_name":"eval_nll.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565824570","text":"'''Stack ADT.\n'''\n\nclass Stack:\n\n '''A last-in, first-out (LIFO) stack of items'''\n\n def __init__(self):\n '''(Stack) -> NoneType\n\n Initialize new Stack self.\n '''\n self._data = [] # _data is not part of the public interface\n\n def pop(self):\n ''' (Stack) -> object\n\n Remove and return the top item from self.\n\n >>> s = Stack()\n >>> s.push(2)\n >>> s.push(3)\n >>> s.pop()\n 3\n '''\n return self._data.pop()\n\n def is_empty(self):\n ''' (Stack) -> bool\n\n Return whether the self is empty.\n\n >>> s = Stack()\n >>> s.push(4)\n >>> s.pop()\n 4\n 
>>> s.is_empty()\n True\n '''\n return self._data == []\n\n def push(self, o):\n ''' (Stack, object) -> NoneType\n\n Place object o on top of Stack self.\n '''\n self._data.append(o)\n\n # implementation of __eq__, __str__,\n # and __repr__ left as an exercise\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n #uncomment lines below to test performance\n import time\n s = Stack()\n items = range(100000)\n #\n # start the clock\n start = time.time()\n #\n for i in items:\n s.push(i)\n #\n for i in items:\n s.pop()\n #\n end = time.time()\n print (\"It took \", end - start, \"to push/pop\", len(items), \"items\")\n \n \n \n \n\n","sub_path":"Old Classes/Computer Science/148/csc148/lab04/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115237583","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 18 09:37:03 2020\n\n@author: Bill\n\"\"\"\nimport cv2 \n#import numpy as np \n\n\ndef user_pressed(key):\n wait_ms = 25\n return cv2.waitKey(wait_ms) & 0xFF == ord(key)\n\ndef key_pressed():\n return cv2.waitKey(wait_ms) & 0xFF\n\n# Create a VideoCapture object and read from input file \ncap = cv2.VideoCapture('CoVid-19.mp4') \n\n# Check if camera opened successfully \nif (cap.isOpened()== False): \n print(\"Error opening video file\") \n\ndef you_clicked(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONUP:\n print('You clicked!')\n\ncv2.namedWindow(\"Frame\")\ncv2.setMouseCallback(\"Frame\", you_clicked)\n\nwait_ms = 25\nproceed = True\nwhile(cap.isOpened()): \n\t\n # Capture frame-by-frame \n if proceed:\n ret, frame = cap.read() \n \n if ret: \n height , width , layers = frame.shape\n\n new_h = height//2; new_w = width//2\n shrunk = cv2.resize(frame, (new_w, new_h)) \n\n \t# Display the resulting frame \n cv2.imshow('Frame', shrunk) \n # Break the loop \n else: \n \tbreak\n \n keycode = cv2.waitKey(wait_ms) & 0xFF\n \n\t# Press Q on keyboard to exit \n if keycode == ord('q'): \n break\n\n if keycode==ord(' '):\n proceed = not proceed\n print(proceed)\n\n \n\n# When everything done, release \n# the video capture object \ncap.release() \n\n# Closes all the frames \ncv2.destroyAllWindows()\n","sub_path":"annotate_video.py","file_name":"annotate_video.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566364632","text":"import math\nimport numpy as np\n\nfrom potential_field import PotentialField\n\nclass TangentialField(PotentialField):\n\n def __init__(self, object_radius, movement_distance, max_force, exists, counterclockwise = True):\n self.object_radius = object_radius\n self.movement_distance = movement_distance\n self.max_force = max_force\n self.exists = exists\n if counterclockwise:\n self.direction = 1\n else:\n self.direction = -1\n\n def get_vector(self, robot_position, object_position):\n if not self.exists:\n return [0., 0.]\n distance = self.get_distance(robot_position, object_position)\n angle = self.get_angle(robot_position, object_position)\n if distance < self.object_radius:\n print('robot within obstacle radius')\n dx = 0\n dy = 0\n # dx = -(np.sign(np.cos(angle))) * (1.5 * self.max_force)\n # dy = -(np.sign(np.sin(angle))) * (1.5 * self.max_force)\n elif (self.object_radius <= distance) and (distance <= (self.object_radius + self.movement_distance)):\n constant_proportion = .25\n if distance >= self.object_radius + (3 * 
self.movement_distance / 4):\n magnitude = (self.max_force * constant_proportion)\n dx = self.direction * magnitude * np.cos(angle + math.pi / 2)\n dy = self.direction * magnitude * np.sin(angle + math.pi / 2)\n else:\n magnitude = (((1 - constant_proportion) * self.max_force) / (3 * self.movement_distance / 4)) * ((3 * self.movement_distance / 4) - distance + self.object_radius) + (constant_proportion * self.max_force)\n dx = self.direction * magnitude * np.cos(angle + math.pi / 2)\n dy = self.direction * magnitude * np.sin(angle + math.pi / 2)\n else:\n dx = 0.\n dy = 0.\n return [dx, dy]\n\n def exists(self):\n return self.exists\n","sub_path":"tangential_field.py","file_name":"tangential_field.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367530437","text":"#event.py for accessing the EventBrite API\nimport json\n\n#get the key\nf = open('../.secret_key.txt', 'rU')\nEVENTBRITE_KEY = json.loads(f.read())[\"eventbrite\"]\nf.close()\n\n\ndef get_events(like, zipcode):\n url = \"https://www.eventbriteapi.com/v3/events/search/?token=\" \\\n + EVENTBRITE_KEY + \"&q=\" + like + \"&location.address=\" + zipcode\\\n + \"&location.within=50mi\"\n response = urllib2.urlopen(url)\n url = response.geturl()\n info = response.read()\n info = json.loads(info)\n #print info\n return info\n\n","sub_path":"Project2/datebrite/utils/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519557353","text":"import numpy as np\nimport pandas as pd\nimport time,calendar,datetime,csv,math,json,sys,operator,os\nfrom pprint import pprint\nfrom builders import momentum_indicators \nimport chart_filter\nimport ma_filter\npd.options.mode.chained_assignment = None\n\ndef main():\n p = {\n 'path_candle_file' : 'builders/warehouse/candle_data/' + '30min_bitstamp.csv',\n 'timeframe' : ['2014-01-01 00:00:00','2018-10-10 00:00:00'],\n 'candle_sec': '1800',\n 'buy': '1-sellEnd_1open*1.0001',\n 'sell': 'buy-10_realHighest',\n 'chart_filter': [\n {\n 'toggle': False,\n 'condition': 'condition1',\n 'path_trendline_file': 'builders/warehouse/trendlines/' + '30min_2014-01-01_2018-06-19_40_150_4_15_001_001_4.txt', \n 'mode': 'greater_than_limit',\n 'condition_parameter': 'm', \n 'limit': '0',\n 'limit1': '0',\n 'limit2': '0'\n },\n {\n 'toggle': False,\n 'lineAbove': {\n 'path_candle_file': 'builders/warehouse/candle_data/' + '30min_bitstamp.csv',\n 'indicador': 'SMA',\n 'average': '30',\n },\n 'lineBellow': {\n 'path_candle_file': 'builders/warehouse/candle_data/' + '30min_bitstamp.csv',\n 'indicador': 'SMA',\n 'average': '7',\n }\n }\n ],\n 'units_maker': {\n 'threshold' : '30',\n 'td_s': '-9',\n 'td_c': '13',\n 'pattern': 'pattern6',\n 'max_order': '500', # in USD\n 'path_historical_data' : 'builders/warehouse/historical_data/' + 'bitstampUSD.csv',\n 'add': ['buy','sell','lowest','lastPrice'] \n }\n }\n\n # goodtimes = chart_filter.callable(p)\n goodtimes = ma_filter.frontDoor(p)\n # pprint(goodtimes)\n units_list = callable(p,goodtimes)\n p['units_maker']['units_amt'] = len(units_list)\n pprint(units_list)\n print('Amount of units in the setup: ',len(units_list))\n write_json((p,units_list))\n\ndef callable(p,goodtimes):\n candle_df = get_dataframe(p)\n raw_df = get_raw(p)\n # rsi_df = get_rsi_df(p)\n # td_s_df = get_td_s_df(p)\n # td_c_df = get_td_c_df(p)\n units_list = 
globals()[p['units_maker']['pattern']](p,goodtimes)\n for item in p['units_maker']['add']:\n globals()['add_{0}'.format(item)](p,units_list,candle_df,raw_df)\n return units_list\n\n# ---------------------------------------------------------------------------------\n# * SECTION 1 *\n# Every function in this section must return a units_list, with the timestamp of the candle 0 of each unit. \n\ndef pattern1(p,goodtimes):\n # Input:
<p>
, specifically p['path_candle_file'] and p['units_maker']['threshold']. which has all the periods we want. In case chart_filter turned off, will be a list containing\n # one period, which is a list of two items, the first being the inferior limit and the second the superior limit of the period. In the other hand, if chart_filter is turned on, will most likely have\n # many periods in it.\n # Output: , which is a list of dictionaries. Each dictionary only has the '0' key, which stands for 'candle 0'. Its value is also a dictionary with a single key named 'ts' containing the timestamp\n # of the candle 0 as a value. can be understood as a list of units. Every single dictionary in the list is a place where will be added the respective information of each unit. So far in the program\n # the only information each unit has is its first timestamp, the timestamp that starts the candle 0. \n rsi = momentum_indicators.rsi(p['path_candle_file'])\n units_list = []\n threshold = float(p['units_maker']['threshold'])\n for period in goodtimes: \n mini_rsi = filter_rsi(rsi,period)\n for index in range(mini_rsi.shape[0]-1):\n if mini_rsi[index,1] < threshold and mini_rsi[index-1,1] > threshold:\n units_list.append({'0': {'ts': mini_rsi[index,0]}})\n return units_list\n\ndef pattern2(p,goodtimes):\n # For td_setup : equal to 9 for \"normal\" td_sell_setup, 80 for minimal_sell_setup or 90 for perfect_sell_setup\n # : equal to -9 for \"normal\" td_buy_setup, -80 for minimal_buy_setup or -90 for perfect_sell_setup\n td_s_df = get_td_s_df(p)\n td_s_df = td_s_df.reset_index()\n td = td_s_df.values\n td_s = int(p['units_maker']['td_s'])\n units_list = []\n for period in goodtimes: \n mini_td = filter_td(td,period)\n for i in range(mini_td.shape[0]):\n if mini_td[i,1]==td_s:\n units_list.append({'0': {'ts': mini_td[i,0]}})\n return units_list\n\ndef pattern3(p,goodtimes):\n # For td_countdown : equal to 13 for td_sell_countdown\n # : equal to -13 for td_buy_countdown\n td_c_df = get_td_c_df(p)\n td_c_df = td_c_df.reset_index()\n td = td_c_df.values\n td_c = int(p['units_maker']['td_c'])\n units_list = []\n for period in goodtimes: \n mini_td = filter_td(td,period)\n for i in range(mini_td.shape[0]):\n if mini_td[i,1]==td_c:\n units_list.append({'0': {'ts': mini_td[i,0]}})\n return units_list\n\n# ---------------------------------------------------------------------------------\n# * SECTION 2 *\n# Add details to units_list for further analysis.\n\ndef add_buy(p,units_list,candle_df,raw_df):\n operator_dict = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul\n }\n candle,moment = translate_order('buy',p['buy'])\n p['buy'] = {\n 'candle': candle,\n 'moment': moment\n }\n candle,moment = translate_order('sell',p['sell'])\n p['sell'] = {\n 'candle': candle,\n 'moment': moment\n }\n for unit in units_list:\n unit['buy'] = {}\n find_buy(p,unit,candle_df,raw_df,operator_dict)\n\ndef add_sell(p,units_list,candle_df,raw_df):\n for unit in units_list:\n if unit['buy']['type'] == 'all-bought':\n unit['sell'] = {}\n find_sell(p,unit,candle_df,raw_df)\n\ndef add_lowest(p,units_list,candle_df,raw_df):\n for unit in units_list:\n if unit['buy']['type'] == 'all-bought':\n if unit['sell']['type'] == 'all-sold':\n unit['lowest'] = {}\n find_lowest(p,unit,candle_df,raw_df)\n\ndef add_lastPrice(p,units_list,candle_df,raw_df):\n for unit in units_list:\n end_interval = int(unit['0']['ts'])+int(p['candle_sec'])*(int(p['sell']['candle'][-1])+1)\n raw_section = raw_df[raw_df.timestamp=start_interval) & 
(raw_df.timestamp=unit['buy']['price']]\n if raw_partition.empty:\n unit['buy']['type'] = 'nothing-bought'\n return\n raw_partition['USD_acc_volume'] = raw_partition['volume'].cumsum(axis = 0)*raw_partition['price']\n unit['buy']['first_executed'] = {\n 'ts': int(raw_partition.iloc[0].timestamp),\n 'index': int(raw_partition.iloc[0].name)\n }\n if (raw_partition.USD_acc_volume >= float(p['units_maker']['max_order'])).any():\n unit['buy']['type'] = 'all-bought'\n last_executed_row = raw_partition[raw_partition.USD_acc_volume >= float(p['units_maker']['max_order'])].iloc[0]\n unit['buy']['last_executed'] = {\n 'ts': int(last_executed_row.timestamp),\n 'index': int(last_executed_row.name)\n }\n start_interval_to_last_executed = raw_section.loc[:unit['buy']['last_executed']['index']]\n lowest_row = start_interval_to_last_executed[start_interval_to_last_executed.price == start_interval_to_last_executed.price.min()].iloc[0]\n unit['buy']['lowest'] = {\n 'ts': int(lowest_row.timestamp), \n 'index': int(lowest_row.name),\n 'price': (float(lowest_row.price) - unit['buy']['price'])/unit['buy']['price']\n }\n else:\n unit['buy']['type'] = 'partially-bought'\n\ndef find_sell(p,unit,candle_df,raw_df):\n start_index = unit['buy']['last_executed']['index']\n end_interval = int(unit['0']['ts'])+int(p['candle_sec'])*(int(p['sell']['candle'][-1])+1)\n raw_section = raw_df[(raw_df.index>start_index) & (raw_df.timestamp= float(p['units_maker']['max_order'])).any():\n unit['sell']['type'] = 'all-sold'\n realHighest_price = raw_sorted[raw_sorted.USD_acc_volume >= float(p['units_maker']['max_order'])].iloc[0].price\n unit['sell']['realHighest_price'] = realHighest_price\n unit['sell']['realHighest'] = (float(realHighest_price) - unit['buy']['price'])/unit['buy']['price']\n\n raw_partition = raw_section[raw_section.price>=realHighest_price]\n raw_partition['USD_acc_volume'] = raw_partition['volume'].cumsum(axis = 0)*raw_partition['price']\n last_row = raw_partition[raw_partition.USD_acc_volume >= float(p['units_maker']['max_order'])].iloc[0]\n unit['sell']['first_executed'] = {\n 'ts': int(raw_partition.iloc[0].timestamp), \n 'index': int(raw_partition.iloc[0].name) \n }\n unit['sell']['last_executed'] = {\n 'ts': int(last_row.timestamp), \n 'index': int(last_row.name) \n }\n\ndef find_lowest(p,unit,candle_df,raw_df):\n start_index = unit['buy']['last_executed']['index'] + 1\n end_index = unit['sell']['last_executed']['index']\n raw_section = raw_df.loc[start_index:end_index] \n min_row = raw_section[raw_section.price == raw_section.price.min()].iloc[0]\n unit['lowest']['price'] = (float(min_row.price) - unit['buy']['price'])/unit['buy']['price']\n unit['lowest']['ts'] = int(min_row.timestamp)\n unit['lowest']['index'] = int(min_row.name)\n\ndef translate_order(mode,input):\n# This function receives as input a string with the format '1-2-3_0high+30' and returns a list called 'candle' and\n# a dictionary 'moment' that are useful for further calculation.\n moment = {}\n candle,moment['string'] = input.split('_')\n candle = candle.split('-')\n\n if mode == 'buy':\n index = 0\n for char in moment['string']:\n if char.isdigit():\n index = index + 1 \n else:\n break\n moment['candle'] = moment['string'][0:index]\n moment['ohlc'] = [i for i in ['open','high','low','close'] if i in moment['string']][0]\n ope = [i for i in ['+','-','*'] if i in moment['string']]\n if ope != []:\n moment['operator'] = ope[0]\n moment['change'] = moment['string'][moment['string'].find(moment['operator'])+1:]\n return candle,moment\n\n if 
mode == 'sell':\n return candle,moment\n\ndef get_raw(p):\n return pd.read_csv(p['units_maker']['path_historical_data'], header=None, names=['timestamp','price','volume'])\n\ndef filter_rsi(rsi,timeframe):\n return rsi[(rsi[:,0] >= timeframe[0]) & (rsi[:,0] <= timeframe[1])]\n\ndef filter_td(td,timeframe):\n return td[(td[:,0] >= timeframe[0]) & (td[:,0] <= timeframe[1])]\n\ndef get_dataframe(p):\n pre_candles_df = pd.read_csv(p['path_candle_file'], header=None, names=['time','timestamp','open','high','low','close','volume','change'])\n candles_df = pre_candles_df.set_index('timestamp')\n return candles_df\n\ndef get_rsi_df(p):\n rsi_array = momentum_indicators.rsi(p['path_candle_file'])\n pre_rsi_df = pd.DataFrame(rsi_array, columns = ['timestamp','rsi'])\n rsi_df = pre_rsi_df.set_index('timestamp')\n return rsi_df\n\ndef get_td_s_df(p):\n# Here we get the data from the csv and put in an array the timestamp and the td of the respective candle\n with open('builders/warehouse/td_data/td_setup_30min_bitstamp.csv', newline='') as csvfile:\n data = csv.reader(csvfile, delimiter=' ', quotechar='|')\n big_list = []\n for row in data:\n ts_start = float(row[0].split(',')[0])\n td = float(row[0].split(',')[1])\n big_list.append([ts_start,td])\n td_s_data = np.array(big_list)\n # td_s_data = td_s_data.astype(int)\n td_s_df = pd.DataFrame(td_s_data, columns = ['timestamp','td_s'])\n td_s_df = td_s_df.set_index('timestamp')\n return td_s_df\n\ndef get_td_c_df(p):\n# Here we get the data from the csv and put in an array the timestamp and the td of the respective candle\n with open('builders/warehouse/td_data/td_countdown_30min_bitstamp.csv', newline='') as csvfile:\n data = csv.reader(csvfile, delimiter=' ', quotechar='|')\n list = []\n big_list = []\n for row in data:\n ts_start = float(row[0].split(',')[0])\n td = float(row[0].split(',')[1])\n big_list.append([ts_start,td])\n td_c_data = np.array(big_list)\n # td_c_data = td_c_data.astype(int)\n td_c_df = pd.DataFrame(td_c_data, columns = ['timestamp','td_c'])\n td_c_df = td_c_df.set_index('timestamp')\n return td_c_df\n\ndef write_json(data):\n # It dumps the data in a new file called \"experiment.txt\" in experiment_data directory.\n half1_path = 'builders/warehouse/setup_data/setup'\n half2_path = str(int(time.time()))\n path = half1_path + half2_path + '.txt'\n while os.path.exists(path):\n time.sleep(1)\n half2_path = str(int(time.time()))\n path = half1_path + half2_path + '.txt'\n with open(path, 'w') as outfile:\n json.dump(data, outfile)\n\nif __name__ == '__main__':\n time1 = time.time()\n main()\n time2 = time.time()\n print('---------------------------------------')\n print('Runtime: ',time2-time1)\n print('Ran at: ',datetime.datetime.fromtimestamp(time2))\n","sub_path":"units_maker.py","file_name":"units_maker.py","file_ext":"py","file_size_in_byte":15203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419380588","text":"import os\nfrom pathlib import Path\n\n\nclass GradleFinder:\n def __init__(self, couchEditRoot: str = None):\n \"\"\"\n :param couchEditRoot: Root path of the CouchEdit application, where the gradlew(.bat) file resides.\n If none is given, the cwd will be used and its parent dirs will be queried.\n \"\"\"\n\n self.__couchEditRoot = couchEditRoot\n self.__gradleFileName = self.__getGradleFileName()\n\n def getGradleExecutablePath(self) -> Path:\n if self.__couchEditRoot is None:\n startPath = Path('.').absolute()\n path = startPath\n\n ret = None\n\n while ret is 
None:\n print(\"Trying {0}\".format(path))\n\n try:\n ret = self.__getGradleFileInPath(path)\n except FileNotFoundError:\n # try the parent if file not found\n pass\n except StopIteration:\n pass\n\n parent = path.parent\n\n if parent == path:\n # we have reached the root\n raise Exception(\"Cannot find {0} in {1} or one of its parents\".format(\n self.__gradleFileName,\n startPath\n ))\n\n path = parent\n else:\n path = Path(self.__couchEditRoot).absolute()\n\n try:\n ret = self.__getGradleFileInPath(path)\n except StopIteration:\n raise Exception(\"No {0} found in {1}!\".format(self.__gradleFileName, path))\n\n return ret\n\n def __getGradleFileInPath(self, path: Path) -> Path:\n \"\"\"\n :param path:\n :return: Path object representing the gradle executable file that has been found.\n :exception: StopIteration if no Gradle file could be found in the current\n \"\"\"\n candidate = path.glob(self.__gradleFileName)\n\n return next(candidate)\n\n def __getGradleFileName(self) -> str:\n if os.name == 'nt':\n return 'gradlew.bat'\n\n return 'gradlew'\n","sub_path":"utils/evaluation/testrunner/GradleFinder.py","file_name":"GradleFinder.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"551647113","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Eric Aderne\n\"\"\"\n# Arquivo para executar a API\n\nimport os\n\nfrom api import app\n\nif __name__ == '__main__':\n app.debug = True\n host = os.environ.get('HOST','192.168.200.154')\n port = int(os.environ.get('PORT',81))\n app.run(host=host, port=port)\n #app.run\n \n \n","sub_path":"run_api.py","file_name":"run_api.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"569092626","text":"# je veux un objet mail qui me dise\n# questions necessitant acces à un seul champ:\n# - si un mail contient une pj de nom de fichier 'untel'\n# - si un mail contient une pj de type 'untel'\n# - le from\n# - le sujet\n# - la date du mail\n# - le nom du fichier dont provient l objet mail\n# questions necessitant acces à plusieurs champs: \n# - la liste des pj par nom et type\n\n\n# comme toutes ces questions sont déjà résolues par le module mail,\n# ecrire une fonction enveloppante pour les pj et pour les mails\nimport email\nfrom email import policy\nfrom email.parser import BytesParser\nfrom email.iterators import walk\nfrom dateutil.parser import *\nfrom io import StringIO\nclass Objet_mail(email.message.EmailMessage):\n #class Objet_mail(email.message.EmailMessage):\n #AttributeError: 'Objet_piece_jointe' object has no attribute '_headers'\n #class Objet_mail(email.parser.BytesParser):\n #AttributeError: 'Objet_piece_jointe' object has no attribute '_headers'\n\n def __init__(self,nom_fichier_mail):\n self.setName(nom_fichier_mail)\n self.setConteneurMail()\n self.setFromToDateSubject()\n self.setListePiecesJointes()\n self.setListeNomFichiersPiecesJointes()\n\n # set / get \n\n def setName(self,nom_fichier_mail):\n print('setName: debut du parse de {}'.format(nom_fichier_mail))\n if nom_fichier_mail.endswith('.eml'):\n self.nom_fichier_mail = nom_fichier_mail\n else:\n raise ValueError(\"le nom de fichier doit se terminer par eml: {}\".format(nom_fichier_mail))\n\n\n \n \n def setConteneurMail(self):\n with open(self.nom_fichier_mail, 'rb') as fp:\n self.conteneurMail = BytesParser(policy=policy.default).parse(fp, headersonly=False)\n\n def setFromToDateSubject(self):\n 
#self.isfrom = self.conteneurMail.get_all('From',None)\n self.isfrom = self.conteneurMail.get('From',None)\n #self.isto = self.conteneurMail.get_all('To',None)\n self.isto = self.conteneurMail.get('To',None)\n self.isdate = parse(self.conteneurMail.get('Date',None))\n self.issubject = self.conteneurMail.get('Subject',None)\n\n \n \n\n def setListePiecesJointes(self):\n #http://blog.magiksys.net/parsing-email-using-python-content\n #https://www.ianlewis.org/en/parsing-email-attachments-python\n # https://docs.python.org/3/library/email.message.html#email.message.EmailMessage.get_content_type\n self.liste_pieces_jointes = []\n compteur_piece_jointe = 0\n for part in self.conteneurMail.iter_attachments():\n non_nul_si_piece_jointe = cree_Objet_Piece_Jointe(part,self, compteur_piece_jointe + 1)\n if non_nul_si_piece_jointe:\n compteur_piece_jointe = compteur_piece_jointe + 1\n self.liste_pieces_jointes.append(non_nul_si_piece_jointe)\n \n\n def setListeNomFichiersPiecesJointes(self):\n self.liste_noms_fichiers_pieces_jointes = []\n if self.liste_pieces_jointes:\n #self.liste_noms_fichiers_pieces_jointes = []\n from module_utilitaire_fichier import clean_windows_filename_string\n for pj in self.liste_pieces_jointes:\n self.liste_noms_fichiers_pieces_jointes.append(clean_windows_filename_string(pj.getFileName()))\n\n\n # getters \n\n\n def getName(self):\n return self.nom_fichier_mail\n\n def getDate(self):\n return self.isdate\n\n def getSubject(self):\n return self.issubject\n\n def getFrom(self):\n return self.isfrom\n\n # utilitaires\n\n def getAttachmentCount(self):\n return len(self.liste_pieces_jointes)\n\n def getAllAttachmentFilenames(self):\n try:\n if self.liste_noms_fichiers_pieces_jointes:\n return self.liste_noms_fichiers_pieces_jointes\n except AttributeError as a:\n #TODO: AttributeError: 'Objet_mail' object has no attribute 'liste_noms_fichiers_pieces_jointes'\n print(a)\n print('objet mail a pour attributs: {}'.format(dir(self)))\n else:\n return []\n\n def getAllSearchableMailheaderKeyNames(self):\n return self.conteneurMail.keys()\n\n def getNormalizedName(self,shorten=False):\n import re\n #from string plante si pas dechamp mail aliase.\n #traiter ce cas\n fromstring = re.search(r'\\<(.*)\\>', self.getFrom()).group(1)\n if not fromstring:\n fromstring = self.getFrom()\n if not fromstring:\n raise ValueError(\"getNormalizedName : fromstring vide malgre bricolage\")\n #compter pj\n compter_pj = 'has ' + str(self.getAttachmentCount()) + ' pj'\n if not compter_pj:\n raise ValueError(\"getNormalizedName : Compter pj vide magre bricolage\")\n #attachmentfilenames\n try:\n attchfilename = '_'.join(self.getAllAttachmentFilenames())\n except AttributeError:\n print(\"pas trouve de chmp attachfilename pour mail {} alor uairait du renvoyer vide si pas d attc. 
pb plantae lors crea pj\".format(\n self.getName()))\n raise AttributeError\n if not shorten:\n filename_with_unauthorized_characters = ''.join([\n self.getDate().isoformat(sep=' '),\n self.getSubject(),\n fromstring,\n compter_pj,\n attchfilename,\n '.eml'])\n if shorten:\n filename_with_unauthorized_characters = ''.join([\n self.getDate().isoformat(sep=' '),\n '.eml'])\n \n from module_utilitaire_fichier import clean_windows_filename_string\n \n return clean_windows_filename_string(filename_with_unauthorized_characters)\n \n \n\ndef cree_Objet_Piece_Jointe(mailpart,mailinstance,numeropiecejointe):\n class Objet_piece_jointe(object):\n #class Objet_piece_jointe(email.message.EmailMessage):\n #class Objet_piece_jointe(email.message.MIMEPart):\n def __init__(self,mailpart,mailinstance):\n self.mailinstance = mailinstance\n self.numeropiecejointe = numeropiecejointe\n self.setRawData(mailpart)\n self.parseAttachment(mailpart,mailinstance)\n\n\n #setters\n def setRawData(self,mailpart):\n self.file_data = mailpart.get_content()\n #self.attachment = StringIO(self.file_data)\n #TypeError: initial_value must be str or None, not bytes\n\n\n \n \n\n \n def parseAttachment(self,mailpart,mailinstance):\n #bug noname (30).eml:\n # il a deux pieces jointes\n # il a un nom de fichier trop long qui utilise la continuation de ligne\n # file*1*\n track_filename_in_disposition = [\"filename\",\" filename\"]\n track_filename_in_content = ['name']\n #import pdb; pdb.set_trace()\n self.content_disposition = mailpart.get(\"Content-Disposition\", None)\n if self.content_disposition:\n self.mailinstance = mailinstance\n dispositions = self.content_disposition.strip().split(\";\")\n #print(dispositions)\n \n if not bool(self.content_disposition and (dispositions[0].lower() == \"attachment\"\n or dispositions[1].split('=')[0] in track_filename_in_disposition\n or mailpart.get('Name',None))):\n raise ValueError(\"le champ content_disposition {}n a pas la forme attendue, pas plus que name pour sauver la mise {} pour la piece jointe numero {} \".format(self.content_disposition,mailpart.get('Name',None),self.numeropiecejointe))\n self.content_type = mailpart.get_content_type()\n self.size = len(self.file_data)\n self.name = None\n self.create_date = None\n self.mod_date = None\n self.read_date = None\n # traiter filename separement\n #http://blog.magiksys.net/parsing-email-using-python-content get_ilename\n \n self.name = mailpart.get_filename(None) \n for param in dispositions[1:]:\n name,value = param.split(\"=\")\n name = name.lower()\n #dbg_print(name,)\n\n if name == \"filename\":\n if not self.name:\n self.name = value # todo strip name\n #shitty hack seing what domo mailserver spits\n elif name == \" filename\":\n if not self.name:\n self.name = value #todo strip name\n elif name == \"create-date\":\n self.create_date = parse(value) \n elif name == \"modification-date\":\n self.mod_date = parse(value) \n elif name == \"read-date\":\n self.read_date = parse(value)\n # correction des parametres necessaires\n # attachment has a name\n if not self.name:\n if mailpart.get('name',None):\n self.name = mailpart.get('name',None)\n print(self.name)\n else:\n # pas la peine de conttinuer il y a une couille\n raise ValueError(\"la piece jointe doit avoir un nom\")\n # attachment has a date\n if self.create_date:\n print(\"create_date vaut: {}\".format(self.create_date))\n if not self.create_date:\n self.create_date = mailinstance.getDate()\n print(\"create exite pa . 
je ui attribue: {}\".format(self.create_date))\n \n self.is_attachment = True\n \n else:\n self.is_attachment = False\n\n #getters\n\n def getFileName(self):\n return self.name\n\n def getFileType(self):\n return self.content_type\n\n #utilitaires\n \n\n def getFile(self):\n dest = self.getFileName()\n with open(dest, 'wb') as g:\n g.write(self.file_data)\n\n \n \n \n\n \n\n # fin de la décla de la classe Piece_jointe\n\n # execution de la fonction cree_piece_jointe: \n pj = Objet_piece_jointe(mailpart,mailinstance)\n if pj.is_attachment:\n return pj\n else:\n return None\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n","sub_path":"parse_email/module_objet_mail.py","file_name":"module_objet_mail.py","file_ext":"py","file_size_in_byte":10822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"91272880","text":"import argparse\nimport os\nimport time\nimport torch\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.autograd import Variable\n\nfrom utils.dataset import *\nfrom utils.models import *\nfrom utils.logger import *\n\nclass TrainBase:\n def __init__(self, args):\n self.args = args\n self.min_val_loss = 10000000000\n self.min_val_tag_loss = 1000000\n\n self.min_train_loss = 10000000000\n self.min_train_tag_loss = 1000000\n\n self.params = None\n\n self._init_model_path()\n self.model_dir = self._init_model_dir()\n self.writer = self._init_writer()\n self.train_transform = self._init_train_transform()\n self.val_transform = self._init_val_transform()\n self.model_state_dict = self._load_model_state_dict()\n\n self.train_data_loader = self._init_data_loader(self.args.train_file_cc, self.args.train_file_mlo, self.train_transform)\n self.val_data_loader = self._init_data_loader(self.args.val_file_cc, self.args.val_file_mlo, self.val_transform)\n\n self.extractor = self._init_visual_extractor()\n self.attention = self._init_attention()\n self.mlc = self._init_mlc()\n\n self.ce_criterion = self._init_ce_criterion()\n self.mse_criterion = self._init_mse_criterion()\n\n self.optimizer = self._init_optimizer()\n self.scheduler = self._init_schedule()\n self.logger = self._init_logger()\n self.writer.write(\"{}\\n\".format(self.args))\n\n def train(self):\n for epoch_id in range(self.start_epoch, self.args.epochs):\n train_tag_loss = self._epoch_train()\n val_tag_loss = self._epoch_val()\n print(epoch_id)\n print(train_tag_loss)\n print(val_tag_loss)\n\n if self.args.mode == 'train':\n self.scheduler.step(train_tag_loss)\n else:\n self.scheduler.step(val_tag_loss)\n self.writer.write('[{} - Epoch {}] train_tag_loss:{} - val_tag_loss:{} - lr:{}\\n'.format(self._get_now(),\n epoch_id,\n train_tag_loss,\n val_tag_loss,\n self.optimizer.param_groups[0]['lr']))\n self._save_model(epoch_id, val_tag_loss, train_tag_loss)\n self._log(train_tags_loss=train_tag_loss, val_tags_loss=val_tag_loss, lr=self.optimizer.param_groups[0]['lr'], epoch=epoch_id)\n\n def _epoch_train(self):\n raise NotImplementedError\n\n def _epoch_val(self):\n raise NotImplementedError\n\n def _init_model_path(self):\n if not os.path.exists(self.args.model_path):\n os.makedirs(self.args.model_path)\n\n def _init_model_dir(self):\n model_dir = os.path.join(self.args.model_path, self.args.saved_model_name)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n model_dir = os.path.join(model_dir, 
self._get_now().replace(':','-'))\n        if not os.path.exists(model_dir):\n            os.makedirs(model_dir)\n        return model_dir\n\n    def _init_writer(self):\n        writer = open(os.path.join(self.model_dir, 'log.txt'), 'w')\n        return writer\n\n    def _init_train_transform(self):\n        transform = transforms.Compose([\n            transforms.Resize(self.args.resize),\n            transforms.RandomCrop(self.args.crop_size),\n            transforms.RandomHorizontalFlip(),\n            transforms.ToTensor(),\n            transforms.Normalize((0.485, 0.456, 0.406),\n                                 (0.229, 0.224, 0.225))])\n        return transform\n\n    def _init_val_transform(self):\n        # Use a deterministic center crop (not a random one) so validation results are reproducible.\n        transform = transforms.Compose([\n            transforms.Resize(self.args.resize),\n            transforms.CenterCrop(self.args.crop_size),\n            transforms.ToTensor(),\n            transforms.Normalize((0.485, 0.456, 0.406),\n                                 (0.229, 0.224, 0.225))])\n        return transform\n\n    def _load_model_state_dict(self):\n        self.start_epoch = 0\n        try:\n            model_state = torch.load(self.args.load_model_path)\n            self.start_epoch = model_state['epoch']\n            self.writer.write('[Load model-{} succeeded!]\\n'.format(self.args.load_model_path))\n            self.writer.write('Load from epoch {}\\n'.format(model_state['epoch']))\n            return model_state\n        except Exception as err:\n            self.writer.write('[Load model failed] {}\\n'.format(err))\n            return None\n\n    def _init_data_loader(self, file_list_cc, file_list_mlo, transform):\n        data_loader = get_loader(image_dir=self.args.image_dir,\n                                 file_list_cc=file_list_cc,\n                                 file_list_mlo=file_list_mlo,\n                                 transform=transform,\n                                 batch_size=self.args.batch_size,\n                                 shuffle=True)\n        return data_loader\n\n    def _init_visual_extractor(self):\n        model = VisualFeatureExtractor(model_name=self.args.visual_model_name,\n                                       pretrained=self.args.pretrained,\n                                       visual_size=2048)\n        try:\n            model_state = torch.load(self.args.load_visual_model_path)\n            model.load_state_dict(model_state['model'])\n            self.writer.write('[Load feature extractor succeeded!]\\n')\n        except Exception as err:\n            self.writer.write('[Load feature extractor failed] {}\\n'.format(err))\n\n        if not self.args.visual_trained:\n            for i, param in enumerate(model.parameters()):\n                param.requires_grad = False\n        else:\n            if self.params:\n                self.params += list(model.parameters())\n            else:\n                self.params = list(model.parameters())\n\n        if self.args.cuda:\n            model = model.cuda()\n        return model\n\n    def _init_attention(self):\n        model = FeatureAttention(visual_size=self.extractor.out_features, hidden_size=self.args.hidden_size)\n        try:\n            model_state = torch.load(self.args.load_attention_model_path)\n            model.load_state_dict(model_state['model'])\n            self.writer.write('[Load Attention model succeeded!]\\n')\n        except Exception as err:\n            self.writer.write('[Load Attention model failed] {}\\n'.format(err))\n\n        if not self.args.att_trained:\n            for i, param in enumerate(model.parameters()):\n                param.requires_grad = False\n        else:\n            if self.params:\n                self.params += list(model.parameters())\n            else:\n                self.params = list(model.parameters())\n        if self.args.cuda:\n            model = model.cuda()\n        return model\n\n    def _init_mlc(self):\n        model = MLC(classes=self.args.classes, fc_in_features=self.extractor.out_features, k=self.args.k)\n        try:\n            model_state = torch.load(self.args.load_mlc_model_path)\n            model.load_state_dict(model_state['model'])\n            self.writer.write('[Load MLC succeeded!]\\n')\n        except Exception as err:\n            self.writer.write('[Load MLC failed] {}\\n'.format(err))\n\n        if not self.args.mlc_trained:\n            for i, param in enumerate(model.parameters()):\n                param.requires_grad = False\n        else:\n            if self.params:\n                self.params += list(model.parameters())\n            else:\n                self.params = list(model.parameters())\n        if self.args.cuda:\n            model = model.cuda()\n        return model\n\n    @staticmethod\n    def _init_ce_criterion():\n        return nn.CrossEntropyLoss(size_average=False, reduce=False)\n\n    @staticmethod\n    def _init_mse_criterion():\n        return nn.MSELoss()\n\n    def _init_optimizer(self):\n        return torch.optim.Adam(params=self.params, lr=self.args.learning_rate)\n\n    def _init_schedule(self):\n        scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=self.args.patience, factor=0.1)\n        return scheduler\n\n    def _init_logeer(self):\n        # Misspelled legacy name kept so existing callers don't break; delegates to _init_logger below.\n        return self._init_logger()\n\n    def _get_now(self):\n        return str(time.strftime('%Y%m%d-%H:%M', time.gmtime()))\n\n    def _to_var(self, x, requires_grad=True):\n        if self.args.cuda:\n            x = x.cuda()\n        return Variable(x, requires_grad=requires_grad)\n\n    def _save_model(self, epoch_id, val_tag_loss, train_tag_loss):\n        def save_whole_model(_filename):\n            self.writer.write(\"Saved Model in {}\\n\".format(_filename))\n            torch.save({'extractor': self.extractor.state_dict(),\n                        'mlc': self.mlc.state_dict(),\n                        'optimizer': self.optimizer.state_dict(),\n                        'epoch': epoch_id},\n                       os.path.join(self.model_dir, \"{}\".format(_filename)))\n\n        def save_part_model(_filename, value):\n            self.writer.write(\"Saved Model in {}\\n\".format(_filename))\n            torch.save({\"model\": value},\n                       os.path.join(self.model_dir, \"{}\".format(_filename)))\n\n        if val_tag_loss < self.min_val_tag_loss:\n            file_name = \"val_best_loss.pth.tar\"\n            save_whole_model(file_name)\n            self.min_val_tag_loss = val_tag_loss\n\n        if train_tag_loss < self.min_train_tag_loss:\n            file_name = \"train_best_loss.pth.tar\"\n            save_whole_model(file_name)\n            self.min_train_tag_loss = train_tag_loss\n\n    def _log(self,\n             train_tags_loss,\n             val_tags_loss,\n             lr,\n             epoch):\n        info = {\n            'train tags loss': train_tags_loss,\n            'val tags loss': val_tags_loss,\n            'learning rate': lr\n        }\n\n        for tag, value in info.items():\n            self.logger.scalar_summary(tag, value, epoch + 1)\n\n    def _init_logger(self):\n        logger = Logger(os.path.join(self.model_dir, 'logs'))\n        return logger\n\n\nclass Trainer(TrainBase):\n    def __init__(self, args):\n        TrainBase.__init__(self, args)\n        self.args = args\n\n    def _epoch_train(self):\n        tag_loss = 0\n        self.extractor.train()\n        self.mlc.train()\n        self.attention.train()\n\n        for i, (images_cc, images_mlo, _, _, label_cc, label_mlo) in enumerate(self.train_data_loader):\n            images_cc = self._to_var(images_cc)\n            images_mlo = self._to_var(images_mlo)\n            label_cc = self._to_var(label_cc, requires_grad=False)\n\n            avg_feature = self.extractor.forward_cat(images_cc, images_mlo)\n            tags = self.mlc.forward(avg_feature)\n            batch_tag_loss = self.mse_criterion(tags, label_cc).sum()\n\n            self.optimizer.zero_grad()\n            batch_tag_loss.backward()\n\n            if self.args.clip > 0:\n                torch.nn.utils.clip_grad_norm(self.extractor.parameters(), self.args.clip)\n                torch.nn.utils.clip_grad_norm(self.mlc.parameters(), self.args.clip)\n                torch.nn.utils.clip_grad_norm(self.attention.parameters(), self.args.clip)\n            self.optimizer.step()\n\n            tag_loss += self.args.lambda_tag * batch_tag_loss.data\n        return tag_loss\n\n    def _epoch_val(self):\n        tag_loss = 0\n        self.extractor.eval()\n        self.mlc.eval()\n        self.attention.eval()\n\n        for i, (images_cc, images_mlo, _, _, label_cc, label_mlo) in enumerate(self.val_data_loader):\n            images_cc = self._to_var(images_cc, requires_grad=False)\n            images_mlo = self._to_var(images_mlo, requires_grad=False)\n            label_cc = self._to_var(label_cc, requires_grad=False)\n\n            avg_feature = self.extractor.forward_cat(images_cc, images_mlo)\n            tags = self.mlc.forward(avg_feature)\n            batch_tag_loss = self.mse_criterion(tags, label_cc).sum()\n\n            tag_loss += self.args.lambda_tag * batch_tag_loss.data\n        return tag_loss\n\nif __name__ == '__main__':\n    import warnings\n    warnings.filterwarnings(\"ignore\")\n    parser = argparse.ArgumentParser()\n\n    # Data Argument\n    parser.add_argument('--patience', type=int, default=20)\n    parser.add_argument('--mode', type=str, default='train')\n\n    # Path Argument\n    parser.add_argument('--image_dir', type=str, default='./data/images',\n                        help='the path for images')\n    parser.add_argument('--train_file_cc', type=str, default='./data/train_data_cc.txt',\n                        help='the train_cc one hot array')\n    parser.add_argument('--train_file_mlo', type=str, default='./data/train_data_mlo.txt',\n                        help='the train_mlo one hot array')\n    parser.add_argument('--val_file_cc', type=str, default='./data/val_data_cc.txt',\n                        help='the test_cc one hot array')\n    parser.add_argument('--val_file_mlo', type=str, default='./data/val_data_mlo.txt',\n                        help='the test_mlo one hot array')\n\n    # Transform Argument\n    parser.add_argument('--resize', type=int, default=256,\n                        help='size for resizing images')\n    parser.add_argument('--crop_size', type=int, default=224,\n                        help='size of randomly cropping images')\n\n    # Save/Load model Argument\n    parser.add_argument('--model_path', type=str, default='./models',\n                        help='path for saving trained models')\n    parser.add_argument('--load_model_path', type=str, default='',\n                        help='the path of loaded model')\n    parser.add_argument('--saved_model_name', type=str, default='breast_model',\n                        help='the name of saved model')\n\n    # Model Argument\n    parser.add_argument('--momentum', type=float, default=0.1)\n\n    # Feature Extractor\n    parser.add_argument('--visual_model_name', type=str, default='resnet50',\n                        help='CNN model name')\n    parser.add_argument('--pretrained', action='store_true', default=True,\n                        help='use a pretrained CNN backbone')\n    parser.add_argument('--load_visual_model_path', type=str, default='.')\n    parser.add_argument('--visual_trained', action='store_true', default=True,\n                        help='whether train visual extractor or not')\n\n    # Attention\n    parser.add_argument('--hidden_size', type=int, default=512)\n    parser.add_argument('--load_attention_model_path', type=str, default='.')\n    parser.add_argument('--att_trained', action='store_true', default=True)\n\n    # MLC\n    parser.add_argument('--classes', type=int, default=9)\n    parser.add_argument('--k', type=int, default=9)\n    parser.add_argument('--load_mlc_model_path', type=str, default='.')\n    parser.add_argument('--mlc_trained', action='store_true', default=True)\n\n    # Training Argument\n    parser.add_argument('--batch_size', type=int, default=16)\n    parser.add_argument('--learning_rate', type=float, default=0.001)\n    parser.add_argument('--epochs', type=int, default=5000)\n    parser.add_argument('--clip', type=float, default=-1,\n                        help='gradient clip, -1 means no clip (default: -1)')\n\n    # Loss Function\n    parser.add_argument('--lambda_tag', type=float, default=10000)\n\n    args = parser.parse_args()\n    args.cuda = torch.cuda.is_available()\n\n    trainer = Trainer(args)\n    trainer.train()","sub_path":"train_cat.py","file_name":"train_cat.py","file_ext":"py","file_size_in_byte":15720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"53063213","text":"#partLookup.py\n#contains the functions for querying the database for more information on a part.\n#these functions load the part in full and print it out to the screen. If the notes are included, the option to edit them is given.\nfrom heodb import componentFactoryByPartNumber, callEditor, formatTitles\nfrom cmdmenus import calcScreen\nimport os, subprocess\n\ndef notesAppend(notes):\n\tstrInArray = []\n\tstrInArray.append('\\n\\n\\n')\n\trows, columns = calcScreen()\n\tstrInArray.append(' ' + '=' * (int(columns)-6) + ' ')  #line across the entire screen\n\tstrInArray.append('\\n\\n')\n\tstrInArray.append(notes)\n\treturn strInArray\n\ndef partLookup(db=None, partNumLong=None, mode='FULL'):\n\tcompObj = componentFactoryByPartNumber(db, partNumLong)\n\tcompObj.loadPart()\n\teditorTups = []\n\tfor table in compObj.tables:\n\t\tfor elem in table.elementTable.elements:\n\t\t\teditorTups.append(elem.titlePair)\n\tstrInArray = []\n\tif mode.upper() in ['FULL', 'NOTES']:\n\t\tstrInArray = formatTitles(editorTups)\n\tif mode.upper() in ['FULL', 'VALUES']:\n\t\tstrInArray += notesAppend(compObj.ManuInfo.notes.value)\n\tos.remove(callEditor(['NEW', ''.join(strInArray).rstrip()]))\n\ndef partLookupFull(db, partNumLong):\n\tpartLookup(db=db, partNumLong=partNumLong, mode='FULL')\n\ndef partLookupNotes(db, partNumLong):\n\tpartLookup(db=db, partNumLong=partNumLong, mode='NOTES')\n\ndef partLookupValues(db, partNumLong):\n\tpartLookup(db=db, partNumLong=partNumLong, mode='VALUES')\n","sub_path":"heodb/partLookup.py","file_name":"partLookup.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"407842881","text":"import csv\nimport numpy as np\nfrom datetime import datetime\n\ndata = []\nwith open('small_uber_dataset.csv', 'r') as f:\n\tcount = 0\n\tfor line in f:\n\t\tif count == 0:\n\t\t\tcount += 1\n\t\t\tcontinue\n\t\tsplit_line = line.split(',')\n\t\tdate_str, lat_str, lon_str, base_str = split_line\n\t\tsplit_line[0] = datetime.strptime(date_str, '%m/%d/%y %H:%M')\n\t\tsplit_line[1] = float(lat_str)\n\t\tsplit_line[2] = float(lon_str)\n\t\tdata.append(split_line[0:3])\n\nnp_data = np.array(data)\n\n#find max & min latitude & longitude\nmax_lat = max(max([np_data[:,1]]))\nmin_lat = min(min([np_data[:,1]]))\n\nmax_lon = max(max([np_data[:,2]]))\nmin_lon = min(min([np_data[:,2]]))\n\n#print(max_lat, min_lat, max_lon, min_lon)\n\n#assigns each pickup to the closest point. 
delta is the distance between points\ndef assign_zones(max_lat, min_lat, max_lon, min_lon, delta, data):\n\tlat_range=np.arange(min_lat, max_lat, delta)\n\tlon_range=np.arange(min_lon, max_lon, delta)\n\n\t#prints the list of points\n\tprint(lat_range)\n\tprint(lon_range)\n\n\t#line[3] and line[4] are the coordinates of closest point\n\tfor line in data:\n\t\tline.append(0)\n\t\tline.append(0)\t\n\t\tfor lat in lat_range:\n\t\t\tif line[1] <= lat + delta/2:\n\t\t\t\tline[3] = lat\n\t\t\t\tbreak\n\t\tfor lon in lon_range:\n\t\t\tif line[2] <= lon + delta/2:\n\t\t\t\tline[4] = lon\n\t\t\t\tbreak\t\n\treturn\n\ndelta=0.01\nassign_zones(max_lat, min_lat, max_lon, min_lon, delta, data)\n#print(data)\n\n\n\n\n\n\n","sub_path":"zones.py","file_name":"zones.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"320396559","text":"import time\nimport functools\n\nimport requests\n\n\nRETRY_SLEEP = 3.0 # seconds\n\n\ndef retry(exc_cls, tries=3, logger=None):\n \"\"\"\n Retry decorator\n :exc_cls: class to follow\n :tries: number of tries\n \"\"\"\n def decoy(f):\n @functools.wraps(f)\n def functor(*args, **kwargs):\n t = tries # copy\n # last one without catching\n while t > 1:\n try:\n return f(*args, **kwargs)\n except exc_cls as ex:\n if logger:\n msg = 'Retry for \"{}\" / attempts: {}'.format(\n f.__name__, (t - 1))\n logger.error(msg)\n logger.exception('retry error')\n t -= 1\n return f(*args, **kwargs)\n return functor\n return decoy\n\n\ndef wrapper(\n context,\n exceptions=None,\n attempts=None,\n):\n \"\"\"\n result = retry.wrapper(\n context={\n 'function': requests.get,\n 'args': ('http://example.com/'),\n 'kwargs': {\n 'auth': AUTH_DATA,\n },\n },\n exceptions=(requests.exceptions.RequestException),\n attempts=30,\n )\n \"\"\"\n #\n DEFAULT_EXCEPTIONS = (Exception,)\n DEFAULT_RETRY_ATTEMPTS = 3\n #\n if exceptions is None:\n exceptions = DEFAULT_EXCEPTIONS\n if attempts is None:\n attempts = DEFAULT_RETRY_ATTEMPTS\n #\n if not context.get('args'):\n context['args'] = []\n if not context.get('kwargs'):\n context['kwargs'] = {}\n #\n for attempt in xrange(attempts):\n try:\n result = context['function'](\n *context['args'], **context['kwargs'])\n break\n except exceptions as ex:\n if attempt >= (attempts - 1):\n raise\n time.sleep(attempt * RETRY_SLEEP)\n continue\n return result\n\n\ndef requests_wrapper(\n context,\n attempts=None,\n sessions=False,\n):\n \"\"\"\n response = retry.requests_wrapper(\n context={\n 'function': 'get',\n 'args': ('http://example.com/'),\n 'kwargs': {\n 'auth': AUTH_DATA,\n },\n },\n attempts=30,\n )\n \"\"\"\n DEFAULT_RETRY_ATTEMPTS = 3\n #\n if attempts is None:\n attempts = DEFAULT_RETRY_ATTEMPTS\n #\n if not context.get('args'):\n context['args'] = []\n if not context.get('kwargs'):\n context['kwargs'] = {}\n #\n if sessions:\n session = requests.session()\n func = getattr(session, context['function'].lower())\n else:\n func = getattr(requests, context['function'].lower())\n #\n for attempt in xrange(attempts):\n try:\n response = func(\n *context['args'], **context['kwargs'])\n if response.ok:\n break\n if response.status_code < 500:\n break\n if attempt >= (attempts - 1):\n break\n try:\n r_data = response.json()\n except ValueError:\n pass\n time.sleep(attempt * RETRY_SLEEP)\n continue\n except requests.exceptions.RequestException:\n if attempt >= (attempts - 1):\n raise\n time.sleep(attempt * RETRY_SLEEP)\n continue\n return 
response\n","sub_path":"acme/tools/retry.py","file_name":"retry.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504898444","text":"\"\"\"\nby Quangbd\n\"\"\"\nimport math\nimport heapq # for retrieval topK\nimport numpy as np\n\n_session = None\n_test_ratings = None\n_test_negatives = None\n_K = None\n_prediction = None\n_user_index = None\n_item_index = None\n_cate_index = None\n_cates_movies = None\n\n\ndef evaluate_model(session, test_ratings, test_negatives, top_k, prediction, user_index, item_index, cate_index, cates_movies):\n \"\"\"\n Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation\n Return: score of each test rating.\n \"\"\"\n global _session\n global _test_ratings\n global _test_negatives\n global _K\n global _prediction\n global _user_index\n global _item_index\n global _cate_index\n global _cates_movies\n _session = session\n _test_ratings = test_ratings\n _test_negatives = test_negatives\n _K = top_k\n _prediction = prediction\n _user_index = user_index\n _item_index = item_index\n _cate_index = cate_index\n _cates_movies = cates_movies\n\n hits, ndcgs = [], []\n # Single thread\n for idx in range(len(_test_ratings)):\n hr, ndcg = eval_one_rating(idx, _cates_movies)\n hits.append(hr)\n ndcgs.append(ndcg)\n return hits, ndcgs\n\n\ndef eval_one_rating(idx, cates_movies):\n rating = _test_ratings[idx]\n items = _test_negatives[idx]\n u = rating[0]\n gt_item = rating[1]\n items.append(gt_item)\n # Get prediction scores\n map_item_score = {}\n users = np.full(len(items), u, dtype='int32')\n # items = np.array(items)\n\n users2, items2, cates2 = [], [], []\n for i in range(len(items)):\n if items[i] in cates_movies:\n for j in cates_movies[items[i]]:\n users2.append(users[i])\n items2.append(items[i])\n cates2.append(j)\n else:\n users2.append(users[i])\n items2.append(items[i])\n cates2.append(18)\n predictions = _session.run(_prediction, feed_dict={_user_index: users2, _item_index: items2, _cate_index: cates2})\n for i in range(len(items2)):\n item = items2[i]\n map_item_score[item] = predictions[i]\n # Evaluate top rank list\n rank_list = heapq.nlargest(int(len(items2) / int(_K)), map_item_score, key=map_item_score.get)\n\n hr = get_hit_ratio(rank_list, gt_item)\n ndcg = get_ndcg(rank_list, gt_item)\n\n # print(gt_item)\n # print(rank_list)\n # print(hr)\n # print(ndcg)\n # print('----------------------------')\n return hr, ndcg\n\n\ndef get_hit_ratio(rank_list, gt_item):\n for item in rank_list:\n if item == gt_item:\n return 1.0\n return 0\n\n\ndef get_ndcg(rank_list, gt_item):\n for i in range(len(rank_list)):\n item = rank_list[i]\n if item == gt_item:\n return math.log(2) / math.log(i + 2)\n return 0\n","sub_path":"qevaluate2.py","file_name":"qevaluate2.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260536212","text":"print(\"Введите целое число, заканчивая нажатием Enter; или нажмите просто Enter, чтобы завершить\")\n\ntotal = 0 # Сумма\ncount = 0 # Количество чисел\np1 = [] # Неотсортированный список (для медианы)\np = [] # Отсортированный список (для max/min)\nwhile True:\n line = input(\"Число: \") # Ввод чисел\n if line: # Если есть значение\n try:\n number = int(line) # П��таемся преобразовать значение в число\n except ValueError as err:\n print(err)\n continue\n p1.append(int(number)) # Добавляем число в неотсортированный список\n 
p.append(int(number)) # Добавляем число в отсортированный список\n c = count # Чтобы не изменение \"c\" не влияло на \"count\"\n while 0 < c < len(p): # Сортировка\n if (p[c]) < (p[c - 1]): # Если число, стоящее справа меньше, чем слева, то..\n a = (p[c - 1]) # В стороннюю переменную записывается значение левого числа\n p[c - 1] = (p[c]) # Значение правого числа присваивается левому\n p[c] = a # Значение левого числа из сторонней переменной присваивается правому (как бы перестановка)\n c -= 1 # Осуществляем уменьшение индекса, чтобы все отсортировать\n total += number\n count += 1\n else:\n break\nif count:\n if count % 2 == 0: # Проверка количества на четность/нечетность для нахождения медианы\n med = (int(p1[count // 2]) + int(p1[(count // 2) - 1])) / 2\n else:\n med = int(p1[(count // 2)])\n print(\"Количество чисел:\", count, \"Сумма:\", total)\n print(\"Среднее арифметическое:\", total / count, \"Медиана: \", med)\n print(\"Минимальное значение: \", p[0], \"Максимальное значение: \", p[count - 1])\n print(p)\n","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"640718944","text":"from stock_tracer import StockTracerService\nfrom stock_tracer.common import transaction, API\nfrom stock_tracer.model import Stock\nfrom stock_tracer.operation.base import Base\n\n@API(StockTracerService, \"add_stock\")\nclass AddStockOperation(Base):\n \"\"\"AddStockOperation\"\"\"\n def __init__(self, exchange, symbol, *args, **kwargs):\n \"\"\"__init__\n\n :param exchange:\n :param symbol:\n :param *args:\n :param **kwargs:\n \"\"\"\n super(AddStockOperation, self).__init__(*args, **kwargs)\n self.exchange = exchange\n self.symbol = symbol\n\n def execute(self):\n \"\"\"execute the operation\"\"\"\n with transaction() as tx:\n stock = Stock(exchange=self.exchange, symbol=self.symbol)\n self.logger.info(\"Adding stock {0}\".format(stock))\n tx.add(stock)\n tx.flush()\n self.reply = str(stock)\n","sub_path":"operation/add_stock_operation.py","file_name":"add_stock_operation.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"383411606","text":"import os\r\nfrom PIL import Image\r\nimport cfg\r\nfrom multiprocessing import Pool\r\n\r\ndef create():\r\n base_dir = r'D:\\数据集\\COCO\\labels\\train2014'\r\n img_basedir = r'E:\\images\\train2014'\r\n lables_dir = r'D:\\数据集\\COCO\\labels\\train2014'\r\n labels = os.listdir(lables_dir)\r\n file = open('label.txt','a')\r\n for label in labels:\r\n img = Image.open(os.path.join(img_basedir,label.replace('txt','jpg')))\r\n img = img.resize((cfg.IMG_WIDTH,cfg.IMG_HEIGHT))\r\n img.save(os.path.join(r'C:\\images',label.replace('txt','jpg')))\r\n print(label)\r\n file.write(label.replace('txt', 'jpg') + ' ')\r\n with open(os.path.join(base_dir,label)) as f:\r\n texts = [line.strip() for line in f.readlines()]\r\n for text in texts:\r\n boxs = text.split(' ')\r\n cls = boxs[0]\r\n x1 = int(cfg.IMG_WIDTH * (float(boxs[1]) - float(boxs[3]) / 2))\r\n y1 = int(cfg.IMG_HEIGHT * (float(boxs[2]) - float(boxs[4]) / 2))\r\n x2 = int(cfg.IMG_WIDTH * (float(boxs[1]) + float(boxs[3]) / 2))\r\n y2 = int(cfg.IMG_HEIGHT * (float(boxs[2]) + float(boxs[4]) / 2))\r\n w1 = int(x2 - x1)\r\n h1 = int(y2 - y1)\r\n cx = int(x1 + w1//2)\r\n cy = int(y1 + h1//2)\r\n file.write(cls+' '+str(cx)+' '+str(cy)+' '+str(w1)+' '+str(h1))\r\n file.write(' ')\r\n 
file.write('\\n')\r\n file.close()\r\nif __name__ == '__main__':\r\n pool = Pool(50) # 创建25个线程\r\n pool.apply_async(create) # 让每个线程都去执行downloadmovie函数,传递的参数为(i,)\r\n pool.close() # 任务执行完毕以后就关闭线程\r\n pool.join() # 等待线程结束","sub_path":"labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"118972613","text":"import tornado.web\nimport tornado.escape\nimport games.wow\n\n\nclass WorldOfWarcraft(tornado.web.RequestHandler):\n def get(self):\n region = self.get_argument('region')\n realm = self.get_argument('realm')\n name = self.get_argument('name')\n\n character = games.wow.WorldOfWarcraft.get_character(region, realm, name)\n\n self.set_header('Access-Control-Allow-Origin', '*')\n self.write(character)","sub_path":"api/wow.py","file_name":"wow.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"563871873","text":"\nimport json\nfrom gensim import corpora\nfrom gensim import models\nfrom gensim import similarities\nfrom nltk import sent_tokenize, word_tokenize\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport wikipedia\n\ndef preprocess(sentence):\n #print (sentence+' ')\n sentence = sentence.lower()\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(sentence)\n filtered_words = [w for w in tokens if not w in stopwords.words('english')]\n return \" \".join(filtered_words)\nf= open(\"answersim.txt\",\"w+\")\nwith open(\"nfL6.json\") as json_file: \n \n data = json.load(json_file)\n for p in data:\n cat= p['main_category']\n ques= p['question']\n #print(ques)\n \n\n z=p['nbestanswers']\n tokenized_sents = [word_tokenize(sent) for sent in sent_tokenize(ques)]\n dictionary = corpora.Dictionary(tokenized_sents)\n dictionary.save('/tmp/deerwester.dict') \n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_transformer = TfidfTransformer()\n tfidf_ques = tfidf_transformer.fit_transform(dictionary)\n print (ques)\n for q in z:\n #print(tokenized_sents)\n tokenized_ans = [word_tokenize(sent) for sent in sent_tokenize(q)]\n dictionar = corpora.Dictionary(tokenized_ans)\n \n dictionar.save('/tmp/deerwester.dict') \n #print(tokenized_ans)\n \n #print(dictionary)\n \n #print(tfidf_ques.shape)\n \n #print(dictionar)\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_transformer = TfidfTransformer()\n e = len(dictionar)\n if e>1:\n tfidf_ans = tfidf_transformer.fit_transform(dictionar)\n #print(tfidf_ans.shape)\n corpus = [dictionary.doc2bow(text) for text in tokenized_ans]\n lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)\n vec_bow = dictionary.doc2bow(ques.lower().split())\n vec_lsi = lsi[vec_bow] \n #print(vec_lsi)\n #print(corpus)\n index=similarities.Similarity('nfL6',lsi[corpus],len(dictionary))\n sims = index[vec_lsi] \n #print(list(enumerate(sims)))\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n k=sims[0][0]\n lis = (tokenized_ans[k])\n str1 = ' '.join(lis)\n \n #print(tokenized_ans[k])\n tokenized_sents = [word_tokenize(sent) for sent in sent_tokenize(ques)]\n dictionary = corpora.Dictionary(tokenized_sents)\n dictionary.save('/tmp/deerwester.dict')\n lis = ques\n #print ques\n #str1 = ' '.join(lis)\n strm = preprocess(lis)\n #aug_data= wikipedia.search(strm)\n #dat = wikipedia.page(aug_data)\n #cont = dat.content\n # print (cont)\n# stri=' '.join(aug_data)\n# 
wiki_data=preprocess(stri)\n            \n            sty = str1.encode('utf-8')\n            f.write(sty)\n            print(sty)\n            f.write(\"\\n\\n\\n\")\n            \n            \n    # print (cont)\nf.close()\n\n","sub_path":"similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411065797","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport sys\nimport time\nimport shutil\nimport argparse\nimport ast\nimport logging\nimport numpy as np\nimport paddle.fluid as fluid\n\nfrom models import *\nfrom data.indoor3d_reader import Indoor3DReader\nfrom utils import *\n\nlogging.root.handlers = []\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\nnp.random.seed(1024)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\"PointNet++ semantic segmentation evaluation script\")\n    parser.add_argument(\n        '--model',\n        type=str,\n        default='MSG',\n        help='SSG or MSG model to evaluate, default MSG')\n    parser.add_argument(\n        '--use_gpu',\n        type=ast.literal_eval,\n        default=True,\n        help='default use gpu.')\n    parser.add_argument(\n        '--batch_size',\n        type=int,\n        default=1,\n        help='evaluation batch size, default 1')\n    parser.add_argument(\n        '--num_points',\n        type=int,\n        default=4096,\n        help='number of points in a sample, default: 4096')\n    parser.add_argument(\n        '--num_classes',\n        type=int,\n        default=13,\n        help='number of classes in dataset, default: 13')\n    parser.add_argument(\n        '--weights',\n        type=str,\n        default='checkpoints/200',\n        help='checkpoint path to load for evaluation')\n    parser.add_argument(\n        '--data_dir',\n        type=str,\n        default='dataset/Indoor3DSemSeg/indoor3d_sem_seg_hdf5_data',\n        help='dataset directory')\n    parser.add_argument(\n        '--log_interval',\n        type=int,\n        default=100,\n        help='mini-batch interval for logging.')\n    args = parser.parse_args()\n    return args\n\n\ndef eval():\n    args = parse_args()\n    print_arguments(args)\n    # check whether the installed paddle is compiled with GPU\n    check_gpu(args.use_gpu)\n\n    assert args.model in ['MSG', 'SSG'], \\\n        \"--model can only be 'MSG' or 'SSG'\"\n\n    # build model\n    startup = fluid.Program()\n    eval_prog = fluid.Program()\n    with fluid.program_guard(eval_prog, startup):\n        with fluid.unique_name.guard():\n            eval_model = PointNet2SemSegMSG(args.num_classes, args.num_points) \\\n                    if args.model == 'MSG' else \\\n                    PointNet2SemSegSSG(args.num_classes, args.num_points)\n            eval_model.build_model()\n            eval_feeds = eval_model.get_feeds()\n            eval_outputs = eval_model.get_outputs()\n            eval_loader = eval_model.get_loader()\n    eval_prog = eval_prog.clone(True)\n    eval_keys, eval_values = parse_outputs(eval_outputs)\n\n    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    exe.run(startup)\n\n    if not os.path.isdir(args.weights):\n        assert 
os.path.exists(\"{}.pdparams\".format(args.weights)), \\\n \"Given resume weight {}.pdparams not exist.\".format(args.weights)\n fluid.load(eval_prog, args.weights, exe)\n\n eval_compile_prog = fluid.compiler.CompiledProgram(eval_prog)\n \n # get reader\n indoor_reader = Indoor3DReader(args.data_dir)\n eval_reader = indoor_reader.get_reader(args.batch_size, args.num_points, mode='test')\n eval_loader.set_sample_list_generator(eval_reader, place)\n\n eval_stat = Stat()\n try:\n eval_loader.start()\n eval_iter = 0\n eval_periods = []\n while True:\n cur_time = time.time()\n eval_outs = exe.run(eval_compile_prog, fetch_list=eval_values)\n period = time.time() - cur_time\n eval_periods.append(period)\n eval_stat.update(eval_keys, eval_outs)\n if eval_iter % args.log_interval == 0:\n log_str = \"\"\n for name, value in zip(eval_keys, eval_outs):\n log_str += \"{}: {:.4f}, \".format(name, np.mean(value))\n logger.info(\"[EVAL] batch {}: {}time: {:.2f}\".format(eval_iter, log_str, period))\n eval_iter += 1\n except fluid.core.EOFException:\n logger.info(\"[EVAL] Eval finished, {}average time: {:.2f}\".format(eval_stat.get_mean_log(), np.mean(eval_periods[1:])))\n finally:\n eval_loader.reset()\n\n\nif __name__ == \"__main__\":\n eval()\n","sub_path":"PaddleCV/3d_vision/PointNet++/eval_seg.py","file_name":"eval_seg.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"347055113","text":"from pyspark import SparkContext \nimport random\nimport itertools as it\nimport datetime\nimport sys\n\ndef UsertoRowMapping(business,user_dict): #convert string ID of user to integer based on row number in characteristic matrix\n rated_users = business[1]\n business_id = business[0]\n new_list = [] #create new list with row numbers for users\n for user in rated_users:\n user2 = user_dict[user]\n new_list.append(user2)\n return (business_id,new_list)\n\ndef HashParameters(n,max_row_number): #generate all hash parameters\n params = []\n k = 0 \n while k < n: #only generate parameters for max number of hash functions\n a = random.randint(0,1000)\n b = random.randint(0,1000)\n params.append([a,b,max_row_number])\n k+=1\n return params\n\ndef GenerateHashFunctions(business,hparams,max_row_number): #apply hash parameters in hash functions and then on user_ids\n row_values = []\n for parameters in hparams:\n a = parameters[0]\n b = parameters[1]\n c = parameters[2]\n business_id = business[0]\n initial_row_numbers = business[1]\n new_row_numbers = []\n for y in initial_row_numbers:\n new_number = ((a*y) + b) % max_row_number\n new_row_numbers.append(new_number)\n min_value = min(new_row_numbers)\n row_values.append(min_value)\n return (business[0],row_values)\n\ndef CreateBands(business,rows,n): #subset signature matrix into sets of size band\n users = business[1]\n business_id = business[0]\n band_size = []\n p = 0 \n while p <= (n - 1):\n a = users[p:p+rows]\n band_size.append(a)\n p = p + rows\n return (business_id,users, band_size)\n\ndef BandManipulation(business): #convert band into key to group later\n\tusers = tuple(business[2])\n\tbusiness_sig = business[1]\n\tbusiness_id = business[0]\n\treturn (users,(business_id,business_sig))\n\ndef RemoveSigMat(candidate): #to clean up data, remove signature matrix and keep business_id\n cands = []\n cand = candidate[1]\n for c in cand: \n business_id = c[0]\n cands.append(business_id)\n return cands\n\ndef Transform(band,business_dict): #clean up data and add original user list 
to each business pair\n if len(band) == 2:\n pair1 = band[0]\n pair1_data = business_dict[pair1]\n pair2 = band[1]\n pair2_data = business_dict[pair2]\n return ((pair1,pair1_data),(pair2,pair2_data))\n else:\n combos = list(it.combinations(band,2))\n combos_list = []\n for combo in combos:\n pair1 = combo[0]\n pair1_data = business_dict[pair1]\n pair2 = combo[1]\n pair2_data = business_dict[pair2]\n combos_list.append(((pair1,pair1_data),(pair2,pair2_data)))\n return combos_list\n\ndef Jaccard(): #implements jaccard similarity \n\ttrain_RDD = lsh.textFile(input_file)\n\theader = train_RDD.first()\n\ttrain_RDD = train_RDD.filter(lambda x: x!= header) #remove header\n\ttrain_initial = train_RDD.map(lambda x:x.split(','))\n\tuser_as_key = train_initial.map(lambda x:(x[0],x[1]))\n\tbusiness_as_key = train_initial.map(lambda x:(x[1],x[0]))\n\tuser_combos = user_as_key.groupByKey().map(lambda x : (x[0], list(x[1]))).zipWithIndex().map(lambda x: (x[0][0],x[1])) #add row numbers to transform string user_id to number\n\tusers_row_numbers = user_combos.collect()\n\tuser_dict = {} #to map user_id to integer in the future\n\tfor user in users_row_numbers: \n\t user_dict[user[0]] = user[1]\n\tbusiness_combos = business_as_key.groupByKey().map(lambda x : (x[0], list(x[1]))) #group businesses to generate business as key and list of users who rated it as values\n\tbusiness_combos_with_rows = business_combos.map(lambda x: UsertoRowMapping(x,user_dict)) #transform values to integers\n\tbus_comb = business_combos_with_rows.collect()\n\tbusiness_dict = {} \n\tfor bus in bus_comb: \n\t business_dict[bus[0]] = bus[1]\n\tmax_row_number = len(users_row_numbers) #number of bins\n\tn = 60 #number of hash functions\n\tbands = 20\n\trows = 3\n\thparams = HashParameters(n,max_row_number) \n\tsig_mat = business_combos_with_rows.map(lambda x: GenerateHashFunctions(x, hparams,max_row_number))\n\tbanded_sig_mat = sig_mat.map(lambda x: CreateBands(x,rows,n))\n\n\tb1 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][0])) #extract all bands into one rdd for that subset\n\tb2 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][1]))\n\tb3 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][2]))\n\tb4 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][3]))\n\tb5 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][4]))\n\tb6 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][5]))\n\tb7 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][6]))\n\tb8 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][7]))\n\tb9 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][8]))\n\tb10 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][9]))\n\tb11 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][10]))\n\tb12 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][11]))\n\tb13 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][12]))\n\tb14 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][13]))\n\tb15 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][14]))\n\tb16 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][15]))\n\tb17 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][16]))\n\tb18 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][17]))\n\tb19 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][18]))\n\tb20 = banded_sig_mat.map(lambda x:(x[0],x[1],x[2][19]))\n\n\tb1_amended = b1.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2) #group by key and filter out if no businesses align for that band\n\tb2_amended = b2.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb3_amended = b3.map(lambda 
x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb4_amended = b4.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb5_amended = b5.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb6_amended = b6.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb7_amended = b7.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb8_amended = b8.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb9_amended = b9.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0], list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb10_amended = b10.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb11_amended = b11.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb12_amended = b12.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb13_amended = b13.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb14_amended = b14.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb15_amended = b15.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb16_amended = b16.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb17_amended = b17.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb18_amended = b18.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb19_amended = b19.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\tb20_amended = b20.map(lambda x:BandManipulation(x)).groupByKey().map(lambda x : (x[0],list(x[1]))).filter(lambda x:len(x[1])>=2)\n\n\n\tcandidates = b1_amended.union(b2_amended) #add all the candidate pairs together\n\tcandidates = candidates.union(b3_amended)\n\tcandidates = candidates.union(b4_amended)\n\tcandidates = candidates.union(b5_amended)\n\tcandidates = candidates.union(b6_amended)\n\tcandidates = candidates.union(b7_amended)\n\tcandidates = candidates.union(b8_amended)\n\tcandidates = candidates.union(b9_amended)\n\tcandidates = candidates.union(b10_amended)\n\tcandidates = candidates.union(b11_amended)\n\tcandidates = candidates.union(b12_amended)\n\tcandidates = candidates.union(b13_amended)\n\tcandidates = candidates.union(b14_amended)\n\tcandidates = candidates.union(b15_amended)\n\tcandidates = candidates.union(b16_amended)\n\tcandidates = candidates.union(b17_amended)\n\tcandidates = candidates.union(b18_amended)\n\tcandidates = candidates.union(b19_amended)\n\tcandidates = candidates.union(b20_amended)\n\n\tcandidates_cleaned = candidates.map(lambda x: RemoveSigMat(x)) #remove signature matrix\n\tcandidates_original = candidates_cleaned.map(lambda x: Transform(x,business_dict)) #add original user_id\n\tcandidate_sim = candidates_original.collect()\n\n\tjac_sim = [] #to get jaccard similarities of all the 
pairs\n\tfor cand in candidate_sim:\n\t    if isinstance(cand,tuple):\n\t        pair1 = cand[0][0]\n\t        pair1_data = set(cand[0][1])\n\t        pair2 = cand[1][0]\n\t        pair2_data = set(cand[1][1])\n\t        jac = ((len(pair1_data&pair2_data)))/float(len(pair1_data|pair2_data))\n\t        inp = ((pair1,pair2),jac)\n\t        jac_sim.append(inp)\n\t    else:\n\t        for combo in cand: \n\t            pair1 = combo[0][0]\n\t            pair1_data = set(combo[0][1])\n\t            pair2 = combo[1][0]\n\t            pair2_data = set(combo[1][1])\n\t            jac = ((len(pair1_data&pair2_data)))/float(len(pair1_data|pair2_data))\n\t            inp = ((pair1,pair2),jac)\n\t            jac_sim.append(inp)\n\n\tsimilar = {} #filter out those with jaccard less than 0.5\n\tfor pair in jac_sim:\n\t    a = pair[0][0]\n\t    b = pair[0][1]\n\t    jaccard = pair[1]\n\t    if(jaccard>=0.5):\n\t        if(a Server marks all pulled messages as read - Perhaps\n        add a 'pull_unread'\n\n    need to write \n        - update_available_nodes\n        - update_available_pillars\n'''\n\n\n\n'''\n    Network settings \n'''\n# Network static data, changing can ensure\n# incompatibility between separate networks \n__NETWORK_DATA_PING__ = \"*\"\n\nclass XNMNode(threading.Thread):\n    def __init__(self, id, create_node=False, port=None, ip_addr=\"127.0.0.1\", qoverride=None):\n        threading.Thread.__init__(self)\n\n        # Access to databases\n        self.data_access = DataStore()\n\n        # If create node flag is raised, create and add to db\n        if create_node:\n            if port is None:\n                cout(\"fail\", \"Port must be given for network\")\n                exit(1)\n            port = str(port)\n            try:\n                temp_unused = int(port)\n            except ValueError:\n                cout(\"fail\", \"Port must be an integer!\")\n                exit(1)\n            cout(\"yellow\", \"Generating address....\")\n            new_address = generate_address(id)\n            if new_address[0]:\n                new_address = new_address[1]\n            else:\n                cout(\"fail\", \"Error creating address: \" + new_address[1])\n                exit(1)\n            self.data_access.add_node_info(\n                \"node\", \n                new_address, \n                ip_addr, \n                EnsureST(datetime.now()), \n                EnsureST(datetime.now()),\n                port\n            )\n            cout(\"yellow\", \"Generating RSA keys....\")\n            self.keys = generate_keys()\n            self.data_access.add_keys_info(\"node\", new_address, self.keys[0], self.keys[1])\n\n            print(\n                ctxt(\"lightgreen\", \"New node created! 
Address: \"), \n ctxt(\"cyan\", new_address),\n \"@\",\n ctxt(\"yellow\", port)\n )\n self.naddress = new_address\n self.id = id\n return\n self.id = id\n node_info = self.data_access.get_node(\"node\", id)\n\n if len(node_info) > 1:\n cout(\"fail\", \"Error: Node's node table has duplicates..\")\n exit(1)\n if len(node_info) == 0:\n cout(\"fail\", \"No node was found with the given address..\")\n exit(1)\n\n key_info = self.data_access.get_keys(\"node\", id)\n\n if len(key_info) > 1:\n cout(\"fail\", \"Error: Node has more than one key pair..\")\n exit(1)\n if len(key_info) == 0:\n cout(\"fail\", \"Error: Node can't load key pair..\")\n exit(1)\n\n # Node keys'''\n self.public_key = key_info[0][\"public\"]\n self.private_key = key_info[0][\"private\"]\n\n # Set the node settings\n self.ip = node_info[0][\"ip\"]\n self.last_pull = node_info[0][\"last_pull\"]\n self.last_connect = node_info[0][\"last_connect\"]\n\n # Behaviour \n self.supressText = True\n self.signal = True\n self.daemon = True\n\n '''\n Interaction settings\n '''\n # Node serving as current point of contact\n self.current_provider = None\n self.known_pillars = []\n self.currently_available_pillars = []\n self.known_nodes = []\n\n # Messages we want to send to the network\n self.outgoing_messages = Queue()\n\n # Messages we've recieved from other nodes / etc\n if qoverride is None:\n self.received_messages = Queue()\n else:\n self.received_messages = qoverride\n\n '''\n Main node loop\n '''\n def run(self):\n if not self.supressText:\n print(\"Node starting..\")\n\n '''\n Load known pillars, get list of online\n Select random online pillar to join onto network, this becomes self.current_provider\n - If errors occur interacting with current_provider, retest avaiailable pillars, and get new provider\n '''\n\n self.sleep_on_provider_cycle = 1\n self.cycles_since_active_provider = 1\n\n while self.signal:\n \n # If we have a provider, ensure they are online\n if self.current_provider is not None:\n self.sleep_on_provider_cycle = 1\n result = self.perform_ping(self.current_provider[\"ip\"], self.current_provider[\"port\"])\n if not result:\n self.current_provider = None\n \n # If we lost our provider, cycle known pillars and assign providers.\n # once we are attached to an active provider, we will continue updating.\n while self.current_provider is None and self.signal:\n \n # Every 10 failed attempts to contact a provider, add a second\n # onto how long we sleep so we don't over-flood the network\n # Once we exceed a 5-second wait per cycle, reset to 1\n self.cycles_since_active_provider += 1\n if 0 == self.cycles_since_active_provider % 10:\n self.sleep_on_provider_cycle += 1\n if self.sleep_on_provider_cycle > 5:\n self.sleep_on_provider_cycle = 1\n self.cycles_since_active_provider = 1\n\n # Check for online pillars, and try to assign a provider\n self.check_available_pillars()\n self.assign_provider()\n\n # Sleep for the determined amount of time\n sleep(self.sleep_on_provider_cycle)\n \n # We have a provider ? - Update the node!\n if self.current_provider is not None:\n self.update_available_pillars()\n\n # Did we lose the provider while updating? If not, update available nodes\n if self.current_provider is not None:\n self.update_available_nodes()\n\n # Did we lose the provider yet? No? 
Request messages\n            if self.current_provider is not None:\n                self.request_messages()\n\n            # If we still have the provider, make it provide a \n            # receiving end for messages\n            if self.current_provider is not None:\n                self.push_messages()\n            sleep(1)\n\n    '''\n        End the node, and bring down the thread\n    '''\n    def kill(self):\n        if not self.supressText:\n            print(\"\\nPlease wait while node shuts down\")\n        # Write updates to db\n        self.data_access.update_node_activity(\n            \"node\",\n            self.id,\n            self.ip,\n            self.last_pull,\n            self.last_connect\n        )\n        # Kill and join\n        self.signal = False\n        self.join()\n\n    '''\n        Pull nodes and return (for interacting with another program)\n    '''\n    def external_pull_known_nodes(self):\n        return self.data_access.get_nodes(\"node\")\n\n    '''\n        Pull pillars and return (for interacting with another program)\n    '''\n    def external_pull_known_pillars(self):\n        return self.data_access.get_pillars(\"node\")\n\n    '''\n        Ensure the provider is who we think they are\n    '''\n    def validate_response(self, response, sender):\n        \n        key = self.data_access.get_keys(\"node\", sender[\"id\"])\n        if len(key) != 1:\n            return (False, \"Couldn't load key from sender\")\n\n        return verify_data(key[0][\"public\"], response[\"hash\"], response[\"signature\"])\n\n\n    '''\n        Used to set-up the node to a network initially from \n        an external file - Someone using the library\n    '''\n    def force_register(self, pillar_id, pillar_ip, pillar_port):\n        try:\n            p = int(pillar_port)\n        except ValueError:\n            cout(\"fail\", \"Error: Port must be an integer!\")\n            return (False, \"Not registered\")\n        \n        # Add the pillar to the database\n        self.data_access.add_pillar_info(\"node\", pillar_ip, pillar_id, pillar_port, EnsureST(datetime.now()))\n\n    '''\n        Ping a pillar - Attempt up to 5 times\n        True  - Success\n        False - Failure\n    '''\n    def perform_ping(self, address, port):\n        port = int(port)\n        for i in range(0, 5):\n            result = query(address, port, __NETWORK_DATA_PING__, timeout=2)\n            if ValidTCPRespnse(result):\n                return True\n        return False\n\n    '''\n        Assign new pillar contact - The \n        current interface into the network\n    '''\n    def assign_provider(self):\n        if len(self.currently_available_pillars) == 0:\n            return\n        self.current_provider = GetRandomChoice(self.currently_available_pillars)\n        self.cycles_since_active_provider = 1\n        self.sleep_on_provider_cycle = 1\n\n    '''\n        Generic request\n    '''\n    def execute_request(self, request, receiver=None):\n        if receiver is None:\n            receiver = self.current_provider\n        #print(\"REQ: \", receiver[\"ip\"], \"@\", receiver[\"port\"])\n        port = int(receiver[\"port\"])\n        for i in range(0,5):\n            result = query(receiver[\"ip\"], port, request)\n            #print(result)\n            if ValidTCPRespnse(result):\n                return (True, result)\n        return (False, \"Unable to send request\")\n\n    '''\n        Ping for available pillars, and ensure registration\n    '''\n    def check_available_pillars(self):\n        # Clear known pillars\n        self.currently_available_pillars = []\n\n        # Pull list of known pillars from db\n        self.known_pillars = self.data_access.get_pillars(\"node\")\n\n        # Try pinging all pillars, ones that respond will have a registration\n        # request sent to them. If we can register, we add it to available\n        for pillar in self.known_pillars:\n            if self.perform_ping(pillar[\"ip\"], pillar[\"port\"]):\n\n                # Ensure we are registered with the available pillar\n                registration = create_message(self.private_key, \"n.register\", self.id, pillar[\"id\"], self.ip + \"<&>\" + self.public_key)\n                result_of_execute = self.execute_request(registration, receiver=pillar)\n\n                if result_of_execute[0]:\n                    result = disassemble_message(result_of_execute[1])\n                    if result[0]:\n                        if result[1][\"data\"] == \"_MALFORMED_REGISTRATION_\" or result[1][\"data\"] == \"_MALFORMED_REGISTRATION_IP_ERROR_\":\n                            cout(\"fail\", \"Failure to comply with pillar's registration rules\")\n                        elif result[1][\"data\"] == \"_REGISTERED_\":\n                            self.currently_available_pillars.append(pillar)\n                        else:\n                            # Ensure they gave us a key that can decode their signed hash\n                            if verify_data(result[1][\"data\"], result[1][\"hash\"], result[1][\"signature\"]):\n                                self.data_access.add_keys_info(\"node\", result[1][\"from\"], result[1][\"data\"], \"\")\n                                self.currently_available_pillars.append(pillar)\n                            else:\n                                cout(\"fail\", \"Pillar failed to give valid key\")\n                    else: \n                        cout(\"fail\", \"Unknown data response regarding registration with \" + pillar[\"id\"])\n\n    '''\n        Request list of available nodes - update db\n    '''\n    def update_available_nodes(self):\n        if self.current_provider is None:\n            return\n        request = create_message(self.private_key, \"n.nodes\", self.id, self.current_provider[\"id\"], \".\")\n        nodes = self.execute_request(request)\n        if not nodes[0]:\n            cout(\"fail\", nodes[1])\n            self.current_provider = None\n            return\n        nodes = disassemble_message(nodes[1])\n        if not nodes[0]:\n            cout(\"fail\", \"[node->update_available_nodes] => invalid node data from pillar!\")\n            self.current_provider = None\n            return\n\n        try:\n            nodes = loads(nodes[1][\"data\"])\n        except:\n            cout(\"fail\", \"Unable to load node data from pillar\")\n            self.current_provider = None\n            return\n\n        # Get currently known nodes\n        currently_known_nodes = self.data_access.get_nodes(\"node\")\n\n        # Check each incoming node against all known nodes. O(n^2) :(\n        for inode in nodes:\n            \n            incoming_node_exists_in_database = False\n            for cnode in currently_known_nodes:\n                if cnode[\"uid\"] == inode[\"uid\"]:\n                    incoming_node_exists_in_database = True\n            \n            # If the incoming node doesn't exist in the database, add it\n            if not incoming_node_exists_in_database:\n                self.data_access.add_node_info(\"node\",\n                    inode[\"uid\"],\n                    inode[\"ip\"],\n                    inode[\"last_pull\"],\n                    inode[\"last_connect\"],\n                    inode[\"port\"]\n                )\n        # All incoming nodes checked; new ones have been added\n        return\n\n    '''\n        Request list of available pillars - update db\n    '''\n    def update_available_pillars(self):\n        request = create_message(self.private_key, \"n.pillars\", self.id, self.current_provider[\"id\"], \".\")\n        pillars = self.execute_request(request)\n        if not pillars[0]:\n            cout(\"fail\", pillars[1])\n            self.current_provider = None\n            return\n        pillars = disassemble_message(pillars[1])\n        if not pillars[0]:\n            cout(\"fail\", \"[node->update_available_pillars] => invalid pillar data from pillar!\")\n            self.current_provider = None\n            return\n\n\n####################################\n######\n    #        print(\"Got the following pillar data from the provider: \\n\", pillars[1][\"data\"])\n######\n####################################\n\n        try:\n            pillars = loads(pillars[1][\"data\"])\n        except:\n            cout(\"fail\", \"Unable to load pillar data from pillar\")\n            self.current_provider = None\n            return\n\n        # Get currently known pillars\n        currently_known_pillars = self.data_access.get_pillars(\"node\")\n\n        # Check each incoming pillar against all known pillars. O(n^2) :(\n        for ipillar in pillars:\n            \n            incoming_pillar_exists_in_database = False\n            for cpillar in currently_known_pillars:\n                if cpillar[\"id\"] == ipillar[\"id\"]:\n                    incoming_pillar_exists_in_database = True\n            \n            # If the incoming pillar doesn't exist in the database, add it\n            if not incoming_pillar_exists_in_database:\n                self.data_access.add_pillar_info(\"node\",\n                    ipillar[\"ip\"],\n                    ipillar[\"id\"],\n                    ipillar[\"port\"],\n                    ipillar[\"last_online\"]\n                )\n        # All incoming pillars checked; new ones have been added\n\n        return\n\n    '''\n        Request list of new messages\n    '''\n    def request_messages(self):\n        if self.current_provider is None:\n            return\n        request = create_message(self.private_key, \"n.check\", self.id, self.current_provider[\"id\"], \".\")\n        messages = self.execute_request(request)\n\n        if not messages[0]:\n            cout(\"fail\", messages[1])\n            self.current_provider = None\n            return\n\n        #debug_TODO(\"request_messages\", \"Add the new messages to current node messages! 
\")\n\n messages = disassemble_message(messages[1])\n\n # Invalid message data \n if not messages[0]:\n cout(\"fail\", \"[node] => invalid message data from pillar!\")\n self.current_provider = None\n return\n\n # print(\"Messages : \", messages)\n \n # No messages, no problem\n if messages[1][\"data\"] == \"NO_MESSAGES\":\n return\n else:\n # Data will be a stringed list of dictionaries\n try:\n current_messages = loads(messages[1][\"data\"])\n except:\n cout(\"fail\", \"Unable to loads messages from pillar\")\n self.current_provider = None\n return\n\n # Add each dict into the queue\n for cmsg in current_messages:\n self.received_messages.put(cmsg)\n\n '''\n Send a message to a node on the network\n - Take in - assemble message and add to outgoing queue\n '''\n def send_message(self, receiver, data, previous_hash=None):\n message = create_message(self.private_key, \"n.submit\", self.id, receiver, data, previous_hash=previous_hash)\n self.outgoing_messages.put(message)\n\n '''\n Send a message to a network of nodes\n - Take in - assemble message and add to outgoing queue\n '''\n def broadcast_message(self, receivers, data, previous_hash=None):\n message = create_message(self.private_key, \"n.broadcast\", self.id, receivers, data, previous_hash=previous_hash)\n self.outgoing_messages.put(message)\n\n '''\n Send all queued messages\n '''\n def push_messages(self):\n \n # Ensure again that we have a provider\n if self.current_provider is not None:\n \n try_flag = True\n failed_attempts = 0\n original_size = self.outgoing_messages.qsize()\n \n # If we do, and we have messages, send them out\n while not self.outgoing_messages.empty() and try_flag:\n \n success = False\n message = self.outgoing_messages.get()\n\n\n##################################\n debug_MESSAGE(\"push_messages\", \"sending messages...\")\n debug_MESSAGE(\"push_messages\", \"Queue Size: \" + str(original_size))\n \n print(\"TRYING TO SEND: \", message)\n##################################\n\n for i in range(0, 5):\n \n result = query(self.current_provider[\"ip\"], int(self.current_provider[\"port\"]), message)\n success = ValidTCPRespnse(result)\n\n if success:\n self.outgoing_messages.task_done()\n break\n\n if success == False:\n self.outgoing_messages.task_done()\n self.outgoing_messages.put(message)\n failed_attempts += 1\n \n # Ensure that if pillar goes down while sending messages, we only fail\n # so-many times before stopping, allowing us to cycle providers\n if failed_attempts > (original_size + (original_size/2) ):\n try_flag = False","sub_path":"Python/DigitalHills-Archive/obelisk-cab/connector/xnmlib/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":19559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569131860","text":"import logging\nimport argparse\n\nfrom fxplc import FXPLC, RegisterDef, RegisterType, NoResponseError, ResponseMalformedError, NotSupportedCommandError\n\n\ndef main():\n argparser = argparse.ArgumentParser()\n argparser.add_argument('-d', '--debug', action='store_true')\n argparser.add_argument('-p', '--path', type=str, metavar=\"PATH\", required=True)\n\n op_sp = argparser.add_subparsers(title=\"operation\")\n\n sp = op_sp.add_parser('read')\n sp.set_defaults(cmd=\"read\")\n sp.add_argument(\"register\", type=str, nargs='*')\n\n sp = op_sp.add_parser('read_bit')\n sp.set_defaults(cmd=\"read_bit\")\n sp.add_argument(\"register\")\n\n sp = op_sp.add_parser('read_bytes')\n sp.set_defaults(cmd=\"read_bytes\")\n 
sp.add_argument(\"register\")\n sp.add_argument(\"count\", type=int, default=1, nargs='?')\n\n sp = op_sp.add_parser('read_counter')\n sp.set_defaults(cmd=\"read_counter\")\n sp.add_argument(\"register\")\n\n sp = op_sp.add_parser('write_bit')\n sp.set_defaults(cmd=\"write_bit\")\n sp.add_argument(\"register\")\n sp.add_argument(\"value\", type=str, choices=[\"1\", \"0\", \"on\", \"off\", \"yes\", \"no\", \"true\", \"false\"])\n\n args = argparser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO, format=\"[%(asctime)s] [%(name)s] %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n fx = FXPLC(args.path)\n\n try:\n if args.cmd == \"read\":\n for r in args.register:\n reg = RegisterDef.parse(r)\n bit = fx.read_bit(r)\n bit_str = \"on\" if bit else \"off\"\n if reg.type == RegisterType.Timer:\n cnt = fx.read_counter(r)\n print(f\"{reg} = {bit_str}, counter: {cnt}\")\n else:\n print(f\"{reg} = {bit_str}\")\n\n if args.cmd == \"read_bit\":\n d = fx.read_bit(args.register)\n print(d)\n\n if args.cmd == \"write_bit\":\n on = args.value in (\"1\", \"on\", \"yes\", \"true\")\n fx.write_bit(args.register, on)\n\n if args.cmd == \"read_bytes\":\n d = fx.read_bytes(args.register, args.count)\n print(d)\n\n if args.cmd == \"read_counter\":\n d = fx.read_counter(args.register)\n print(d)\n except NotSupportedCommandError:\n print(\"[ERROR] Command not supported\")\n exit(1)\n except NoResponseError:\n print(\"[ERROR] No response\")\n exit(1)\n except ResponseMalformedError:\n print(\"[ERROR] Response malformed\")\n exit(1)\n\n\nmain()\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289292588","text":"#!/usr/bin/env python3\n\na_values = [3.3, 3.51, 3.75]\nstart_values = { 'x' : 0.228734167, 'x_hat' : 0.228734168 }\nsteps = 1000\n\ndef iteration_step(x,a):\n return a * x * (1-x)\n\n\nfor key, val in start_values.items():\n for a in a_values:\n x = val\n logfilename = key + '_' + str(a) + '.log'\n with open(logfilename,'w') as logfile:\n for i in range(steps):\n logfile.write('{}\\t{}\\n'.format(i,x))\n x = iteration_step(x,a)\n \n","sub_path":"ass36_helper.py","file_name":"ass36_helper.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466711105","text":"import os\nimport glob\n\nimport numpy as np\nimport skimage.io as sio\nimport skimage.util as cropper\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import random_split\nimport torchvision.transforms as transforms\nfrom skimage.transform import resize\nfrom PIL import Image \n\nclass BrainDataset(torch.utils.data.Dataset):\n def __init__(self, data_path, folders, labels, frames, transform=None):\n\n\n\n self.data_path = data_path\n self.labels = labels\n self.folders = folders\n self.transform = transform\n self.frames = frames\n\n def __len__(self):\n \"Denotes the total number of samples\"\n return len(self.folders)\n\n def read_images(self, path, selected_folder, use_transform):\n X = []\n for i in self.frames:\n image = Image.open(os.path.join(path, selected_folder, 'image-slice{:03d}.jpg'.format(i))).convert('L')\n if use_transform is not None:\n image = use_transform(image)\n\n X.append(image.squeeze_(0))\n X = torch.stack(X, dim=0)\n X = X.permute(1, 2,0)\n\n # for i in range(X.shape[0]):\n # for j in range(X.shape[1]):\n # for z in 
range(X.shape[2]):\n        #             if X[i][j][z] != 0.0:\n        #                 print(X[i][j][z])\n        return X\n\n    def __getitem__(self, index):\n        \"Generates one sample of data\"\n        # Select sample\n        folder = self.folders[index]\n\n        # Load data\n        X = self.read_images(self.data_path, folder, self.transform).unsqueeze_(0) # (input) spatial images\n        y = torch.LongTensor([self.labels[index]])\n        X = torch.squeeze(X)\n        return X, y","sub_path":"Glimpse_Network/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"649992263","text":"# -*- coding: utf-8 -*-\n# Get these from the API, e.g.\n# https://apisandbox.openbankproject.com/consumer-registration\n\n# OAuth data for anil.x.d.n@example.com\nCLIENT_KEY = '2na3xkig0kehgywgrdxq5aquodtq320oep2sq3zz'\nCLIENT_SECRET = 'vvil0c1tkqcvzqmo22lzjzaopt1gpy3zix3vlikd'\n\n# API host to talk to\nAPI_HOST = 'https://apisandbox.openbankproject.com'\n\n# Our bank we want to work with\nOUR_BANK = 'obp-bankx-n'\n\n# Our counterpart we might want to send money\nCOUNTERPART_BANK = 'obp-bankx-n'\nCOUNTERPART_ACCOUNT_ID = '6106c2f8-5870-402a-9889-8d8ae7e6fbe7'\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92691313","text":"import pyfirmata #facilitates communication with Arduino https://pyfirmata.readthedocs.io/en/latest/\nimport time #Required to use delay functions\nfrom progress.bar import ChargingBar\n\n\"\"\" \nTester function to check the circuit assembly. The input port must be a\nstring specifying the computer port your circuit is plugged into,\n'COMxx'. While this tester function can point you to possible errors, it\nis not meant to be an all-rounded benchmark routine. It works by\nconnecting to arduino and setting the PWM output to the maximal value\nand comparing the initial signal with the heated up signal. If the latter\nis smaller, then R2 can heat up fine. I recommend using a thermocouple to\nmeasure the temperature of R2 while heating up, so you can manually check\nif it works. Always start from room temperature (~22 C). Make sure that\nthe temperature of R2 is steady at the start. The signal difference\nshould be around 0.3 V if R2 is not insulated and started from 22 C. If\nthe signal difference is equal to zero then the problem is in the\nreadout. \"\"\"\n\n\nportName = 'COM3'\narduino = pyfirmata.Arduino(portName)\ncnd = False\nit = pyfirmata.util.Iterator(arduino)\nit.start()\n\ntry:\n    #Connect to arduino hardware on input port.\n    input_pin = arduino.get_pin('a:0:i') #analog, input to the computer\n    output_pin = arduino.get_pin('d:3:o') #digital, output from the computer\n    output_pin.mode = 3 #PWM mode\n    # Set the output of digital pin 3 to high (5V), this will heat R2\n    output_pin.write(1) #writes to D3\n    time.sleep(5)\n    #set the output of digital pin 3 to low (0V)\n    output_pin.write(0)\n    print('Connection established. \\n')\n    cnd = True\n\nexcept:\n    print('Cannot connect. \\n')\n    print('Possible errors: \\n')\n    print('- the arduino is not installed properly. \\n Ensure that the proper driver is installed and the correct .inf file is uploaded to ruggeduino. \\n')\n    print('- the connection between arduino and the computer is faulty. 
\\n Try restarting the computer or using a different USB cable \\n')\n\nif cnd:\n    try:\n        # Read the voltage to the analog pin 0 when the digital pin 3 signal\n        # is low\n        signal0 = input_pin.read()\n        # Set digital pin 3 to high to heat up resistor R2\n        output_pin.write(1)\n        bar = ChargingBar('Heating up...', max=20)\n        for i in range(20):\n            time.sleep(3)\n            bar.next()\n        bar.finish()\n        # Read the voltage to analog pin 0 after R2 has heated\n        # (the loop index never reaches 20 inside range(20), so the read must happen here)\n        signal1 = input_pin.read()\n        print(signal0 - signal1, 'V.')\n        # Set the output of digital pin 3 to low (0V)\n        output_pin.write(0)\n\n        if signal0 - signal1 > 0.2:\n            print('R2 heating is functional. \\n')\n            print('Circuit assembled successfully. \\n')\n        elif signal0 - signal1 > 0.1 and signal0 - signal1 <= 0.2:\n            print('R2 heating is functional but the power output is too low. \\n')\n            print('Are you running the test at a steady room temperature? \\n')\n        else:\n            print('R2 heating does not work, the environment is extreme \\n or the signals cannot be read out. \\n')\n\n    except:\n        print('Cannot write the PWM output properly. \\n')\n        print('Possible problems: \\n')\n        print('- the circuit is not assembled correctly. Check the schematic. \\n')\n        print('- your R2 resistor is too hot. Wait for it to cool down. \\n')\n\n\nwait = input(\"Press Enter to Continue\")","sub_path":"past code (not used)/Project 1 Tester.py","file_name":"Project 1 Tester.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"431344834","text":"\n# https://leetcode.com/problems/binary-search/\n\nfrom typing import List\n\n\nclass Solution:\n    def search(self, nums: List[int], target: int) -> int:\n        # Classic iterative binary search on a sorted list.\n        bottom = 0\n        top = len(nums) - 1\n        while bottom <= top:\n            mid = (bottom + top) // 2\n            if target < nums[mid]:\n                top = mid - 1\n            elif target > nums[mid]:\n                bottom = mid + 1\n            else:\n                return mid\n        return -1\n","sub_path":"Leetcode/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"360167086","text":"\"\"\"wcz_boke URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom blog_admin import views as admin_views\nfrom blog_user import views as user_views\nfrom blog_block import views as block_views\nfrom blog_subject import views as subject_views\nfrom blog_message import views as message_views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', user_views.index, name='home'),\n path('useradd/', user_views.useradd, name='useradd'),\n path('yzm/', user_views.yzm, name='yzm'),\n path('userlogin/', user_views.userlogin, name='userlogin'),\n path('userlogout/', user_views.userlogout, name='userlogout'),\n path('create/subject/',user_views.create_subject,name=\"create_subject\"), \n path('check/username//',user_views.check_username,name=\"check_username\"), \n path('send/email/',user_views.send_email,name=\"send_email\"), \n path('check/email//',user_views.check_email,name=\"check_email\"), \n path('check/yzm//',user_views.check_yzm,name=\"check_yzm\"), \n path('reset/userpwd/',user_views.reset_pwd,name=\"reset_pwd\"), \n path('set/userpwd//',user_views.set_pwd,name=\"set_pwd\"), \n path('date/save/',user_views.save_date,name=\"save_date\"), \n path('subject/type/',user_views.subject_type,name=\"subject_type\"), \n path('add/subject/',subject_views.subjectadd,name=\"subjectadd\"),\n path('show/subject//',subject_views.show_subject,name=\"show_subject\"),\n path('add/message/',message_views.messageadd,name=\"messageadd\"),\n path('del/message///',message_views.messagedel,name=\"messagedel\"),\n path('user/search/',user_views.user_search,name=\"user_search\"),\n path('nav/',user_views.nav_bar,name=\"nav_bar\"),\n path('save/img/',subject_views.img_save,name=\"img_save\"),\n path('newest_subject/',user_views.newest_subject,name=\"newest_subject\"),\n path('show_subject/message//',message_views.show_message,name=\"show_message\"),\n path('recordmy/',admin_views.record_myself,name=\"recordmy\"),\n path('myself/',admin_views.myself,name=\"myself\"),\n path('study_back/',user_views.study_back,name=\"study_back\"),\n path('music/',user_views.music,name=\"music\"),\n path('usermes/',user_views.usermes,name=\"usermes\"),\n path('show/usermes/',message_views.show_usermes,name=\"show_usermes\"),\n path('show/myself/',admin_views.show_record_myself,name=\"show_record_myself\"),\n path('bottomlist/',user_views.lastjz,name=\"lastjz\"),\n]\n","sub_path":"Zk_Blog/wcz_boke/wcz_boke/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"561198934","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom ..models import *\nfrom ..forms.field_edit import UserProfileForm\nfrom django.core.cache import cache\nfrom django.views.decorators.cache import cache_page, cache_control\n\n# Create your views here.\n@cache_page(60 * 15, key_prefix=\"home\")\n@cache_control(max_age=3600)\ndef home(request):\n\tprofile = UserProfile.objects.first()\n\tcache_profile = load_from_cache(name=profile.name)\n\tcontext = {'profile': cache_profile}\n\n\treturn render(request, 'user_profile/profile.html', context)\n\n@cache_page(60 * 15, key_prefix=\"profile\")\n@cache_control(max_age=3600)\ndef profile(request, profile_id):\n\tcache_profile = load_from_cache(name=profile_id)\n\n\tcontext = {'profile': cache_profile}\n\t\n\treturn render(request, 'user_profile/profile.html', context)\n\n\n@cache_page(60 * 
15, key_prefix=\"edit_profile\")\n@cache_control(max_age=3600)\ndef edit_profile(request, profile, target_field):\n\tprofile = load_from_cache(name=profile)\n\n\tform = UserProfileForm(instance=profile)\n\t\n\t# context = {'value': getattr(profile, target_field)}\n\n\tif request.method == 'POST':\n\t\tform = UserProfileForm(request.POST, request.FILES, instance=profile)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\n\tcontext = {'form': form}\n\n\treturn render(request, 'user_profile/field_edit.html', context)\n\n\ndef load_from_cache(name):\n\tcache_profile = cache.get('userprofile-%s' % (name), None)\n\n\tif not cache_profile:\n\t\tcache_profile = UserProfile.objects.get(name=name)\n\t\tcache.set('userprofile-%s' % (name), cache_profile)\n\t\n\treturn cache_profile\n\n\n# class CacheProfile(UserProfile):\n# \tdef get_queryset(self):\n# \t\treturn super(CacheProfile, self).local_from_cache().select_related()\n\n# \tdef load_from_cache(self, queryset):\n# \t\tcache_obj = cache.get('%s-%s' % (self.model.__name__.lower(), self.kwargs['name']), None)\n\n# \t\tif not cache_obj:\n# \t\t\tcache_obj = super(CacheProfile, self).get_object(queryset)\n# \t\t\tcache.set('%s-%s' % (self.model.__name__.lower(), self.kwargs['name']), cache_obj)\n\t\t\n# \t\treturn cache_obj","sub_path":"user_profile/views/user_profile.py","file_name":"user_profile.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"594845278","text":"\"\"\"\nInterface for the library pycddlib to\ndefine user-friendly constraints.\nThis class is used to represent the user preferences :math:`w`.\n\"\"\"\n\nimport cdd\nfrom source import Utils\nfrom fractions import Fraction\nimport numpy as np\nimport time\nimport sys\nimport copy\n\n\nclass Polytope:\n    \"\"\"\n    Class used to represent a convex p-dimensional polytope :math:`W` where each vector\n    :math:`w = (w_1, ..., w_p)\\\\in W` is such that\n    :math:`w_i \\\\geq 0, w_i \\\\leq 1` and :math:`w_1 + ... + w_p = 1`.\n\n    This class is an interface to interact with the library pycddlib defining constraints\n    with a user-friendly syntax.\n    \n    \n    :ivar vars: List of strings: id used to represent the dimensions of the polytope.\n    :ivar n_vars: Integer: number of dimensions of the polytope.\n    :ivar formatted_constraints: List of string: formatted constraints defining the polytope.\n    :ivar extreme_points: List of p-dimensional float: extreme points of the polytope considering the\n    formatted constraints.\n\n    \"\"\"\n\n    def __init__(self, vars, frac=True, epi_var=''):\n        \"\"\"\n        Initialize polytope as the set W of vectors w such that\n        :math:`w_i \\\\geq 0, w_i \\\\leq 1` and :math:`w_1 + ... + w_p = 1`.\n\n        :param vars: list of string: List of id used to represent the dimensions of the polytope.\n        :param frac: boolean: If True, the coefficients of the formatted constraints must be\n        fractions. 
This increases the precision of the polytope representation and avoids numerical errors.\n        :param epi_var: string: Name of variable used to represent the epigraph value\n\n        \"\"\"\n\n        #constraints vars[i] >= 0\n        formatted_constraints = []\n        for j in range(len(vars)):\n            if vars[j] == epi_var: continue\n            l_constraints = ''\n            for i in range(len(vars)):\n                if i != j:\n                    l_constraints += ' 0 ' + vars[i]\n                else:\n                    l_constraints += ' 1 ' + vars[i]\n                if i < len(vars) - 1:\n                    l_constraints += ' + '\n                else:\n                    l_constraints += ' >= 0'\n            formatted_constraints.append(l_constraints)\n\n        # constraint sum_i vars[i] = 1 expressed as sum_i vars[i] >= 1 and sum_i vars[i] <= 1\n        sum_constraint1 = ''\n        sum_constraint2 = ''\n        for i in range(len(vars)):\n            if vars[i] == epi_var: continue\n            sum_constraint1 += ' 1 ' + vars[i]\n            sum_constraint2 += ' 1 ' + vars[i]\n            if i < len(vars) - 1:\n                sum_constraint1 += ' + '\n                sum_constraint2 += ' + '\n            else:\n                sum_constraint1 += ' >= 1'\n                sum_constraint2 += ' <= 1'\n        formatted_constraints.append(sum_constraint1)\n        formatted_constraints.append(sum_constraint2)\n\n\n\n        self.vars = vars\n        self.n_vars = len(vars)\n        self.formatted_constraints = formatted_constraints\n        self.__matrix = Polytope.__get_matrix_from_constraints(formatted_constraints, vars, frac)\n        self.__cdd_polytope = Polytope.__get_cdd_polytope_from_matrix(self.__matrix, frac)\n        self.extreme_points = Polytope.__get_extreme_points_from_cdd_polytope(self.__cdd_polytope)\n        self.frac = frac\n\n\n    def add_formatted_constraints(self, formatted_constraints):\n        \"\"\"\n        Method to add constraints to the polytope.\n\n        :param formatted_constraints: list of string. Each string represents a constraint;\n        constraint format: \\\\number \\\\space \\\\var1 \\\\space + ... + \\\\space \\\\number \\\\space \\\\varp \\\\space {<=, >=} \\\\number.\n        Example: '2.0 w1 + 0 w2 <= 1'\n\n        \"\"\"\n        for constraint in formatted_constraints:\n            self.formatted_constraints.append(constraint)\n        M = Polytope.__get_matrix_from_constraints(formatted_constraints, self.vars, self.frac)\n        self.__matrix = np.vstack((self.__matrix, M))\n        self.__cdd_polytope = Polytope.__get_cdd_polytope_from_matrix(self.__matrix, self.frac)\n        self.extreme_points = Polytope.__get_extreme_points_from_cdd_polytope(self.__cdd_polytope)\n\n    @staticmethod\n    def get_formatted_constraint_from_vectors(vars, vals, sign, constant):\n        \"\"\"\n        Method to generate a formatted constraint.\n\n        :param vars: list of string: Id of variables.\n        :param vals: list of float: Coefficients of variables.\n        :param sign: string: '<=' or '>='.\n        :param constant: float: Constant value\n        :return: string: Formatted constraint.\n\n        \"\"\"\n        if len(vars) != len(vals):\n            sys.exit('different length vars and vals')\n        c = ''\n        first = True\n        for i in range(len(vars)):\n            if first:\n                first = False\n            else:\n                c += ' + '\n            c += '%f %s' % (vals[i], vars[i])\n        c += ' %s %f' % (sign, constant)\n        return c\n\n    @staticmethod\n    def __get_extreme_points_from_constraints(formatted_constraints, vars, frac=False):\n\n        M = Polytope.__get_matrix_from_constraints(formatted_constraints, vars, frac)\n        #print(M)\n\n        start_time = time.time()\n        poly = Polytope.__get_cdd_polytope_from_matrix(M, frac)\n        # extract the extreme points of the resulting cdd polyhedron\n        extreme_points = Polytope.__get_extreme_points_from_cdd_polytope(poly)\n\n        tot_time = time.time() - start_time\n        #print('time to compute top points of A: %.4f; n extreme points: %i' % (tot_time, len(extreme_points)))\n\n        if len(extreme_points) == 0:\n            print('Null set of extreme points')\n            sys.exit()\n        return extreme_points\n\n    @staticmethod\n    def 
__get_matrix_from_constraints(formatted_constraints, vars, frac=False):\n\n # For a polyhedron described as P = {x | A x <= b}, the H-representation is the matrix [b -A].\n\n n_vars = len(vars)\n\n #creating 0 matrix: [b -A];\n #rows: n formatted_constraints\n #cols: constant term + n vars = n+1\n matrix = [[0 for j in range(1 + n_vars)] for i in range(len(formatted_constraints))]\n\n\n for i in range(len(formatted_constraints)):\n constraint = formatted_constraints[i]\n if '<=' in constraint:\n split_elmnt = '<='\n sign = 1\n elif '>=' in constraint:\n split_elmnt = '>='\n sign = -1\n else:\n continue\n\n #print(constraint)\n if frac:\n costant_term = Fraction(constraint.split(split_elmnt)[1].strip())\n else:\n costant_term = float(constraint.split(split_elmnt)[1].strip())\n matrix[i][0] = sign*costant_term\n\n constraint_elms = list(constraint.split(split_elmnt)[0].strip().split('+'))\n constraint_elms = [x.strip() for x in constraint_elms]\n for element in constraint_elms:\n var_name = element.split(' ')[1].strip()\n if frac:\n coefficient = Fraction(element.split(' ')[0].strip())\n else:\n coefficient = float(element.split(' ')[0].strip())\n\n j = Utils.index_containing_substring(vars, var_name) + 1\n matrix[i][j] = -sign*coefficient\n\n return matrix\n\n @staticmethod\n def __get_cdd_polytope_from_matrix(M, frac=False):\n if frac:\n mat = cdd.Matrix(M, number_type='fraction')\n else:\n mat = cdd.Matrix(M, number_type='float')\n mat.rep_type = cdd.RepType.INEQUALITY\n poly = cdd.Polyhedron(mat)\n return poly\n\n @staticmethod\n def __get_cdd_polytope_from_constraints(formatted_constraints, vars, frac=False):\n M = Polytope.__get_matrix_from_constraints(formatted_constraints, vars, frac)\n return Polytope.__get_cdd_polytope_from_matrix(M, frac)\n\n @staticmethod\n def __get_extreme_points_from_cdd_polytope(polytope):\n # get_generators return a matrix;\n # a row represents an extreme point:\n # the extreme point start at index 1\n ext_points = polytope.get_generators()\n ext_points_list = set([])\n for point in ext_points:\n if point[0] == 0:\n continue\n ext_point = point[1:]\n ext_points_list.add(ext_point)\n return(ext_points_list)\n\n @staticmethod\n def __get_UL_bounds_matrix_constraints_and_sum_constraint(l_constraints, u_constraints, vars, frac=False):\n\n # For a polyhedron described as P = {x | A x <= b}, the H-representation is the matrix [b -A].\n M1 = Polytope.__get_UL_bounds_matrix_constraints(vars, l_constraints, u_constraints, frac)\n M2 = Polytope.__get_sum_constraint(vars)\n M = np.vstack((M1, M2))\n return M\n\n @staticmethod\n def __get_UL_bounds_matrix_constraints(l_constraints, u_constraints, frac=False):\n # For a polyhedron described as P = {x | A x <= b}, the H-representation is the matrix [b -A].\n\n n_vars = len(l_constraints)\n\n #creating 0 matrix: [b -A];\n #rows: n upperbound constraint, n lowerbound constraint = 2n\n #cols: constant term + n vars = n+1\n\n matrix = []\n\n for i in range(n_vars):\n # [b -A]\n matrix_row_l = [0] * (1 + n_vars)\n matrix_row_u = [0] * (1 + n_vars)\n\n # setting b value; note that we need to change the sign of the lower bound coefficent\n if frac:\n matrix_row_l[0] = -Fraction(l_constraints[i])\n matrix_row_u[0] = Fraction(u_constraints[i])\n else:\n matrix_row_l[0] = -l_constraints[i]\n matrix_row_u[0] = u_constraints[i]\n\n # setting scalar coefficient -a_i of current var i to 1;\n # note that we need to change the sign of the lower bound coefficent\n matrix_row_l[i + 1] = -(-1)\n matrix_row_u[i + 1] = -1\n\n 
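# Two H-representation rows [b -A] per variable: the lower bound x_i >= l_i is stored as [-l_i, ..., +1, ...] and the upper bound x_i <= u_i as [u_i, ..., -1, ...].\n            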
matrix.append(matrix_row_l)\n matrix.append(matrix_row_u)\n\n\n return matrix\n\n @staticmethod\n def __get_sum_constraint(vars):\n\n n_vars = len(vars)\n\n #sum constraints\n\n #setting scalar coefficient to 1 for all variables; note -A and we need to change the sign of the lower bound coefficent\n sum_constraint_l = [-(-1)] * (1 + n_vars)\n sum_constraint_u = [-1] * (1 + n_vars)\n\n\n #setting constant term b; note that we need to change the sign of the lower bound coefficent\n sum_constraint_l[0] = -1\n sum_constraint_u[0] = 1\n\n matrix = []\n\n matrix.append(sum_constraint_l)\n matrix.append(sum_constraint_u)\n\n return matrix\n\n @staticmethod\n def __add_formatted_constraints_to_matrix(M, formatted_constraints, vars, frac=False):\n Mc = copy.deepcopy(M)\n return np.vstack((Mc, Polytope.__get_matrix_from_constraints(formatted_constraints, vars, frac)))\n","sub_path":"source/Polytope.py","file_name":"Polytope.py","file_ext":"py","file_size_in_byte":10894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279528953","text":"import os\nimport json\nimport logging\nimport requests\nimport re\n\nfrom hamcrest import *\nfrom nose.tools import *\nfrom nose.plugins.attrib import attr\nfrom nose_parameterized import parameterized, param\n\nfrom linkedlist import *\n\n@parameterized([\n (ListNode(5, None), ListNode(1, ListNode(2, ListNode(3, ListNode(4, None)))))\n])\ndef test_mergeTwoLists(l1, l2):\n l3 = mergeTwoLists(l1, l2)\n\n\ndef test_reorderList():\n l1 = ListNode(1, ListNode(2, ListNode(3, ListNode(4, None))))\n l3 = reorderList(l1)\n print(l3)\n\ndef test_reverseList():\n l1 = ListNode(1, ListNode(2, ListNode(3, ListNode(4, None))))\n l3 = reverseList(l1)\n print(l3)\n\ndef test_reorderList1():\n l1 = ListNode(1, None)\n l3 = reorderList(l1)\n print(l3)\n\ndef test_reorderListq():\n l1 = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5, None)))))\n l3 = reorderList(l1)\n print(l3)\n\ndef test_reverseBetween():\n l1 = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5, None)))))\n l3 = reverseBetween(l1, 2, 4)\n print(l3)\n\ndef test_mergeKLists():\n l1 = ListNode(1, None)\n l3 = mergeKLists([l1])\n print(l3)\n\ndef test_mergeKLists2():\n l1 = ListNode(1, ListNode(2, ListNode(3, None)))\n l2 = ListNode(4, ListNode(5, ListNode(6, None)))\n l3 = mergeKLists([l1,l2])\n print(l3)\n\ndef test_reverseKGroup():\n l1 = ListNode(1, None)\n l2 = ListNode(4, ListNode(5, ListNode(6, None)))\n l3 = reverseKGroup(l1, 2)\n print(l3)\n\ndef test_reverseKGroup2():\n l1 = ListNode(1, None)\n l2 = ListNode(4, ListNode(5, ListNode(6, None)))\n l3 = reverseKGroup(l2, 2)\n print(l3)\n\ndef test_reverseKGroup3():\n l1 = ListNode(1, None)\n l2 = ListNode(4, ListNode(5, ListNode(6, ListNode(7, None))))\n l3 = reverseKGroup(l2, 2)\n\ndef test_insertionSortList():\n l2 = ListNode(3, ListNode(4, ListNode(1, None)))\n l3 = insertionSortList(l2) \n print(l3)\n\ndef test_insertionSortList2():\n l2 = ListNode(6, ListNode(5, ListNode(4, ListNode(3, ListNode(2, ListNode(1, None))))))\n l3 = insertionSortList(l2) \n print(l3)\n","sub_path":"python/ll_test.py","file_name":"ll_test.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"329489487","text":"import organism\nimport pygame\nimport copy\nimport hitbox\nimport global_vars\n\nclass Player(organism.Organism):\n\tdef __init__(self, character_num, 
joystick):\n\t\tsuper(Player,self).__init__()\n\n\t\tself.base_image=pygame.image.load('Images/Player.png').convert()\n\t\tself.base_image.set_colorkey((0,0,0))\n\n\t\tif character_num==0:\n\t\t\tstaff=pygame.image.load('Images/Green.png').convert()\n\t\telif character_num==1:\n\t\t\tstaff=pygame.image.load('Images/Red.png').convert()\n\t\telif character_num==2:\n\t\t\tstaff=pygame.image.load('Images/Blue.png').convert()\n\t\telse:\n\t\t\tstaff=pygame.image.load('Images/Purple.png').convert()\n\n\t\tstaff.set_colorkey((0,0,0))\n\n\t\tself.hat_image=copy.copy(self.base_image)\n\t\that=pygame.image.load('Images/Hat.png').convert()\n\t\that.set_colorkey((0,0,0))\n\t\tself.hat_image.blit(hat,(0,1))\n\n\t\tself.rest_image=copy.copy(self.base_image)\n\t\tself.rest_image.blit(staff,(15,0))\n\n\t\tself.rest_hat_image=copy.copy(self.hat_image)\n\t\tself.rest_hat_image.blit(staff,(15,0))\n\n\t\tself.image=self.base_image\n\n\t\tself.melee_image=pygame.image.load('Images/Melee.png').convert()\n\t\tself.melee_image.set_colorkey((0,0,0))\n\t\tself.melee_image.blit(staff,(0,5))\n\t\n\t\tself.blast_image=pygame.image.load('Images/Blast.png').convert()\n\t\tself.blast_image.set_colorkey((0,0,0))\n\t\tself.blast_image.blit(staff, (4,1))\n\n\t\tself.health=3\n\t\tself.special_num=character_num\n\t\tself.joystick=joystick\n\t\tself.joystick.init()\n\t\tself.rect=self.image.get_rect()\n\t\tself.orientation=0\n\t\tself.attacking=0\n\t\tself.hat=character_num==0\n\n\tdef update_img(self):\n\t\tif self.health>0:\n\t\t\tif self.attacking:\n\t\t\t\tif self.hat:\n\t\t\t\t\tself.image=pygame.transform.rotate(self.hat_image,self.orientation-90)\n\t\t\t\telse:\n\t\t\t\t\tself.image=pygame.transform.rotate(self.base_image,self.orientation-90)\n\t\t\telse:\n\t\t\t\tif self.hat:\n\t\t\t\t\tself.image=pygame.transform.rotate(self.rest_hat_image,self.orientation-90)\n\t\t\t\telse:\n\t\t\t\t\tself.image=pygame.transform.rotate(self.rest_image,self.orientation-90)\n\t\telse:\n\t\t\tpass\n\t\t\t\n\tdef move(self):\n\t\txAxis=self.joystick.get_axis(0)\n\t\tyAxis=self.joystick.get_axis(1)\n\t\tif xAxis<-.25:\n\t\t\tself.rect=self.rect.move(-1,0)\n\t\telif xAxis>.25:\n\t\t\tself.rect=self.rect.move(1,0)\n\t\tif yAxis<-.25:\n\t\t\tself.rect=self.rect.move(0,-1)\n\t\telif yAxis>.25:\n\t\t\tself.rect=self.rect.move(0,1)\n\t\tif self.health>1 and not self.attacking:\n\t\t\tif abs(xAxis)>abs(yAxis):\n\t\t\t\tif xAxis<-.75:\n\t\t\t\t\tself.rect=self.rect.move(-1,0)\n\t\t\t\t\tself.orientation=180\n\t\t\t\telif xAxis>.75:\n\t\t\t\t\tself.rect=self.rect.move(1,0)\n\t\t\t\t\tself.orientation=0\n\t\t\telse:\n\t\t\t\tif yAxis<-.75:\n\t\t\t\t\tself.rect=self.rect.move(0,-1)\n\t\t\t\t\tself.orientation=90\n\t\t\t\telif yAxis>.75:\n\t\t\t\t\tself.rect=self.rect.move(0,1)\n\t\t\t\t\tself.orientation=270\n\t\tself.rect=self.rect.clamp(pygame.Rect(32,32,256,192))\n\n\tdef attack(self):\n\t\tif self.health>0:\n\t\t\tif self.joystick.get_button(0):\n\t\t\t\tself.attacking=15\n\t\t\t\t\n\t\t\t\tpos=self.rect.center\n\n\t\t\t\tif self.orientation==0:\n\t\t\t\t\tpos=(pos[0]+26,pos[1]-8)\n\t\t\t\telif self.orientation==90:\n\t\t\t\t\tpos=(pos[0],pos[1]-19)\n\t\t\t\telif self.orientation==180:\n\t\t\t\t\tpos=(pos[0]-11,pos[1]-8)\n\t\t\t\telse:\n\t\t\t\t\tpos=(pos[0],pos[1]+19)\n\n\t\t\t\tglobal_vars.add_hitbox(hitbox.Hitbox(self.melee_image, 0, 5, self.melee_poison, self.melee_strength, self.melee_stun, self.orientation, pos))\n\n\t\t\telif 
self.joystick.get_button(1):\n\t\t\t\tself.attacking=30\n\n\t\t\t\tglobal_vars.add_hitbox(hitbox.Hitbox(self.blast_image, self.blast_speed, 240, self.blast_poison, self.blast_strength, self.blast_stun, self.orientation, self.rect.center))\n\n\tdef update(self):\n\t\tif self.joystick.get_button(9):\n\t\t\tself.health=0\n\t\tif self.stun==0:\n\t\t\tself.move()\n\t\t\tif not self.attacking:\n\t\t\t\tself.attack()\n\t\t\telse:\n\t\t\t\tself.attacking-=1\n\t\t# apply poison damage once per second (every 60 ticks), not on every off-tick\n\t\tif self.poison and self.clock%60==0:\n\t\t\tself.health-=1\n\t\t\tself.poison-=1\n\t\tif self.health<0:\n\t\t\tself.health=0\n\t\tself.update_img()\n","sub_path":"Sorcery/Backup/backup_player.py","file_name":"backup_player.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"572359172","text":"import logging\n\nfrom django.core.management import BaseCommand\n\nfrom feedpocket.feeds.models import Feed\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n    can_import_settings = True\n\n    pairs = set()\n\n    def handle(self, *args, **options):\n        self.find_duplicates(list(Feed.objects.all().order_by('sync_frequency', 'url', 'title')))\n        self.find_duplicates(list(Feed.objects.all().order_by('sync_frequency', 'title')))\n        self.find_duplicates(list(Feed.objects.all().order_by('title')))\n        for feed1, feed2 in self.pairs:\n            keep = self.duplicate_question(feed1, feed2)\n            if keep == '1':\n                self.merge(feed1, feed2)\n            elif keep == '2':\n                self.merge(feed2, feed1)\n            else:\n                self.stdout.write('Keeping both :)')\n\n    def find_duplicates(self, feeds):\n        for i in range(len(feeds) - 1):\n            if feeds[i].url.startswith(feeds[i + 1].url) or \\\n                    feeds[i + 1].url.startswith(feeds[i].url) or \\\n                    feeds[i].title == feeds[i + 1].title and \\\n                    feeds[i].title != '-':\n                if feeds[i].id > feeds[i + 1].id:\n                    self.pairs.add((feeds[i], feeds[i + 1]))\n                else:\n                    self.pairs.add((feeds[i + 1], feeds[i]))\n\n    def duplicate_question(self, feed1, feed2):\n        self.stdout.write('1: {f.title} - {f.url} ({count})'.format(\n            f=feed1,\n            count=feed1.subscriptions.count()\n        ))\n        self.stdout.write('2: {f.title} - {f.url} ({count})'.format(\n            f=feed2,\n            count=feed2.subscriptions.count()\n        ))\n        return input('Those seem to be the same, which should I keep (0 means both)? 
')\n\n    def merge(self, keep, remove):\n        self.stdout.write('Merging {} into {}'.format(remove.url, keep.url))\n        self.stdout.write('Moving {} subscriptions'.format(remove.subscriptions.count()))\n        remove.subscriptions.update(feed_id=keep.id)\n        remove.delete()\n        self.stdout.write('Done')\n","sub_path":"feedpocket/feeds/management/commands/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"362187030","text":"# @Author: Li Yuan Rong\n# @Date: 2021-06-19 08:52:02\n# @Last Modified by:   Li Yuan Rong\n# @Last Modified time: 2021-07-08 18:32:49\n#!/usr/bin/python\n\n\nimport socket\nimport os\n\nlocalIP = \"0.0.0.0\"\nlocalPort = 20001\nbufferSize = 1024\n\n# Create a datagram socket\nUDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n# Bind to address and ip\nUDPServerSocket.bind((localIP, localPort))\n\nprint(\"UDP server up and listening\")\n\n# Listen for incoming datagrams\nwhile True:\n    bytesAddressPair = UDPServerSocket.recvfrom(bufferSize)\n    message = bytesAddressPair[0]\n    clientMsg = message.decode(\"utf-8\")\n    keyword = clientMsg.split()\n\n    with open(\"/etc/hosts\", \"r+\") as f:\n        lines = f.readlines()\n        f.seek(0)\n        for line in lines:\n            if keyword[2] not in line:\n                f.write(line)\n        f.write(clientMsg)\n        f.truncate()\n    \n    os.system('sudo service dnsmasq restart')\n    # Sending a reply to client\n    #UDPServerSocket.sendto(bytesToSend, address)\n","sub_path":"udpSvr.py","file_name":"udpSvr.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}{"seq_id":"213876611","text":"import math\nimport re\n\n\ndef splitSpecial(word):\n    return [char for char in word]\n\n\ndef intToHexWith0(n):\n    binTemp = bin(int(n, 16))\n    binTemp = int(binTemp, 2)\n    binTemp = format(binTemp, '#016b')\n    binTemp = binTemp[2:]\n\n    return binTemp\n\n\ndef binaryStringToHex(hexa):\n    binary_string = hexa\n\n    return '%0*X' % ((len(binary_string) + 3) // 4, int(binary_string, 2))\n\n\ndef isMultipleof16(n):\n    while (n > 0):\n        n = n - 16\n        if (n == 0):\n            return 1\n    return 0\n\n\ndef toBin(x):\n    return toBin(x // 2) + [x % 2] if x > 1 else [x]\n\n\ndef checkIpv4Format(ip, maskOrIp):\n    ipSplit = ip.split(\".\")\n    count = len(ipSplit)\n\n    errorMessage = \"Invalid IP\"\n    if (maskOrIp == \"masque\"):\n        errorMessage = \"Invalid mask\"\n    if (count != 4):\n        raise Exception(errorMessage)\n    for i in range(len(ipSplit)):\n\n        try:\n            ipSplitToInt = int(ipSplit[i])\n        except ValueError:\n            raise Exception(errorMessage)\n        # range-check outside the try/except so it only runs on parsed values\n        if ipSplitToInt < 0 or ipSplitToInt > 255:\n            raise Exception(errorMessage)\n\n\ndef checkPrefix(prefix, type):\n    try:\n        prefixInt = int(prefix)\n    except ValueError:\n        raise Exception(\"Invalid prefix\")\n    if type == \"IPv4\":\n        if prefixInt < 1 or prefixInt > 32:\n            raise Exception(\"Invalid prefix\")\n    else:\n        if prefixInt < 1 or prefixInt > 128:\n            raise Exception(\"Invalid prefix\")\n\n\ndef getClass(ip):\n    ipSplit = ip.split(\".\")\n    ipSplitToInt = int(ipSplit[0])\n\n    if 1 <= ipSplitToInt <= 126:\n        return \"A\"\n    if 128 <= ipSplitToInt <= 191:\n        return \"B\"\n    if 192 <= ipSplitToInt <= 223:\n        return \"C\"\n    else:\n        raise Exception(\"Invalid IP\")\n\n\ndef getMask(ip):\n    if (getClass(ip) == \"A\"):\n        return \"255.0.0.0\"\n    if (getClass(ip) == \"B\"):\n        return \"255.255.0.0\"\n    if (getClass(ip) == \"C\"):\n        return \"255.255.255.0\"\n\n\ndef 
getNetworkAddress(ip, mask):\n networkAddress = \"\"\n ipSplit = ip.split(\".\")\n maskSplit = mask.split(\".\")\n\n for i in range(len(ipSplit)):\n ipSplitInt = int(ipSplit[i])\n maskSplitInt = int(maskSplit[i])\n\n if (i == 0):\n networkAddress = str(ipSplitInt & maskSplitInt)\n else:\n networkAddress = networkAddress + \".\" + str(ipSplitInt & maskSplitInt)\n\n return networkAddress\n\n\ndef onesComplement(n):\n if n == 0:\n return 255\n number_of_bits = (int)(math.floor(math.log(n) / math.log(2))) + 1\n return ((1 << number_of_bits) - 1) ^ n\n\n\ndef getBroadcatAddress(ip, mask):\n broadcast = \"\"\n ipSplit = ip.split(\".\")\n maskSplit = mask.split(\".\")\n\n for i in range(len(ipSplit)):\n if i == 0:\n broadcast = str(onesComplement(int(maskSplit[i])) | int(ipSplit[i]))\n else:\n broadcast = broadcast + \".\" + str(onesComplement(int(maskSplit[i])) | int(ipSplit[i]))\n\n return broadcast\n\n\ndef getLastOrder(ip, mask):\n lastOrder = str(int(getBroadcatAddress(ip, mask).split(\".\")[0])) + \".\" + str(\n int(getBroadcatAddress(ip, mask).split(\".\")[1])) + \".\" + str(\n int(getBroadcatAddress(ip, mask).split(\".\")[2])) + \".\" + str(\n int(getBroadcatAddress(ip, mask).split(\".\")[3]) - 1)\n return lastOrder\n\n\ndef getFirstOrder(ip, mask):\n firstOrder = str(int(getNetworkAddress(ip, mask).split(\".\")[0])) + \".\" + str(\n int(getNetworkAddress(ip, mask).split(\".\")[1])) + \".\" + str(\n int(getNetworkAddress(ip, mask).split(\".\")[2])) + \".\" + str(int(getNetworkAddress(ip, mask).split(\".\")[3]) + 1)\n return firstOrder\n\n\ndef getHostPartIndex(ip):\n if (getClass(ip) == 'A'):\n return 1\n if (getClass(ip) == 'B'):\n return 2\n if (getClass(ip) == 'C'):\n return 3\n\n\ndef hostBit(ip):\n return (4 - getHostPartIndex(ip)) * 8\n\n\ndef getAddressNumber(ip):\n numberOf = pow(2, hostBit(ip)) - 2\n return numberOf\n\n\ndef decimalToSubnet(n):\n if n == 1:\n return 128\n if n == 2:\n return 192\n if n == 3:\n return 224\n if n == 4:\n return 240\n if n == 5:\n return 248\n if n == 6:\n return 252\n if n == 7:\n return 254\n if n == 8:\n return 255\n\n\ndef getMaskSpecial(prefix):\n if prefix < 8:\n return decimalToSubnet(prefix) + \".0.0.0\"\n\n subnetMask = \"\"\n i = 0\n while (prefix >= 8):\n prefix = prefix - 8\n i = i + 1\n\n for index in range(i):\n initial = \"255\"\n if (index == 0):\n subnetMask = initial\n else:\n subnetMask = subnetMask + \".\" + initial\n\n subnetMask = subnetMask + \".\" + str(decimalToSubnet(prefix))\n\n while (len(subnetMask.split(\".\")) != 4):\n subnetMask = subnetMask + \".0\"\n\n return subnetMask\n\n\ndef hostBitSpecial(prefix):\n return 32 - prefix\n\n\ndef getAddressNumberSpecial(prefix):\n numberOf = pow(2, hostBitSpecial(prefix)) - 2\n return numberOf\n\n\ndef checkIpv6Format(ip):\n ipSplit = ip.split(\":\")\n count = len(ipSplit)\n\n if (count != 8):\n raise Exception(\"IP erroné\")\n for i in range(count):\n try:\n hex(int(\"0x\" + (ipSplit[i]), 16))\n except:\n raise Exception(\"IP erroné\")\n finally:\n if (len(splitSpecial(ipSplit[i])) != 4):\n raise Exception(\"IP erroné\")\n\n\ndef compressedIpv6(ip):\n removedExtraZeros = ip.replace(\"0000\", \"*\")\n removedExtraZeros = re.sub(\":0+\", \":\", removedExtraZeros)\n removedExtraZeros = re.sub(\":\\\\*:\\\\*(:\\\\*)+:\", \"::\", removedExtraZeros)\n removedExtraZeros = re.sub(\"::\\\\*\", \"::\", removedExtraZeros)\n removedExtraZeros = removedExtraZeros.replace(\"*\", \"0\")\n return removedExtraZeros\n\n\ndef mergeSplitHex(hex):\n ipv6 = \"\"\n for i in range(len(hex)):\n if 
((isMultipleof16(i + 1) == 1) and i != (len(hex) - 1)):\n ipv6 = ipv6 + hex[i] + \":\"\n else:\n ipv6 = ipv6 + hex[i]\n\n return ipv6\n\n\ndef binaryStringToHexSpecial(hexa):\n hexaSplit = hexa.split(\":\")\n hexadecimal = \"\"\n\n for i in range(len(hexaSplit)):\n if (i == 0):\n hexadecimal = binaryStringToHex(hexaSplit[i])\n else:\n hexadecimal = hexadecimal + \":\" + binaryStringToHex(hexaSplit[i])\n\n return hexadecimal\n\n\ndef networkAddress(ip, prefix):\n ipSplit = ip.split(\":\")\n binaryIp = \"\"\n binaryPrefixWithoutDelimiter = \"\"\n networkAddress = \"\"\n\n for i in range(len(ipSplit)):\n binTemp = intToHexWith0(ipSplit[i])\n\n binTemp = binTemp.zfill(16)\n if (i == 0):\n binaryIp = binTemp\n else:\n binaryIp = binaryIp + binTemp\n\n binarySplit = splitSpecial(binaryIp)\n\n for j in range(len(binarySplit)):\n if (j > (prefix - 1)):\n binarySplit[j] = '0'\n networkAddress = binaryStringToHexSpecial(mergeSplitHex(binarySplit))\n networkAddress = compressedIpv6(networkAddress)\n return networkAddress\n","sub_path":"Metier.py","file_name":"Metier.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"472469281","text":"import logging\nfrom abc import ABC, abstractmethod\nimport os.path\nimport os\nimport pandas as pd\n\nclass AbstractDataLoader(ABC):\n\n def __init__(self):\n super().__init__()\n\n @abstractmethod\n def load_data(self, filename):\n logging.info('Checking file exists.')\n\n if not os.path.isfile(filename):\n logging.error('File does not exist')\n raise Exception(\"File does not exist\")\n else:\n logging.info('Found file: ' + filename)\n\nclass FileDataLoader(AbstractDataLoader):\n\n # Initialization\n def __init__(self, filename: str):\n super().__init__()\n logging.info('Initializing Data Loading')\n self.filename = filename\n \n def check_file_exists(self):\n if not os.path.isfile(self.filename):\n logging.error('File does not exist')\n raise Exception(\"File \\\"{}\\\" does not exist\".format(self.filename))\n else:\n logging.info('Found file: ' + self.filename)\n\n # Load data from file and return data\n def load_data(self):\n # Check file exists\n logging.info('Checking file exists.')\n self.check_file_exists()\n\n # Load data\n logging.info('Loading data using pandas')\n df = pd.read_csv(self.filename)\n\n return df\n\n","sub_path":"customer_classification/util/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"292686037","text":"#Command Prompt\nimport os\nwhile True:\n try:\n user = input(os.getcwd()+'>>')\n if user == 'exit':\n break\n if user[0:2] == 'cd':\n os.chdir(user[3:])\n continue\n os.system(user+' & pause')\n except:\n print('Didnae work mate')\n","sub_path":"CMDbutPython.py","file_name":"CMDbutPython.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"629124790","text":"import re\nimport urllib.request\n\ndef imageCrawler(url, toPath):\n headers = {\n \"User-Agent\": \"Mozilla/5.0(iPad;U;CPUOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5\"\n }\n\n req = urllib.request.Request(url, headers=headers)\n response = urllib.request.urlopen(req)\n\n # HtmlStr = response.read().decode(\"utf-8\")\n\n HtmlStr = response.read()\n with 
open(r\"/home/intfreedom/PycharmProjects/identify/000/day19/0.Crawler_study/yihaodian.html\", \"wb\") as f:\n f.write(HtmlStr)\n\n\n pat = r''\n re_image = re.compile(pat, re.S)\n imagesList = re_image.findall(HtmlStr)\n print(imagesList)\n print(len(imagesList))\n print(imagesList[0])\n\nurl = \"http://search.yhd.com/c9719-0-0\"\n\ntoPath = r\"/home/intfreedom/PycharmProjects/identify/000/day19/0.Crawler_study/image\"\n\nimageCrawler(url, toPath)","sub_path":"identify/000/day19/0.Crawler_study/Crawler_girl_picture.py","file_name":"Crawler_girl_picture.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"81560889","text":"import os\nimport tempfile\nimport unittest\n\nimport ijson\nimport mock\n\nfrom lint4jsondb import \\\n Lint4JsonCompilationDb, LintExecutor, \\\n JsonDbEntry, Invocation, BaseVisitor\n\n\nclass Lint4JsonCompilationDbUnitTest(unittest.TestCase):\n def setUp(self):\n self._json_tested = None\n\n def tearDown(self):\n if self._json_tested is not None and os.path.exists(self._json_tested):\n os.remove(self._json_tested)\n\n def __create_temp_json(self, content):\n with tempfile.TemporaryFile('wb', delete=False) as f:\n self._json_tested = f.name\n f.write(content)\n\n def test_00_coverage_for_repr(self):\n e = JsonDbEntry()\n e.file = \"\"\n e.directory = \"\"\n e.invocation = Invocation()\n e.invocation.includes = ['i1', 'i2']\n e.invocation.defines = ['d1', 'd2']\n\n as_string = str(e)\n\n self.assertIn(\":\", as_string)\n self.assertIn(\"\", as_string)\n self.assertIn(\"includes: i1\\ti2\", as_string)\n self.assertIn(\"defines: d1\\td2\", as_string)\n\n def test_01_json_db_does_not_exist(self):\n with self.assertRaises(Exception) as cm:\n Lint4JsonCompilationDb('unknown.json')\n\n self.assertIn(\"unknown.json\", str(cm.exception))\n\n def test_02a_broken_json_is_reported(self):\n self.__create_temp_json(b'broken')\n\n with self.assertRaises(ijson.JSONError):\n Lint4JsonCompilationDb(self._json_tested)\n\n def test_02b_not_having_an_array_does_not_create_an_entry(self):\n self.__create_temp_json(b'{}')\n\n db = Lint4JsonCompilationDb(self._json_tested)\n self.assertEqual(len(db.items), 0)\n\n def test_02c_having_an_empty_array_does_not_create_an_entry(self):\n self.__create_temp_json(b'[]')\n\n db = Lint4JsonCompilationDb(self._json_tested)\n self.assertEqual(len(db.items), 0)\n\n def test_02d_having_an_incomplete_object_fails_for_missing_token(self):\n self.__create_temp_json(b'[{\"key\":\"value\"}]')\n\n with self.assertRaises(AssertionError) as cm:\n Lint4JsonCompilationDb(self._json_tested)\n\n self.assertIn(\"Need to have at least one token\", str(cm.exception))\n\n def test_03a_source_trail_style_json_db(self):\n self.__create_temp_json(\n b'[{'\n b'\"directory\": \"F:/VS2017/working\",'\n b'\"command\": \"clang-tool -fms-extensions'\n b' -isystem \\\\\"F:/VS2017/working/CMake\\\\\"'\n b' -isystem \\\\\"C:/Qt/5.10.0/msvc2017_64/include\\\\\"'\n b' -D _DEBUG -D _MT -D _DLL -D WIN32 -D _WINDOWS\", '\n b'\"file\": \"F:/VS2017/working/mocs_compilation.cpp\"}]')\n\n db = Lint4JsonCompilationDb(self._json_tested)\n\n self.assertEqual(len(db.items), 1)\n self.assertEqual(db.items[0].directory,\n \"F:/VS2017/working\")\n self.assertEqual(db.items[0].file,\n \"F:/VS2017/working/mocs_compilation.cpp\")\n invocation = db.items[0].invocation\n self.assertListEqual(invocation.defines,\n ['_DEBUG', '_MT', '_DLL', 'WIN32'])\n self.assertListEqual(invocation.includes,\n 
['F:/VS2017/working/CMake',\n 'C:/Qt/5.10.0/msvc2017_64/include'])\n\n def test_03b_cmake_style_json_db(self):\n self.__create_temp_json(\n b'[{'\n b'\"directory\": \"/home/user/build/login\",'\n b'\"command\": \"/opt/clang/bin/clang++ '\n b' -DQT_CORE_LIB -DQT_GUI_LIB -DQT_NO_DEBUG -DQT_WIDGETS_LIB'\n b' -I/home/user/code/login/'\n b' -Ilogin/login_autogen/include'\n b' -isystem /opt/Qt/5.11.0/gcc_64/include'\n b' -isystem /opt/Qt/5.11.0/gcc_64/include/QtWidgets'\n b' -Weverything -fPIC -std=gnu++14'\n b' -o login/CMakeFiles/login.dir/LoginDialog.cpp.o'\n b' -c /home/user/code/login/LoginDialog.cpp\",'\n b'\"file\": \"/home/user/code/login/LoginDialog.cpp\"}]')\n\n db = Lint4JsonCompilationDb(self._json_tested)\n self.assertEqual(len(db.items), 1)\n self.assertEqual(db.items[0].directory,\n \"/home/user/build/login\")\n self.assertEqual(db.items[0].file,\n \"/home/user/code/login/LoginDialog.cpp\")\n invocation = db.items[0].invocation\n self.assertListEqual(invocation.defines,\n ['QT_CORE_LIB', 'QT_GUI_LIB',\n 'QT_NO_DEBUG', 'QT_WIDGETS_LIB'])\n self.assertListEqual(invocation.includes,\n ['/home/user/code/login/',\n 'login/login_autogen/include',\n '/opt/Qt/5.11.0/gcc_64/include',\n '/opt/Qt/5.11.0/gcc_64/include/QtWidgets'\n ])\n\n def test_03c_qbs_created_for_msvc_json_db(self):\n self.__create_temp_json(\n b'[{\"arguments\":[\"C:/Program Files (x86)/Microsoft Visual '\n b'Studio/2017/Professional/VC/Tools/MSVC/14.13.26128/bin/'\n b'HostX64/x64/cl.exe\",\"/nologo\",\"/c\",\"/EHsc\",\"/Od\",\"/Zi\",\"/MDd\",'\n b'\"/IC:\\\\\\\\Qt\\\\\\\\5.11.0\\\\\\\\msvc2017_64\\\\\\\\include\",'\n b'\"/IC:\\\\\\\\Qt\\\\\\\\5.11.0\\\\\\\\msvc2017_64\\\\\\\\include\\\\\\\\QtCore\",'\n b'\"/DUNICODE\",\"/D_UNICODE\",\"/DWIN32\",\"/DQT_CORE_LIB\",'\n b'\"/DQT_GUI_LIB\",\"/DWINVER=0x0502\",'\n b'\"/FoG:\\\\\\\\qzipreader\\\\\\\\default\\\\\\\\qzipreader.ffaa043c\\\\\\\\3a52ce'\n b'd4d9\\\\\\\\main.cpp.obj\",'\n b'\"G:\\\\\\\\qzipreader\\\\\\\\main.cpp\",\"/TP\",\"/FS\",'\n b'\"/Zm200\"],'\n b'\"directory\":\"G:/qzipreader/default/'\n b'qzipreader.ffaa043c\",'\n b'\"file\":\"G:/qzipreader/main.cpp\"}]'\n )\n\n db = Lint4JsonCompilationDb(self._json_tested)\n self.assertEqual(len(db.items), 1)\n self.assertEqual(db.items[0].directory,\n \"G:/qzipreader/default/qzipreader.ffaa043c\")\n self.assertEqual(db.items[0].file,\n \"G:/qzipreader/main.cpp\")\n invocation = db.items[0].invocation\n self.assertListEqual(invocation.defines,\n ['UNICODE', '_UNICODE', 'WIN32', 'QT_CORE_LIB',\n 'QT_GUI_LIB', 'WINVER=0x0502'])\n self.assertListEqual(invocation.includes,\n [r'C:\\Qt\\5.11.0\\msvc2017_64\\include',\n r'C:\\Qt\\5.11.0\\msvc2017_64\\include\\QtCore'])\n\n def test_04_ensure_BaseVisitor_never_matches(self):\n b = BaseVisitor()\n\n with self.assertRaises(NotImplementedError) as cm:\n b.matches(\"\")\n\n self.assertIn(\"BaseVisitor can not match\", str(cm.exception))\n\n\nclass LintExecutorUnitTest(unittest.TestCase):\n\n @mock.patch('os.path.exists')\n @mock.patch('os.makedirs')\n @mock.patch('subprocess.call')\n def test_invocation(self, mock_call, mock_os_path_exists, mock_os_makedirs):\n mock_os_path_exists.return_value = False\n\n lint = LintExecutor(\"\", \"\", [\"o1\", \"o2\"])\n\n item_to_process = JsonDbEntry()\n item_to_process.file = \"\"\n item_to_process.directory = \"\"\n item_to_process.invocation = Invocation()\n item_to_process.invocation.defines = [\"d1\", \"d2\"]\n item_to_process.invocation.includes = [\"i1\", \"i2\"]\n\n lint.execute(item_to_process)\n\n 
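# The test then verifies the directory-creation call count and the composed lint command line argument by argument.\n        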
self.assertEqual(mock_os_makedirs.call_count, 1)\n self.assertEqual(mock_os_makedirs.call_args[0][0], \"\")\n\n self.assertEqual(mock_call.call_count, 1)\n args = mock_call.call_args[0][0]\n\n # ensure lint is executed in the build directory\n self.assertDictEqual(mock_call.call_args[1], {'cwd': ''})\n\n # ensure lint is called\n self.assertEqual(args.pop(0), os.path.join(\"\", \"\"))\n # ensure lint suppresses the banner line\n self.assertEqual(args.pop(0), '-b')\n # ensure lint's \"lnt\" directory was added\n self.assertEqual(args.pop(0), '-i\"/lnt\"')\n # ensure all other options are passed as-is\n self.assertEqual(args.pop(0), \"o1\")\n self.assertEqual(args.pop(0), \"o2\")\n # ensure defines are passed as defines\n self.assertEqual(args.pop(0), \"-dd1\")\n self.assertEqual(args.pop(0), \"-dd2\")\n # ensure include paths a re passed quoted\n self.assertEqual(args.pop(0), '-i\"i1\"')\n self.assertEqual(args.pop(0), '-i\"i2\"')\n # ensure that the file to check is passed finally\n self.assertEqual(args.pop(0), \"\")\n","sub_path":"test_lint4jsondb.py","file_name":"test_lint4jsondb.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"268894700","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\n\nfrom flask import Flask, render_template, request, redirect, flash, url_for\n\n# ----- INIT APPLICATION -----\n\napp = Flask(__name__)\napp.secret_key = \"something_special\"\n\n\n# ----- HELPER FUNCTIONS -----\n\n\ndef formatDate(date_str):\n \"\"\" Return a datetime object from a Y-M-d H:M:S date string \"\"\"\n return datetime.datetime.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n\n\n# ----- DATA HANDLING -----\n\nCOST_PER_PLACE = 3\nMAX_PLACES_PER_CLUB = 12\n\n# -- load jsons\n\n\ndef loadClubs():\n with open(\"clubs.json\") as c:\n listOfClubs = json.load(c)[\"clubs\"]\n return listOfClubs\n\n\ndef loadCompetitions():\n with open(\"competitions.json\") as comps:\n listOfCompetitions = json.load(comps)[\"competitions\"]\n return listOfCompetitions\n\n\n# -- save bookings in dict\n\n\ndef addBooking(club, competition, places):\n \"\"\"Save club's booking to competitions in a dictionnay\n\n Parameters\n ----------\n club : str\n The name of the club booking the places\n competition : str\n The name of the competition for which places are booked\n places : int\n The number of places to book\n \"\"\"\n\n if club not in booking:\n booking[club] = {}\n\n if competition not in booking[club]:\n booking[club][competition] = 0\n\n booking[club][competition] += places\n\n\ndef getBooking(club, competition):\n \"\"\"Return the current club's booking number for a given competition\n\n Parameters\n ----------\n club : str\n The name of the club\n competition : str\n The name of the competition\n \"\"\"\n\n if club not in booking:\n return 0\n if competition not in booking[club]:\n return 0\n\n return booking[club][competition]\n\n\n# -- define globals\n\ncompetitions = loadCompetitions()\nclubs = loadClubs()\nbooking = {}\n\n\n# ----- EXCEPTIONS -----\n\n\nclass PointValueError(Exception):\n \"\"\" Returned when there is a problem with the clubs' points \"\"\"\n\n pass\n\n\nclass PlaceValueError(Exception):\n \"\"\" Returned when there is a problem with the competitions' places \"\"\"\n\n pass\n\n\nclass EventDateError(Exception):\n \"\"\" Returned when there is an error with competition dates \"\"\"\n\n pass\n\n\n# ----- ROUTES -----\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"This route displays 
the landing page with the authentication form \"\"\"\n\n    return render_template(\"index.html\", clubs=clubs)\n\n\n@app.route(\"/showSummary\", methods=[\"POST\"])\ndef showSummary():\n    \"\"\"This route validates the provided authentication information.\n\n    POST Parameters\n    ----------\n    email : str\n        The email to search in the club 'DB'\n    \"\"\"\n    try:\n        club = [club for club in clubs if club[\"email\"] == request.form[\"email\"]][0]\n        return showSummaryDisplay(club)\n    except IndexError:\n        flash(\"The provided email is invalid\")\n        return render_template(\"index.html\", clubs=clubs), 404\n\n\ndef showSummaryDisplay(club, status_code=200):\n    \"\"\"Gather information for the main page (welcome.html) and render it.\n\n    This main page is called from various routes with various HTTP status_code values.\n    ( showSummaryDisplay with HTTP 200 )\n    ( book/<competition>/<club> with HTTP 200 / 400 / 404 )\n    ( purchasePlaces with HTTP 200 / 400 / 404 )\n\n    This function will collect past & incoming competition information along\n    with the current club's information and all the other clubs.\n\n\n    Parameters\n    ----------\n    club : dict\n        The currently 'authenticated' club\n    status_code : int\n        The HTTP status_code to return with the HTML body\n    \"\"\"\n\n    now = datetime.datetime.now()\n\n    past_competitions = [\n        compet for compet in competitions if formatDate(compet[\"date\"]) <= now\n    ]\n\n    next_competitions = [\n        compet for compet in competitions if formatDate(compet[\"date\"]) > now\n    ]\n\n    return (\n        render_template(\n            \"welcome.html\",\n            club=club,\n            past_competitions=past_competitions,\n            next_competitions=next_competitions,\n            clubs=clubs,\n        ),\n        status_code,\n    )\n\n\n@app.route(\"/book/<competition>/<club>\")\ndef book(competition, club):\n    \"\"\"This route displays the given competition's information along with a purchase form.\n\n    It will display the main page (showSummaryDisplay) instead if something goes wrong.\n\n    Parameters\n    ----------\n    club : str\n        The name of the currently 'authenticated' club\n        # NOTE don't put the name of the identified club in the URL !\n    competition : str\n        The name of the competition to display\n    \"\"\"\n\n    # Is the provided club valid ?\n    try:\n        foundClub = [c for c in clubs if c[\"name\"] == club][0]\n    except IndexError:\n        flash(\"The provided club is invalid\")\n        return render_template(\"index.html\", clubs=clubs), 404\n\n    # Is the provided competition valid ?\n    # Is the competition date valid ?\n    try:\n        foundCompetition = [c for c in competitions if c[\"name\"] == competition][0]\n\n        now = datetime.datetime.now()\n\n        if formatDate(foundCompetition[\"date\"]) > now:\n\n            booked = getBooking(foundClub[\"name\"], foundCompetition[\"name\"])\n\n            return (\n                render_template(\n                    \"booking.html\",\n                    club=foundClub,\n                    competition=foundCompetition,\n                    booked=booked,\n                    maxplaces=min(\n                        int(foundClub[\"points\"]) // COST_PER_PLACE,\n                        MAX_PLACES_PER_CLUB - booked,\n                    ),\n                ),\n                200,\n            )\n        else:\n            raise EventDateError(\"The booking page for a past competition is closed\")\n\n    except IndexError:\n        flash(\"The provided competition is invalid\")\n        status_code = 404\n\n    except EventDateError as error_msg:\n        flash(error_msg)\n        status_code = 400\n\n    # return redirect(url_for(\"showSummary\"), status_code)\n    return showSummaryDisplay(foundClub, status_code)\n\n\n@app.route(\"/purchasePlaces\", methods=[\"POST\"])\ndef purchasePlaces():\n    \"\"\"This route validates the purchase made from the competition booking page.\n\n    Once done, it will display the main page (showSummaryDisplay) again.\n\n    POST Parameters\n    ----------\n    club : str 
(hidden)\n The name of the currently 'authentified' club\n competition : str (hidden)\n The name of the competition on which to book places\n places : int\n The number of places to book for the given club in the given competition\n if all the validation steps are validated.\n \"\"\"\n\n # Is the provided club valid ?\n try:\n club = [c for c in clubs if c[\"name\"] == request.form[\"club\"]][0]\n except IndexError:\n flash(\"The provided club is invalid\")\n return render_template(\"index.html\", clubs=clubs), 404\n\n # Is the provided competition valid ?\n # Also check the various possible input errors\n try:\n competition = [\n c for c in competitions if c[\"name\"] == request.form[\"competition\"]\n ][0]\n\n placesRequired = int(request.form[\"places\"])\n club_points = int(club[\"points\"])\n competition_places = int(competition[\"numberOfPlaces\"])\n\n if placesRequired < 1:\n\n raise PointValueError(\"Something went wrong-please try again\")\n\n elif club_points < placesRequired * COST_PER_PLACE:\n\n raise PointValueError(\"You don't have enough points available\")\n\n elif competition_places < placesRequired:\n\n raise PlaceValueError(\"You can't book more places than available\")\n\n elif (\n placesRequired + getBooking(club[\"name\"], competition[\"name\"])\n > MAX_PLACES_PER_CLUB\n ):\n\n raise PlaceValueError(\n f\"You can't book more than {MAX_PLACES_PER_CLUB} places per competition\"\n )\n\n else:\n\n club[\"points\"] = club_points - (placesRequired * COST_PER_PLACE)\n competition[\"numberOfPlaces\"] = competition_places - placesRequired\n\n addBooking(club[\"name\"], competition[\"name\"], placesRequired)\n\n flash(\"Great-booking complete!\")\n status_code = 200\n\n except IndexError:\n flash(\"The provided competition is invalid\")\n status_code = 404\n\n except (PointValueError, PlaceValueError) as error_msg:\n flash(error_msg)\n status_code = 400\n\n return showSummaryDisplay(club, status_code)\n # return redirect(url_for(\"showSummary\"), status_code)\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"This route redirect to the landing page\n\n # NOTE : this page should actually logout users...\n \"\"\"\n return redirect(url_for(\"index\"))\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"583846503","text":"from PIL import Image\nimport numpy as np\nimport math\nimport random\n\n\"\"\"Helper class to ensure valid object placement\"\"\"\nclass Rectangle(object):\n def __init__ (self, lowX, width, lowY, height):\n self.left = lowX\n self.right = lowX + width\n self.bot = lowY\n self.top = lowY + height\n self.cenX = (self.left + self.right)/2\n self.cenY = (self.bot + self.top)/2\n def doesCollideWith(self, rect):\n \"\"\"returns true if two rectangles have overlap\"\"\"\n return (self.left < rect.right and self.right > rect.left and self.bot < rect.top and self.top > rect.bot)\n def hasCollision(self, rects):\n \"\"\"returns true if rectangle has overlap with any other rectangle in array\"\"\"\n for r in rects:\n if self.doesCollideWith(r):\n return True\n return False\n def within(self, maxD, p1, p2):\n \"\"\"returns true if points are within maximum distance of each other\"\"\"\n dist = math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n return dist < maxD\n def insideCircle(self, radius, centerX, centerY):\n \"\"\"returns true if no area of rectangle is outside of bounding circle\"\"\"\n center = (centerX, centerY)\n 
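# The rectangle fits inside the circle exactly when all four corners lie within the radius of the center.\n        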
vertices = ((self.left, self.bot), (self.left, self.top), (self.right, self.bot), (self.right, self.top))\n        isInside = True\n        for v in vertices:\n            isInside = isInside and self.within(radius, center, v)\n        return isInside\n\n    def insideRectangle(self, topL, bottomR):\n        \"\"\"returns true if no area of rectangle is outside of bounding rectangle\"\"\"\n        vertices = ((self.left, self.bot), (self.left, self.top), (self.right, self.bot), (self.right, self.top))\n        isInside = True\n        for v in vertices:\n            isInside = isInside and v[0] < bottomR[0] and v[0] > topL[0] and v[1] > bottomR[1] and v[1] < topL[1]\n        return isInside\n# Helper functions\nsign = lambda x: math.copysign(1, x)\ncosine = lambda theta: math.cos(math.radians(theta))\nsine = lambda theta: math.sin(math.radians(theta))\nw = lambda img: img.size[0]\nh = lambda img: img.size[1]\ngetRot = lambda rotLimit: int(np.random.normal(0, rotLimit/3))\ndef pasteOn(back, add, x, y):\n    back.paste(add, (int(x), int(y)), add.convert('RGBA'))\n","sub_path":"scripts/objects/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"333928731","text":"from django.db import models\n\nfrom cyder.cydhcp.range.models import Range\nfrom cyder.cydhcp.vrf.models import Vrf\nfrom cyder.cydhcp.workgroup.models import Workgroup\nfrom cyder.core.ctnr.models import Ctnr\nfrom cyder.core.system.models import System\nfrom cyder.cydns.domain.models import Domain\nfrom cyder.base.mixins import ObjectUrlMixin\n\n\nclass DynamicInterface(models.Model, ObjectUrlMixin):\n    ctnr = models.ForeignKey(Ctnr, null=False)\n    range = models.ForeignKey(Range, null=False)\n    workgroup = models.ForeignKey(Workgroup, null=True)\n    mac = models.CharField(max_length=19,\n                           help_text=\"Mac address in format XX:XX:XX:XX:XX:XX\")\n    system = models.ForeignKey(System, null=True, blank=True,\n                               help_text=\"System to associate \"\n                                         \"the interface with\")\n    vrf = models.ForeignKey(Vrf, null=True)\n    domain = models.ForeignKey(Domain, null=True)\n\n    search_fields = ('mac',)  # one-element tuple, not a bare string\n\n    class Meta:\n        db_table = 'dynamic_interface'\n\n    def details(self):\n        data = super(DynamicInterface, self).details()\n        data['data'] = [\n            ('System', 'system', self.system),\n            ('Mac', 'mac', self.mac),\n            ('Range', 'range', self.range),\n            ('Workgroup', 'workgroup', self.workgroup),\n            ('Vrf', 'vrf', self.vrf),\n            ('Domain', 'domain', self.domain)]\n        return data\n","sub_path":"cyder/cydhcp/interface/dynamic_intr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}{"seq_id":"562026229","text":"#\n# MIT License\n#\n# Copyright (c) 2022 GT4SD team\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A 
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\"\"\"HuggingFace tests.\"\"\"\n\nfrom typing import ClassVar, Type\n\nimport pytest\n\nfrom gt4sd.algorithms.core import AlgorithmConfiguration\nfrom gt4sd.algorithms.generation.hugging_face import (\n    HuggingFaceCTRLGenerator,\n    HuggingFaceGenerationAlgorithm,\n    HuggingFaceGPT2Generator,\n    HuggingFaceOpenAIGPTGenerator,\n    HuggingFaceSeq2SeqGenerator,\n    HuggingFaceTransfoXLGenerator,\n    HuggingFaceXLMGenerator,\n    HuggingFaceXLNetGenerator,\n)\nfrom gt4sd.algorithms.registry import ApplicationsRegistry\nfrom gt4sd.tests.utils import GT4SDTestSettings\n\ntest_settings = GT4SDTestSettings.get_instance()\n\n\ndef get_classvar_type(class_var):\n    \"\"\"Extract type from ClassVar type annotation: `ClassVar[T] -> T`.\"\"\"\n    return class_var.__args__[0]\n\n\n@pytest.mark.parametrize(\n    \"config_class, algorithm_type, domain, algorithm_name\",\n    [\n        (\n            HuggingFaceXLMGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceCTRLGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceGPT2Generator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceOpenAIGPTGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceXLNetGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceTransfoXLGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n        (\n            HuggingFaceSeq2SeqGenerator,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n    ],\n)\ndef test_config_class(\n    config_class: Type[AlgorithmConfiguration],\n    algorithm_type: str,\n    domain: str,\n    algorithm_name: str,\n):\n    assert config_class.algorithm_type == algorithm_type\n    assert config_class.domain == domain\n    assert config_class.algorithm_name == algorithm_name\n\n    for keyword, type_annotation in config_class.__annotations__.items():\n        if keyword in (\"algorithm_type\", \"domain\", \"algorithm_name\"):\n            assert type_annotation.__origin__ is ClassVar  # type: ignore\n            assert str == get_classvar_type(type_annotation)\n\n\n@pytest.mark.parametrize(\n    \"config_class\",\n    [\n        (HuggingFaceXLMGenerator),\n        (HuggingFaceCTRLGenerator),\n        (HuggingFaceGPT2Generator),\n        (HuggingFaceOpenAIGPTGenerator),\n        (HuggingFaceXLNetGenerator),\n        (HuggingFaceTransfoXLGenerator),\n        (HuggingFaceSeq2SeqGenerator),\n    ],\n)\ndef test_config_instance(config_class: Type[AlgorithmConfiguration]):\n    config = config_class()  # type:ignore\n    assert config.algorithm_application == config_class.__name__\n\n\n@pytest.mark.parametrize(\n    \"config_class\",\n    [\n        (HuggingFaceXLMGenerator),\n        (HuggingFaceCTRLGenerator),\n        (HuggingFaceGPT2Generator),\n        (HuggingFaceOpenAIGPTGenerator),\n        (HuggingFaceXLNetGenerator),\n        (HuggingFaceTransfoXLGenerator),\n        (HuggingFaceSeq2SeqGenerator),\n    ],\n)\ndef test_available_versions(config_class: Type[AlgorithmConfiguration]):\n    versions = config_class.list_versions()\n    assert len(versions) > 0\n\n\n@pytest.mark.parametrize(\n    \"config, algorithm\",\n    [\n        pytest.param(\n            HuggingFaceXLMGenerator,\n            HuggingFaceGenerationAlgorithm,\n            
marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceCTRLGenerator,\n            HuggingFaceGenerationAlgorithm,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceGPT2Generator,\n            HuggingFaceGenerationAlgorithm,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceOpenAIGPTGenerator,\n            HuggingFaceGenerationAlgorithm,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceXLNetGenerator,\n            HuggingFaceGenerationAlgorithm,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceTransfoXLGenerator,\n            HuggingFaceGenerationAlgorithm,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(HuggingFaceSeq2SeqGenerator, HuggingFaceGenerationAlgorithm),\n    ],\n)\ndef test_generation_via_import(config, algorithm):\n    algorithm = algorithm(configuration=config(length=10, number_of_sequences=1))\n    items = list(algorithm.sample(1))\n    assert len(items) == 1\n\n\n@pytest.mark.parametrize(\n    \"algorithm_application, algorithm_type, domain, algorithm_name\",\n    [\n        pytest.param(\n            HuggingFaceXLMGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceCTRLGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceGPT2Generator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceOpenAIGPTGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceXLNetGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceTransfoXLGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n            marks=pytest.mark.skipif(test_settings.gt4sd_ci, reason=\"high_memory\"),\n        ),\n        pytest.param(\n            HuggingFaceSeq2SeqGenerator.__name__,\n            \"generation\",\n            \"nlp\",\n            HuggingFaceGenerationAlgorithm.__name__,\n        ),\n    ],\n)\ndef test_generation_via_registry(\n    algorithm_type, domain, algorithm_name, algorithm_application\n):\n    algorithm = ApplicationsRegistry.get_application_instance(\n        target=None,\n        algorithm_type=algorithm_type,\n        domain=domain,\n        algorithm_name=algorithm_name,\n        algorithm_application=algorithm_application,\n        length=10,\n        number_of_sequences=1,\n    )\n    items = list(algorithm.sample(1))\n    assert len(items) == 1\n","sub_path":"src/gt4sd/algorithms/generation/tests/test_hugging_face.py","file_name":"test_hugging_face.py","file_ext":"py","file_size_in_byte":8520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"383745465","text":"# PROBLEM:\n# Given an integer n, count number of unique BST's (binary search trees) that store values 1 ... 
n.\n# Input: 3\n# Output: 5\n\n\nclass Solution:\n    def numTrees(self, n: int) -> int:\n        # memoization cache keyed by the (min_val, max_val) subrange\n        memo = {}\n\n        def count_bst(min_val, max_val):\n            if min_val >= max_val:\n                return 1\n\n            key = (min_val, max_val)\n            if key in memo:\n                return memo[key]\n\n            no_of_trees = 0\n            for i in range(min_val, max_val + 1):\n                no_of_trees += count_bst(min_val, i - 1) * count_bst(i + 1, max_val)\n\n            memo[key] = no_of_trees\n\n            return no_of_trees\n\n        return count_bst(1, n)","sub_path":"LeetCode/Training/96. Unique Binary Search Trees.py","file_name":"96. Unique Binary Search Trees.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"467511308","text":"from openerp.osv import fields, osv\nfrom openerp import api\nfrom openerp import tools\nimport openerp.addons.decimal_precision as dp\nfrom openerp.exceptions import except_orm, Warning\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT\nfrom datetime import datetime\nimport time\nimport logging\nimport pdb\n\n\nclass purchase_order(osv.osv):\n    _inherit = 'purchase.order'\n\n    READONLY_STATES = {\n        'confirmed': [('readonly', True)],\n        'approved': [('readonly', True)],\n        'done': [('readonly', True)]\n    }\n\n    def action_invoice_create(self, cr, uid, ids, context=None):\n        \"\"\"Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.\n        :param ids: list of ids of purchase orders.\n        :return: ID of created invoice.\n        :rtype: int\n        \"\"\"\n        self.write(cr, uid, ids, {'invoice_created': True})\n        return super(purchase_order, self).action_invoice_create(cr, uid, ids, context)\n\n    def _get_order(self, cr, uid, ids, context=None):\n        result = {}\n        for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):\n            result[line.order_id.id] = True\n        return result.keys()\n\n    def _amount_all(self, cr, uid, ids, field_name, arg, context=None):\n        res = {}\n        cur_obj = self.pool.get('res.currency')\n        user = self.pool.get('res.users').browse(cr, uid, uid)\n        for order in self.browse(cr, uid, ids, context=context):\n            res[order.id] = {\n                'amount_untaxed': 0.0,\n                'amount_tax': 0.0,\n                'amount_total': 0.0,\n            }\n            val = val1 = 0.0\n            cur = user.company_id.currency_id\n            if order.pricelist_id:\n                cur = order.pricelist_id.currency_id\n            for line in order.order_line:\n                # Include the discount in the total tax computation DA\n                if line.discount:\n                    amount_discount = float(line.discount)/line.product_qty\n                else:\n                    amount_discount = 0.00\n                val1 += line.price_subtotal\n                for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit - amount_discount, line.product_qty, line.product_id, order.partner_id)['taxes']:\n                    val += c.get('amount', 0.0)\n            res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)\n            res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)\n            res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']\n        return res\n\n    STATE_SELECTION_APP = [\n        ('except', 'Gerente General'),\n        ('presupuesto', 'Financiero'),\n        # ('area', 'Gerente de Area'),\n        ('controller', 'Controller'),\n        ('approved', 'Aprobada'),\n        ('cancel', 'Cancelada')\n    ]\n\n    _columns = {\n        'partner_id': fields.many2one('res.partner', 'Supplier', required=False, states=READONLY_STATES,\n            change_default=True, track_visibility='always'),\n        'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=False,\n            states=READONLY_STATES, help=\"The pricelist sets the currency used for this 
purchase order. It also computes the supplier price for the selected products/quantities.\"),\n        'amount_untaxed': fields.function(_amount_all, digits=(16, 2), string='Untaxed Amount',\n            multi=\"sums\", help=\"The amount without tax\", track_visibility='always'),\n        'amount_tax': fields.function(_amount_all, digits=(16, 2), string='Taxes',\n            multi=\"sums\", help=\"The tax amount\"),\n        'amount_total': fields.function(_amount_all, digits=(16, 2), string='Total',\n            multi=\"sums\", help=\"The total amount\"),\n        'state_manager': fields.selection(STATE_SELECTION_APP, 'Aprobaciones', readonly=True, select=True),\n        'control': fields.boolean('Aprobado por Controller'),\n        'control_comment': fields.text('Comentario'),\n        'is_send': fields.boolean('Enviado Aprobacion'),\n        'number_req': fields.char('No. Requisicion'),\n        'sale_order_id': fields.many2one('sale.order', 'Orden de Venta'),\n        'customer_id': fields.many2one('res.partner', 'Cliente'),\n        'not_apply': fields.boolean('No aplica'),\n        'invoice_created': fields.boolean('factura creada'),\n    }\n\n    _defaults = {\n        'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,\n    }\n\n    # Copied the function that creates the picking for product or consumable lines, keeping stock moves only for stockable products DA\n    def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):\n        \"\"\"Creates appropriate stock moves for given order lines, which can optionally create a\n        picking if none is given or no suitable one is found, then confirms the moves, makes them\n        available, and confirms the pickings.\n\n        If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard\n        incoming picking will be created to wrap the stock moves (default behavior of the stock.move)\n\n        Modules that wish to customize the procurements or partition the stock moves over\n        multiple stock pickings may override this method and call ``super()`` with\n        different subsets of ``order_lines`` and/or preset ``picking_id`` values.\n\n        :param browse_record order: purchase order to which the order lines belong\n        :param list(browse_record) order_lines: purchase order line records for which picking\n            and moves should be created.\n        :param int picking_id: optional ID of a stock picking to which the created stock moves\n            will be added. 
A new picking will be created if omitted.\n        :return: None\n        \"\"\"\n        stock_move = self.pool.get('stock.move')\n        todo_moves = []\n        new_group = self.pool.get(\"procurement.group\").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)\n\n        for order_line in order_lines:\n            if order_line.state == 'cancel':\n                continue\n            if not order_line.product_id:\n                continue\n\n            if order_line.product_id.type in ('product',):\n                for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):\n                    move = stock_move.create(cr, uid, vals, context=context)\n                    todo_moves.append(move)\n\n        todo_moves = stock_move.action_confirm(cr, uid, todo_moves)\n        stock_move.force_assign(cr, uid, todo_moves)\n\n    def wkf_bid_received(self, cr, uid, ids, context=None):\n        user = self.pool.get('res.users').browse(cr, uid, uid)\n        for ctl in self.browse(cr, uid, ids, context=context):\n            #if ctl.app_user_id.id != uid:\n            #    raise osv.except_orm('Error!', 'Usted no esta autorizado a aprobar este documento')\n            total_amount = 0\n            for quotes in ctl.quotes_ids:\n                if quotes.state == 'done':\n                    total_amount += quotes.amount_total\n\n            if total_amount < user.company_id.max_amount:\n                return self.write(cr, uid, ids, {'state': 'bid', 'bid_date': fields.date.context_today(self, cr, uid, context=context),\n                                                 'state_manager': 'approved'})\n            else:\n                return self.write(cr, uid, ids, {'state': 'bid', 'bid_date': fields.date.context_today(self, cr, uid, context=context),\n                                                 'state_manager': 'except'})\n\n    def wkf_confirm_order(self, cr, uid, ids, context=None):\n        for po in self.browse(cr, uid, ids, context=context):\n            if po.state_manager == 'except':\n                raise osv.except_orm('Error!', 'La Requisicion numero %s requiere aprobacion de gerencia' % po.name)\n        return super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context)\n\n    def manager_approved(self, cr, uid, ids, context=None):\n        for po in self.browse(cr, uid, ids, context=context):\n\n            val = {\n\n                'tracing_id': [(0, 0, {\n                    'purchase_order_id': po.id,\n                    'user_id': po.env.user.id,\n                    'date_tracing': time.strftime('%Y-%m-%d %H:%M:%S'),\n                    'state': 'approved_ger'\n                })]\n            }\n            po.write(val)\n        res = super(purchase_order, self).wkf_bid_received(cr, uid, ids, context)\n        self.write(cr, uid, ids, {'state_manager': 'presupuesto'})\n\n    def action_cancel_draft(self, cr, uid, ids, context=None):\n        for order in self.browse(cr, uid, ids, context):\n            for quote in order.quotes_ids:\n                self.pool.get('purchase.quotes').write(cr, uid, [quote.id], {'state': 'draft'})\n                for line in quote.quotes_lines:\n                    self.pool.get('quotes.line').write(cr, uid, [line.id], {'state': 'draft'})\n        self.write(cr, uid, ids, {'state_manager': False,\n                                  'is_approve_quotes': False, 'control': False, 'control_comment': False})\n        return super(purchase_order, self).action_cancel_draft(cr, uid, ids, context)\n\n    def wkf_action_cancel(self, cr, uid, ids, context=None):\n        todo = []\n        for po in self.browse(cr, uid, ids, context=context):\n            for line in po.order_line:\n                todo.append(line.id)\n        if todo:\n            self.pool.get('purchase.order.line').unlink(cr, uid, todo)\n        self.write(cr, uid, ids, {'state': 'cancel', 'state_manager': 'cancel'}, context=context)\n        self.set_order_line_status(cr, uid, ids, 'cancel', context=context)\n\n    def action_area_manager(self, cr, uid, ids, context=None):\n        for rec in self.browse(cr, uid, ids, context):\n            if rec.app_user_id.id != uid:\n                raise osv.except_orm('Error!', 'Usted no esta autorizado a aprobar este documento')\n        return self.write(cr, uid, ids, 
{'state_manager': 'except'})\n\n    def purchase_finances(self, cr, uid, ids, context=None):\n        valida = 0\n        for rec in self.browse(cr, uid, ids, context):\n            val = {\n\n                'tracing_id': [(0, 0, {\n                    'purchase_order_id': rec.id,\n                    'user_id': rec.env.user.id,\n                    'date_tracing': time.strftime('%Y-%m-%d %H:%M:%S'),\n                    'state': 'control'\n                })]\n            }\n            rec.write(val)\n            for quote in rec.quotes_ids:\n                if not quote:\n                    raise except_orm('Error!', 'Debe definir al menos una cotizacion')\n                if quote.state == 'done':\n                    valida = 1\n        if valida == 0:\n            raise osv.except_orm('Alerta!', 'Debe aprobar al menos una cotizacion')\n\n        return self.write(cr, uid, ids, {'state_manager': 'presupuesto'})\n\n    def purchase_cancel_to_order(self, cr, uid, ids, context=None):\n        for rec in self.browse(cr, uid, ids, context):\n            val = {\n\n                'tracing_id': [(0, 0, {\n                    'purchase_order_id': rec.id,\n                    'user_id': rec.env.user.id,\n                    'date_tracing': time.strftime('%Y-%m-%d %H:%M:%S'),\n                    'state': 'purchase'\n                })]\n            }\n            rec.write(val)\n            rec.write({'is_approve_quotes': False, 'cancel_controller': True})\n            rec.wkf_send_rfq()\n\n        return True\n\n    def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):\n        \"\"\"Collects required data from purchase order line that is used to create invoice line\n        for that purchase order line\n        :param account_id: Expense account of the product of PO line if any.\n        :param browse_record order_line: Purchase order line browse record\n        :return: Value for fields of invoice lines.\n        :rtype: dict\n        \"\"\"\n        taxes = []\n        fiscal = False\n        for tax in order_line.taxes_id:\n            tax_to_add = tax.id\n            if tax.description in ('1', '2'):\n                fp = self.pool.get('account.fiscal.position.tax')\n                if order_line.partner_id.property_account_position:\n                    fiscal = fp.search(cr, uid, [('position_id', '=', order_line.partner_id.property_account_position.id),\n                                                 ('tax_src_id.description', '=', tax.description)])\n                if not fiscal and order_line.partner_id.property_account_position:\n                    raise osv.except_orm('Error!', 'Configure la posicion fiscal del proveedor')\n                if fiscal:\n                    fiscal = fp.browse(cr, uid, fiscal[0])\n                    tax_to_add = fiscal.tax_dest_id.id\n            else:\n                continue\n            taxes.append(tax_to_add)\n\n        return {\n            'name': order_line.name,\n            'account_id': account_id,\n            'price_unit': order_line.price_unit or 0.0,\n            'quantity': order_line.product_qty,\n            'product_id': order_line.product_id.id or False,\n            'uos_id': order_line.product_uom.id or False,\n            'invoice_line_tax_id': [(6, 0, taxes)],\n            'account_analytic_id': order_line.account_analytic_id.id or False,\n            'purchase_line_id': order_line.id,\n        }\n\n    @api.multi\n    def send_approved(self):\n        if len(self.request_products) == 0:\n            raise except_orm('Error!', 'No existen lineas de pedido')\n        if self.is_procura:\n            if len(self.quotes_ids) == 0:\n                raise except_orm('Error!', 'Debe ingresar la cotizacion correspondiente')\n\n            validate = 0\n            for quotes in self.quotes_ids:\n                if not quotes.attachment_ids and quotes.state != 'cancel':\n                    raise except_orm('Error!', 'Por favor adjunte el/los documentos necesarios al documento %s' % quotes.name)\n                if quotes.state == 'done':\n                    validate += 1\n            if validate < 1:\n                raise except_orm('Error!',\n                                 'Por favor, debe aprobar una o mas cotizaciones para proceder con la compra')\n        return self.write({'is_send': True})\n\npurchase_order()\n\n\nclass purchase_order_line(osv.osv):\n    _inherit = 'purchase.order.line'\n\n    def _amount_line(self, cr, uid, ids, prop, arg, context=None):\n        res = {}\n        total = 0.00\n        cur_obj = self.pool.get('res.currency')\n        user = self.pool.get('res.users').browse(cr, 
uid, uid)\n        tax_obj = self.pool.get('account.tax')\n        for line in self.browse(cr, uid, ids, context=context):\n            taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)\n            cur = user.company_id.currency_id\n            if line.order_id.pricelist_id:\n                cur = line.order_id.pricelist_id.currency_id\n            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])\n            if line.percent > 0:\n                res[line.id] -= (res[line.id] * line.percent/100)\n        return res\n\n    _columns = {\n        'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),\n        'number': fields.char('No. parte'),\n        'price_unit': fields.float('Precio unitario', required=True, digits_compute=dp.get_precision('Product Price')),\n        'line_sequence': fields.char('Secuencia', readonly=True)\n    }\n\npurchase_order_line()\n\n\nclass res_company_amount(osv.osv):\n    _name = 'res.company'\n    _inherit = 'res.company'\n\n    def _get_image(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.manager_sig)\n        return result\n\n    def _set_image(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'manager_sig': tools.image_resize_image_big(value)}, context=context)\n\n    def _get_image_op(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.op_manager_sig)\n        return result\n\n    def _get_image_imp(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.importation_sig)\n        return result\n\n    def _set_image_imp(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'importation_sig': tools.image_resize_image_big(value)}, context=context)\n\n    def _get_image_po2(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.purchase_sig2)\n        return result\n\n    def _set_image_po2(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'purchase_sig2': tools.image_resize_image_big(value)}, context=context)\n\n    def _set_image_op(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'op_manager_sig': tools.image_resize_image_big(value)}, context=context)\n\n    def _get_image_po(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.purchase_sig)\n        return result\n\n    def _set_image_po(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'purchase_sig': tools.image_resize_image_big(value)}, context=context)\n\n    def _get_image_oper2(self, cr, uid, ids, name, args, context=None):\n        result = dict.fromkeys(ids, False)\n        for obj in self.browse(cr, uid, ids, context=context):\n            result[obj.id] = tools.image_get_resized_images(obj.sec_operation_sig)\n        return result\n\n    def _set_image_oper2(self, cr, uid, id, name, value, args, context=None):\n        return self.write(cr, uid, [id], {'sec_operation_sig': tools.image_resize_image_big(value)}, 
context=context)\n\n    _columns = {\n        'max_amount': fields.float('Valor maximo compras'),\n        'manager_sig': fields.binary(\"Firma Gerente\"),\n        'image_m': fields.function(_get_image, fnct_inv=_set_image,\n            string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['manager_sig'], 10),\n            },\n            help=\"Medium-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 128x128px image, with aspect ratio preserved. \" \\\n                 \"Use this field in form views or some kanban views.\"),\n        'image_s': fields.function(_get_image, fnct_inv=_set_image,\n            string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['manager_sig'], 10),\n            },\n            help=\"Small-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 64x64px image, with aspect ratio preserved. \" \\\n                 \"Use this field anywhere a small image is required.\"),\n\n        'op_manager_sig': fields.binary(\"Firma Operaciones\"),\n        'image_op': fields.function(_get_image_op, fnct_inv=_set_image_op,\n            string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image_op\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['op_manager_sig'], 10),\n            },\n            help=\"Medium-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 128x128px image, with aspect ratio preserved. \" \\\n                 \"Use this field in form views or some kanban views.\"),\n        'image_op2': fields.function(_get_image_op, fnct_inv=_set_image_op,\n            string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image_op\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['op_manager_sig'], 10),\n            },\n            help=\"Small-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 64x64px image, with aspect ratio preserved. \" \\\n                 \"Use this field anywhere a small image is required.\"),\n\n        'purchase_sig': fields.binary(\"Firma Compras\"),\n        'image_p': fields.function(_get_image_po, fnct_inv=_set_image_po,\n            string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image_po\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['purchase_sig'], 10),\n            },\n            help=\"Medium-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 128x128px image, with aspect ratio preserved. \" \\\n                 \"Use this field in form views or some kanban views.\"),\n        'image_p2': fields.function(_get_image_po, fnct_inv=_set_image_po,\n            string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image_po\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['purchase_sig'], 10),\n            },\n            help=\"Small-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 64x64px image, with aspect ratio preserved. \" \\\n                 \"Use this field anywhere a small image is required.\"),\n\n        'purchase_sig2': fields.binary(\"Firma Compras 2\"),\n        'image_ap': fields.function(_get_image_po2, fnct_inv=_set_image_po2,\n            string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image_po2\",\n            store={\n                'res.company': (lambda self, cr, uid, ids, c={}: ids, ['purchase_sig2'], 10),\n            },\n            help=\"Medium-sized photo of the employee. It is automatically \" \\\n                 \"resized as a 128x128px image, with aspect ratio preserved. 
\" \\\n \"Use this field in form views or some kanban views.\"),\n 'image_ap2': fields.function(_get_image_po2, fnct_inv=_set_image_po2,\n string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image_po2\",\n store={\n 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['purchase_sig2'], 10),\n },\n help=\"Small-sized photo of the employee. It is automatically \" \\\n \"resized as a 64x64px image, with aspect ratio preserved. \" \\\n \"Use this field anywhere a small image is required.\"),\n\n 'importation_sig': fields.binary(\"Firma Importaciones\"),\n 'image_po': fields.function(_get_image_imp, fnct_inv=_set_image_imp,\n string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image_imp\",\n store={\n 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['importation_sig'], 10),\n },\n help=\"Medium-sized photo of the employee. It is automatically \" \\\n \"resized as a 128x128px image, with aspect ratio preserved. \" \\\n \"Use this field in form views or some kanban views.\"),\n 'image_po2': fields.function(_get_image_imp, fnct_inv=_set_image_imp,\n string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image_imp\",\n store={\n 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['importation_sig'], 10),\n },\n help=\"Small-sized photo of the employee. It is automatically \" \\\n \"resized as a 64x64px image, with aspect ratio preserved. \" \\\n \"Use this field anywhere a small image is required.\"),\n\n 'sec_operation_sig': fields.binary(\"Firma Operaciones 2\"),\n 'image_med': fields.function(_get_image_oper2, fnct_inv=_set_image_oper2,\n string=\"Medium-sized photo\", type=\"binary\", multi=\"_get_image_oper2\",\n store={\n 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['sec_operation_sig'], 10),\n },\n help=\"Medium-sized photo of the employee. It is automatically \" \\\n \"resized as a 128x128px image, with aspect ratio preserved. \" \\\n \"Use this field in form views or some kanban views.\"),\n 'image_sml': fields.function(_get_image_oper2, fnct_inv=_set_image_oper2,\n string=\"Small-sized photo\", type=\"binary\", multi=\"_get_image_oper2\",\n store={\n 'res.company': (lambda self, cr, uid, ids, c={}: ids, ['sec_operation_sig'], 10),\n },\n help=\"Small-sized photo of the employee. It is automatically \" \\\n \"resized as a 64x64px image, with aspect ratio preserved. 
\" \\\n \"Use this field anywhere a small image is required.\"),\n }\n\n\nres_company_amount()\n\n","sub_path":"purchase_tiw/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":27514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"340354512","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 29 16:30:20 2020\n\n@author: travishartman\n\"\"\"\n\nimport csv\nimport sys\nimport datetime\nfrom datetime import datetime\n\n# read in the csv file\ncsv_file = open(sys.argv[1], 'r')\n#csv_file=open('/Users/travishartman/Desktop/schema/working_folder/csv/datamart_schema.csv', 'r')\n \n#list to hold all the dates\ndater=[]\nreader = csv.reader(csv_file)\nheaders_list = next(reader, None)\n\nfor a in csv.reader(csv_file, delimiter=','):\n dater.append(a[0])\n\n#### FUNCTIONS ####\n\n#verify that 'country' is in the header\ndef verify_country(headers_list):\n\n country = False\n \n if \"country\" in headers_list:\n country = True\n \n return country \n\n# Verify that 'time' is in the header\ndef verify_time(headers_list):\n \n stamp = False\n \n if \"time\" in headers_list:\n stamp = True\n \n return stamp\n\n# Verify date FORMAT\ndef verify_time_format(dt_str):\n \n last_one = dt_str[-1]\n \n if last_one == \"Z\" or last_one == \"z\":\n dt_str = dt_str[:-1]\n\n try:\n datetime.fromisoformat(dt_str)\n except:\n return False\n \n return True\n\n# get all the features listed in the dataset\ndef get_features(headers_list):\n \n feature_list=[]\n \n for header in headers_list:\n if \"_value\" in header:\n feature_list.append(header.split(\"_value\")[0])\n \n return feature_list\n\n#Function to add key,value pairs without overwriting existing info\ndef set_key(dictionary, key, value):\n \n if key not in dictionary:\n dictionary[key] = value\n elif type(dictionary[key]) == list:\n dictionary[key].append(value)\n else:\n dictionary[key] = [dictionary[key], value]\n\n# Create dictionary to add k.v of features and any associated atrributes for that feature\ndef get_feature_attr(feature_list, headers_list):\n \n header_dict ={}\n \n for feature in feature_list:\n for thing in headers_list:\n if feature in thing:\n set_key(header_dict, feature, thing)\n \n return header_dict\n\n# Check that 'description' is in the dataset for each feature\ndef verify_description(header_dict):\n \n list_bool=[]\n \n for key in header_dict:\n temp_bool = False\n temp_list = header_dict[key]\n \n for thing in temp_list:\n if 'description' in thing:\n temp_bool = True\n \n list_bool.append([key, temp_bool])\n \n return list_bool \n\n# Check for and return any differences in the dateset header versus the header columns that are accounted for\n# Used to ID any column headers that are non-schema comforming\ndef the_accounter(headers_list):\n \n not_accounted_for = []\n if verify_time(headers_list) == True:\n not_accounted_for.append(\"time\")\n if verify_country(headers_list) == True:\n not_accounted_for.append(\"country\")\n \n # get all the feautures and attributes\n things = get_feature_attr(get_features(headers_list), headers_list)\n \n for key in things:\n for thing in things[key]:\n not_accounted_for.append(thing)\n \n return (list(set(headers_list) - set(not_accounted_for)))\n\n#Put it all together\ndef wrapperitup(headers_list, dater):\n \n #populate with function outputs\n country = verify_country(headers_list) \n time_stamp = verify_time(headers_list)\n features_list = get_features(headers_list)\n header_dict = 
get_feature_attr(features_list, headers_list)\n descr_list = verify_description(header_dict)\n diff = the_accounter(headers_list)\n \n #features_in_set => [test if any features, number of features, list of features]\n \n holder_of_meta = {'qualifier': header_dict,\n 'country_in': None, \n 'time_in': None, \n 'time_format': None, \n 'features_in_set': [None,0,None], \n 'desc_for_feature':None,\n 'header_diff': diff}\n \n # COUNTRY CHECK --> Boolean\n holder_of_meta['country_in'] = country \n \n # TIME CHECK --> Boolean\n holder_of_meta['time_in'] = time_stamp\n \n # TIME FORMAT--> Boolean\n list_date_bool = []\n for t in dater:\n list_date_bool.append(verify_time_format(t))\n \n holder_of_meta['time_format'] = all(list_date_bool) \n \n # CHECK FOR the NAME TAG--> feature_list\n if len(features_list) > 0:\n holder_of_meta['features_in_set'][0] = True\n holder_of_meta['features_in_set'][1] = len(features_list) \n holder_of_meta['features_in_set'][2] = features_list\n \n # CHECK FOR DESCRIPTION --> header_dict\n holder_of_meta['desc_for_feature'] = descr_list\n \n \n return holder_of_meta\n\n# Display the results\ndef displayer(holder_of_meta):\n\n # Lists to hold the fails/good to gos\n success = []\n fail = []\n warn = []\n verifier_status = False\n \n print(\"Checking your file for schema compliance...\" + \"\\n\")\n \n # Check if there are any features first!\n temp = holder_of_meta['features_in_set']\n temp_q = holder_of_meta['qualifier']\n\n if temp[1] == 0:\n fail.append('Failed scan for features --> NO FEATURES FOUND')\n fail.append('Update your feature header to include the \"_value\" tag')\n fail.append('Example: change crop_price to crop_price_value')\n # Ok, there are features so run the rest of the verification\n else:\n for key in temp_q:\n \n if isinstance(temp_q[key], list):\n print(f\"Found Feature: {key} with {len(temp_q[key])-1} qualifier(s)\")\n for q in temp_q[key][1:]:\n print(f\" Qualifier: {q}\")\n else:\n print(f\"Found Feature: {key} with ZERO qualifiers\")\n \n print('\\n')\n print(f\"Found {temp[1]} total feature(s)\") \t\n print(f\"If you have more than {temp[1]} feature(s), verify your feature has the '_value' tag\" + '\\r\\n')\n\n # Stock strings for print out messages\t\n yes = 'Passed: '\t\n nope = 'FAILED ' \t\n maybe = 'WARNING '\t\n v = ' verified'\t\n \t\n # Allowable headers, if these are in the dataset, the verifier will ignore them since they are optional/allowed\n schema_nulls = ['latitude', 'longitude', 'polygon', 'admin_1', 'admin_2', 'admin_3']\n\t\t\n # Pull out data from dictionary for display to user\t\n # Any unrecognized headers...\n temp = holder_of_meta['header_diff']\n if temp == True:\n success.append(yes + 'header accounting' + v)\n else:\n for thing in temp:\n if thing not in schema_nulls:\n warn.append(maybe + \"--> Unrecognized column header: '\" + thing + \"'\") \n \n #COUNTRY\n temp = holder_of_meta['country_in']\n if temp == True:\n success.append(yes + 'country' + v)\n else:\n fail.append(nope + \"--> 'country' is a required column header\") \n\n #TIME\n temp = holder_of_meta['time_in']\n if temp == True:\n success.append(yes + 'time' + v)\n else:\n fail.append(nope + \"--> 'time' is a required column header\") \n\n #TIME FORMAT\n temp = holder_of_meta['time_format']\n if temp == True:\n success.append(yes + 'time format' + v)\n else:\n fail.append(nope + \"--> Format for 'time' must be ISO 8601\")\n \n #FEATURE DESCRIPTION\n temp = holder_of_meta['desc_for_feature']\n for t in temp:\n if t[1] == True:\n success.append(yes 
+\"Description verified for feature: \" + t[0])\n else:\n fail.append(nope + '--> No Description found for feature: ' + t[0])\n \n # Print out the results: \n if success != []:\n for s in success:\n print(s)\n print('\\n')\n\n if warn != []:\n for w in warn:\n print(w)\n print('* Required column headers: feature_n_value, feature_n_description, time, country')\n print('* Optional column headers: feature_n_value_unit, latitude, longitude, polygon, admin_1, admin_2, admin_3')\n print('* Qualifier column headers: feature_n_. Example: feature = crop_price --> qualifier = crop_price_currency') \n \n # If warnings and no fails\n if fail == []:\n print('\\n')\n print(\"YOUR FILE IS NOT SCHEMA-COMPLIANT. CORRECT THE WARNINGS AND RE-RUN\")\n print('\\n') \n\n if fail != []:\n print('\\n') \n for f in fail:\n print(f) \n print('\\n')\n print(\"YOUR FILE IS NOT SCHEMA-COMPLIANT. CORRECT THE VERIFICATION FAILURES AND/OR WARNINGS AND RE-RUN\")\n print('\\n') \n\n if fail == [] and warn == []: \n verifier_status = True\n print(\"YOUR DATASET IS SCHEMA-COMPLIANT AND READY FOR REGISTRATION\")\n \n \n return verifier_status\n\n\n# call functions and display results\n#verification_results = wrapperitup(headers_list, dater)\n#displayer(verification_results)\n\n\n","sub_path":"verify_and_register/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":9140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"78594989","text":"import os\nimport glob\nimport socket\nimport shutil\nimport validators\nimport subprocess\nimport requests, json\nfrom random import random\nfrom flask_cors import CORS\nfrom datetime import datetime\nfrom werkzeug.utils import secure_filename\nfrom flask import Flask, request, render_template, flash, redirect, url_for, make_response, jsonify\n\n# App wide variables\nUPLOAD_FOLDER = \"uploads\"\nJSON_TEMPLATE = \"json_template.html\"\nFORM_TEMPLATE = \"form_template.html\"\nALLOWED_EXTENSIONS = {\"n3\", \"ttl\", \"nt\"}\n\n# Create app with CORS enabled\napp = Flask(__name__)\nCORS(app)\n\n# Set app configuration\napp.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\napp.config[\"JSON_TEMPLATE\"] = JSON_TEMPLATE\napp.config[\"FORM_TEMPLATE\"] = FORM_TEMPLATE\napp.secret_key = \"super secret key\" # ?\n\n# Function to check filenames\ndef allowed_file(filename):\n return \".\" in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n# Function to check URLs\ndef valid_url(url):\n return validators.url(url) # returns True if valid\n\n# Function to process 'files' POSTed as JSON data\ndef create_input_files(input_list, target_container, error_container, message_part): \n for file in input_list:\n # Check if file extension is allowed (either .ttl of .n3)\n if file and allowed_file(file[\"file\"]):\n # Allow secure filenames only\n filename = secure_filename(file[\"file\"])\n # Append data to data_input\n target_container.append(filename)\n # Create and save file with content\n f = open(os.path.join(UPLOAD_FOLDER, filename), \"w\")\n f.write(file[\"content\"])\n f.close()\n else:\n list_of_files = [str(s) for s in ALLOWED_EXTENSIONS]\n error_container.append(\"{}: upload {} files only\".format(message_part, \"/\".join(list_of_files)))\n\n# Function to process files POSTed in FileList\ndef take_input_files(input_list, target_container, error_container, message_part): \n for file in input_list:\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n # Append data to 
the target container\n            target_container.append(filename)\n            file.save(os.path.join(UPLOAD_FOLDER, filename))\n        else:\n            list_of_files = [str(s) for s in ALLOWED_EXTENSIONS]\n            error_container.append(\"{}: upload {} files only\".format(message_part, \"/\".join(list_of_files)))\n\n# Function to create URL list based on POSTed JSON data\ndef create_input_urls(input_list, target_container, error_container, message):\n    for url in input_list:\n        if valid_url(url):\n            target_container.append(url)\n        else:\n            error_container.append(\"{}: {} is invalid\".format(message, url))\n\n# Function to use the EYE reasoner\ndef reason(data_input, rule_input, query_input, **kwargs):\n    # Enter uploads directory\n    os.chdir(UPLOAD_FOLDER)\n\n    # Create date strings for final reasoning output file\n    now = datetime.now()\n    now_str1 = \"#Execution date {} \\r\\n\".format(now.strftime(\"%Y-%m-%d %H:%M\"))\n    now_str2 = now.strftime(\"reasoning_%Y%m%d%H%M\")\n\n    # Reason with the EYE reasoner\n    process = subprocess.run(\n        [\"/opt/eye/bin/eye.sh\",\n        \"--nope\"]\n        + data_input\n        + rule_input\n        + query_input,\n        universal_newlines=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE)\n\n    # If run successfully\n    if process.returncode == 0:\n        # Leave uploads directory\n        os.chdir(\"..\")\n        # Delete upload folder and files\n        shutil.rmtree(UPLOAD_FOLDER)\n\n        # Return output\n        response = make_response(now_str1 + process.stdout)\n        response.headers[\"Content-type\"] = \"text/turtle\"\n        response.headers[\"Content-Disposition\"] = \"inline; filename={}.ttl\".format(now_str2)\n\n        # Return results as turtle file\n        return response\n\n    else:\n        # Leave uploads directory\n        os.chdir(\"..\")\n        # Delete upload folder and files\n        shutil.rmtree(UPLOAD_FOLDER)\n\n        # return output\n        if kwargs[\"gui\"] == True:\n            return render_template(kwargs[\"template\"], output=process.stderr)\n        else:\n            return jsonify(process.stderr)\n\n\n@app.route(\"/\", methods=[\"POST\",\"GET\"])\ndef reasoningtask():\n\n    # POST\n    if request.method == \"POST\":\n\n        gui = False\n\n        if \"gui\" in request.args:\n\n            gui = True\n\n            if request.args.get(\"gui\") == \"form\":\n                TEMPLATE = FORM_TEMPLATE\n\n            if request.args.get(\"gui\") == \"json\":\n                TEMPLATE = JSON_TEMPLATE\n\n        if not os.path.exists(UPLOAD_FOLDER):\n            os.makedirs(UPLOAD_FOLDER)\n\n        #######################################\n        ########### if POST is JSON ###########\n        #######################################\n        if request.is_json:\n\n            # Take the posted json data\n            req = request.get_json()\n\n            # List for error messages\n            check_parts = []\n\n            # You might want to check the JSON structure!\n\n            # Check if there are data files or URLs\n            if not req[\"data\"][\"files\"] and not req[\"data\"][\"urls\"]:\n                check_parts.append(\"No data files or URLs posted\")\n            else:\n                data_files = req[\"data\"][\"files\"]\n                data_urls = req[\"data\"][\"urls\"]\n\n            # Check if there are rule files or URLs\n            if not req[\"rules\"][\"files\"] and not req[\"rules\"][\"urls\"]:\n                check_parts.append(\"No rule files or URLs posted\")\n            else:\n                rule_files = req[\"rules\"][\"files\"]\n                rule_urls = req[\"rules\"][\"urls\"]\n\n            # Query files are optional\n            query_files = req[\"queries\"][\"files\"]\n            query_urls = req[\"queries\"][\"urls\"]\n\n\n            # If errors, return message(s)\n            if check_parts:\n                if gui == True:\n                    return \"\\n\".join(check_parts)\n                else:\n                    return jsonify(\"EYE Reasoner: \" + \" | \".join(check_parts))\n\n            # Containers for input files\n            data_input = []\n            rule_input = []\n            query_input = []\n\n            # Containers for file and url validation\n            check_names = []\n            check_urls = []\n\n            # Handle data files if 
present\n            if data_files:\n                create_input_files(data_files, data_input, check_names, \"Data files\")\n\n            # Handle data urls if present\n            if data_urls:\n                create_input_urls(data_urls, data_input, check_urls, \"Data URLs\")\n\n            # Handle rule files if present\n            if rule_files:\n                create_input_files(rule_files, rule_input, check_names, \"Rule files\")\n\n            # Handle rule urls if present\n            if rule_urls:\n                create_input_urls(rule_urls, rule_input, check_urls, \"Rule URLs\")\n\n            # Handle query files if present\n            if query_files:\n                create_input_files(query_files, query_input, check_names, \"Query files\")\n\n            # Handle query urls if present\n            if query_urls:\n                create_input_urls(query_urls, query_input, check_urls, \"Query URLs\")\n\n            # Exit if there are forbidden files\n            if check_names:\n                shutil.rmtree(UPLOAD_FOLDER)\n                if gui == True:\n                    return \"\\n\".join(check_names)\n                else:\n                    return jsonify(\"EYE Reasoner: \" + \" | \".join(check_names))\n\n            # Exit if there are invalid URLs\n            if check_urls:\n                shutil.rmtree(UPLOAD_FOLDER)\n                if gui == True:\n                    return \"\\n\".join(check_urls)\n                else:\n                    return jsonify(\"EYE Reasoner: \" + \" | \".join(check_urls))\n\n            if query_input:\n                # Add the needed parameter for the reasoner\n                query_input.insert(0, \"--query\")\n\n        #######################################\n        ######### if POST is FileList #########\n        #######################################\n        else:\n\n            check_parts = []\n            # Fact parts\n            if \"upl_data\" not in request.files:\n                check_parts.append(\"No data files form part\")\n\n            if \"data_list\" not in request.form:\n                check_parts.append(\"No data urls form part\")\n\n            # Rule parts\n            if \"upl_rules\" not in request.files:\n                check_parts.append(\"No rule files form part\")\n\n            if \"rule_list\" not in request.form:\n                check_parts.append(\"No rule urls form part\")\n\n            # Query files are optional\n\n            if check_parts:\n                # There are no data or rule parts\n                if gui == True:\n                    return render_template(FORM_TEMPLATE, output=\"\\n\".join(check_parts))\n                else:\n                    return \"EYE Reasoner: \" + \" | \".join(check_parts)\n\n            # Get the file lists\n            data_files = request.files.getlist(\"upl_data\")\n            rule_files = request.files.getlist(\"upl_rules\")\n            query_files = request.files.getlist(\"upl_queries\")\n\n            # Get the url lists. Split by line (i.e. \\r\\n) and remove any empty lines (i.e. 
\"\")\n data_urls = (request.form[\"data_list\"]).split(\"\\r\\n\")\n data_urls = list(filter((\"\").__ne__, data_urls))\n\n rule_urls = (request.form[\"rule_list\"]).split(\"\\r\\n\")\n rule_urls = list(filter((\"\").__ne__, rule_urls))\n\n query_urls = (request.form[\"query_list\"]).split(\"\\r\\n\")\n query_urls = list(filter((\"\").__ne__, query_urls))\n\n check_files = []\n \n if not data_files[0] and not data_urls:\n # Fact files or urls are missing\n check_files.append(\"No data files selected or URLs listed\")\n\n if not rule_files[0] and not rule_urls:\n # Rule files or urls are missing\n check_files.append(\"No rule files selected or URLs listed\")\n\n # Query files are optional\n\n if check_files:\n # Fact and/or rule files and/or urls are missing\n if gui == True: \n return render_template(FORM_TEMPLATE, output=\"\\n\".join(check_files))\n else: \n return \"EYE Reasoner: \" + \" | \".join(check_files)\n else:\n # Facts and rules are present\n check_names = []\n check_urls = []\n\n data_input = []\n rule_input = []\n query_input = []\n\n # Handle data files if present\n if data_files[0]:\n take_input_files(data_files, data_input, check_names, \"Data files\") \n\n # Handle data urls if present\n if data_urls:\n create_input_urls(data_urls, data_input, check_urls, \"Data URLs\")\n\n # Handle rule files if present\n if rule_files[0]: \n take_input_files(rule_files, rule_input, check_names, \"Rule files\")\n\n # Handle rule urls if present\n if rule_urls:\n create_input_urls(rule_urls, rule_input, check_urls, \"Rule URLs\")\n\n # Handle query files if present\n if query_files[0]:\n take_input_files(query_files, query_input, check_names, \"Query files\")\n\n # Handle query urls if present\n if query_urls:\n create_input_urls(query_urls, query_input, check_urls, \"Query URLs\")\n\n # Exit if there are forbidden files\n if check_names:\n if gui == True: \n return render_template(FORM_TEMPLATE, output=\"\\n\".join(check_names))\n else: \n return \"EYE Reasoner: \" + \" | \".join(check_names)\n\n # Exit if there are invalid URLs\n if check_urls:\n if gui == True: \n return render_template(FORM_TEMPLATE, output=\"\\n\".join(check_urls))\n else:\n return \"EYE Reasoner: \" + \" | \".join(check_names)\n\n if query_input:\n # Add the needed parameter for the reasoner\n query_input.insert(0, \"--query\")\n\n ###################\n # START REASONING #\n ###################\n if gui:\n reasoning = reason(data_input, rule_input, query_input, gui=gui, template=TEMPLATE)\n else:\n reasoning = reason(data_input, rule_input, query_input)\n\n return reasoning\n\n # GET\n else:\n gui_type = request.args.get(\"gui\", default = \"form\", type = str)\n if gui_type == \"json\":\n return render_template(JSON_TEMPLATE)\n else: \n return render_template(FORM_TEMPLATE)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=50001, debug=True)\n","sub_path":"reasoning-task.py","file_name":"reasoning-task.py","file_ext":"py","file_size_in_byte":13062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"524313590","text":"#coding:utf-8\r\nimport cv2\r\nimport numpy as np\r\n\r\n# 加载视频\r\ncap = cv2.VideoCapture('shipin01.mp4')\r\n\r\n# 调用熟悉的人脸分类器 识别特征类型\r\n# 人脸 - haarcascade_frontalface_default.xml\r\n# 人眼 - haarcascade_eye.xm\r\n# 微笑 - haarcascade_smile.xml\r\nface_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nwhile True:\r\n # 读取视频片段\r\n flag, frame = cap.read()\r\n if flag == False:\r\n break\r\n\r\n # 灰度处理\r\n gray = 
cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)\r\n\r\n    # Detect faces: the image is rescaled by a factor of 1.1 per pass, with a minimum number of neighboring detections\r\n    face_zone = face_detect.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 4)\r\n\r\n    # Draw a rectangle and a circle around each detected face\r\n    for x, y, w, h in face_zone:\r\n        cv2.rectangle(frame, pt1 = (x, y), pt2 = (x+w, y+h), color = [0,0,255], thickness=2)\r\n        cv2.circle(frame, center = (x + w//2, y + h//2), radius = w//2, color = [0,255,0], thickness = 2)\r\n\r\n    # Show the frame\r\n    cv2.imshow('video', frame)\r\n\r\n    # Set the quit key and the display rate\r\n    if ord('q') == cv2.waitKey(40):\r\n        break\r\n\r\n# Release resources\r\ncv2.destroyAllWindows()\r\ncap.release()\r\n\r\n\r\n","sub_path":"blog28-opencv-face/blog28-face-video-05.py","file_name":"blog28-face-video-05.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"498104409","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2020 The btclib developers\n#\n# This file is part of btclib. It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\n\"\"\"Network constants and associated functions.\"\"\"\n\nfrom typing import List\n\nfrom .alias import String\nfrom .curve import Curve\nfrom .curves import secp256k1\n\n_NETWORKS = ['mainnet', 'testnet', 'regtest']\n\n_CURVES = [secp256k1, secp256k1, secp256k1]\n\n_WIF_PREFIXES = [\n    b'\\x80',  # WIF starts with {K,L} (if compressed) or 5 (if uncompressed)\n    b'\\xef',  # WIF starts with c (if compressed) or 9 (if uncompressed)\n    b'\\xef',  # WIF starts with c (if compressed) or 9 (if uncompressed)\n]\n\n_P2PKH_PREFIXES = [\n    b'\\x00',  # address starts with 1\n    b'\\x6f',  # address starts with {m, n}\n    b'\\x6f'   # address starts with {m, n}\n]\n_P2SH_PREFIXES = [\n    b'\\x05',  # address starts with 3\n    b'\\xc4',  # address starts with 2\n    b'\\xc4',  # address starts with 2\n]\n_P2W_PREFIXES = ['bc', 'tb', 'bcrt']\n\n\n# VERSION BYTES (4 bytes)\n#\n# Bitcoin core uses the m/0h (core) BIP32 derivation path\n# with xprv/xpub and tprv/tpub Base58 encoding\n\n# m/44h/0h p2pkh or p2sh\nMAIN_xprv = b'\\x04\\x88\\xAD\\xE4'\nMAIN_xpub = b'\\x04\\x88\\xB2\\x1E'\n# m/44h/1h p2pkh or p2sh\nTEST_tprv = b'\\x04\\x35\\x83\\x94'\nTEST_tpub = b'\\x04\\x35\\x87\\xCF'\n\n# m/49h/0h p2wpkh-p2sh (p2sh-wrapped-segwit)\nMAIN_yprv = b'\\x04\\x9D\\x78\\x78'\nMAIN_ypub = b'\\x04\\x9D\\x7C\\xB2'\n# m/49h/1h p2wpkh-p2sh (p2sh-wrapped-segwit)\nTEST_uprv = b'\\x04\\x4A\\x4E\\x28'\nTEST_upub = b'\\x04\\x4A\\x52\\x62'\n\n# --- p2wsh-p2sh (p2sh-wrapped-segwit)\nMAIN_Yprv = b'\\x02\\x95\\xB0\\x05'\nMAIN_Ypub = b'\\x02\\x95\\xB4\\x3F'\nTEST_Uprv = b'\\x02\\x42\\x85\\xB5'\nTEST_Upub = b'\\x02\\x42\\x89\\xEF'\n\n# m/84h/0h p2wpkh (native-segwit)\nMAIN_zprv = b'\\x04\\xB2\\x43\\x0C'\nMAIN_zpub = b'\\x04\\xB2\\x47\\x46'\n# m/84h/1h p2wpkh (native-segwit)\nTEST_vprv = b'\\x04\\x5F\\x18\\xBC'\nTEST_vpub = b'\\x04\\x5F\\x1C\\xF6'\n\n# --- p2wsh (native-segwit)\nMAIN_Zprv = b'\\x02\\xAA\\x7A\\x99'\nMAIN_Zpub = b'\\x02\\xAA\\x7E\\xD3'\nTEST_Vprv = b'\\x02\\x57\\x50\\x48'\nTEST_Vpub = b'\\x02\\x57\\x54\\x83'\n\n# p2pkh or p2sh\n_XPRV_PREFIXES = [MAIN_xprv, TEST_tprv, TEST_tprv]\n_XPUB_PREFIXES = [MAIN_xpub, TEST_tpub, TEST_tpub]\n\n# FIXME: these are not used/tested\n# p2wpkh p2sh-wrapped-segwit\n_P2WPKH_P2SH_PRV_PREFIXES = [MAIN_yprv, TEST_uprv, TEST_uprv]\n_P2WPKH_P2SH_PUB_PREFIXES = [MAIN_ypub, TEST_upub, 
TEST_upub]\n\n# FIXME: these are not used/tested\n# p2wsh p2sh-wrapped-segwit\n_P2WSH_P2SH_PRV_PREFIXES = [MAIN_Yprv, TEST_Uprv, TEST_Uprv]\n_P2WSH_P2SH_PUB_PREFIXES = [MAIN_Ypub, TEST_Upub, TEST_Upub]\n\n# p2wpkh native-segwit\n_P2WPKH_PRV_PREFIXES = [MAIN_zprv, TEST_vprv, TEST_vprv]\n_P2WPKH_PUB_PREFIXES = [MAIN_zpub, TEST_vpub, TEST_vpub]\n\n# FIXME: these are not used/tested\n# p2wsh native-segwit\n_P2WSH_PRV_PREFIXES = [MAIN_Zprv, TEST_Vprv, TEST_Vprv]\n_P2WSH_PUB_PREFIXES = [MAIN_Zpub, TEST_Vpub, TEST_Vpub]\n\n\n_XPRV_VERSIONS_MAIN = [MAIN_xprv, MAIN_yprv, MAIN_zprv, MAIN_Yprv, MAIN_Zprv]\n_XPRV_VERSIONS_TEST = [TEST_tprv, TEST_uprv, TEST_vprv, TEST_Uprv, TEST_Vprv]\n_XPUB_VERSIONS_MAIN = [MAIN_xpub, MAIN_ypub, MAIN_zpub, MAIN_Ypub, MAIN_Zpub]\n_XPUB_VERSIONS_TEST = [TEST_tpub, TEST_upub, TEST_vpub, TEST_Upub, TEST_Vpub]\n\n_XPRV_VERSIONS = [_XPRV_VERSIONS_MAIN,\n                  _XPRV_VERSIONS_TEST, _XPRV_VERSIONS_TEST]\n_XPUB_VERSIONS = [_XPUB_VERSIONS_MAIN,\n                  _XPUB_VERSIONS_TEST, _XPUB_VERSIONS_TEST]\n\n# It provides a false match for regtest,\n# which is not a problem as long as it is used for WIF/Base58Address/BIP32xkey\n# where the two networks share the same prefixes.\n_REPEATED_NETWORKS = [\n    'mainnet', 'mainnet', 'mainnet', 'mainnet', 'mainnet',\n    'testnet', 'testnet', 'testnet', 'testnet', 'testnet',\n    'regtest', 'regtest', 'regtest', 'regtest', 'regtest']\n_XPRV_VERSIONS_ALL = _XPRV_VERSIONS_MAIN + \\\n    _XPRV_VERSIONS_TEST + _XPRV_VERSIONS_TEST\n_XPUB_VERSIONS_ALL = _XPUB_VERSIONS_MAIN + \\\n    _XPUB_VERSIONS_TEST + _XPUB_VERSIONS_TEST\n\n\ndef curve_from_network(network: str) -> Curve:\n    index = _NETWORKS.index(network)\n    return _CURVES[index]\n\n\ndef curve_from_xpubversion(xpubversion: bytes) -> Curve:\n    index = _XPUB_VERSIONS_ALL.index(xpubversion)\n    return _CURVES[index]\n\n\ndef _xpub_versions_from_network(network: str) -> List[bytes]:\n    index = _NETWORKS.index(network)\n    return _XPUB_VERSIONS[index]\n\n\ndef wif_prefix_from_network(network: str) -> bytes:\n    index = _NETWORKS.index(network)\n    return _WIF_PREFIXES[index]\n\n\ndef p2pkh_prefix_from_network(network: str) -> bytes:\n    network_index = _NETWORKS.index(network)\n    return _P2PKH_PREFIXES[network_index]\n\n\ndef p2sh_prefix_from_network(network: str) -> bytes:\n    index = _NETWORKS.index(network)\n    return _P2SH_PREFIXES[index]\n\n\ndef p2w_prefix_from_network(network: str) -> str:\n    index = _NETWORKS.index(network)\n    return _P2W_PREFIXES[index]\n\n\ndef has_segwit_prefix(addr: String) -> bool:\n\n    if isinstance(addr, str):\n        str_addr = addr.strip()\n        str_addr = str_addr.lower()\n    else:\n        str_addr = addr.decode('ascii')\n\n    for prefix in _P2W_PREFIXES:\n        if str_addr.startswith(prefix + '1'):\n            return True\n\n    return False\n\n\ndef network_from_wif_prefix(prefix: bytes) -> str:\n    \"\"\"Return network string from WIF prefix.\n\n    Warning: when used on 'regtest' it returns 'testnet', which is not\n    a problem as long as it is used for WIF/Base58Address/BIP32xkey\n    where the two networks share the same prefixes.\n    \"\"\"\n    index = _WIF_PREFIXES.index(prefix)\n    return _NETWORKS[index]\n\n\ndef network_from_p2pkh_prefix(prefix: bytes) -> str:\n    \"\"\"Return network string from p2pkh prefix.\n\n    Warning: when used on 'regtest' it returns 'testnet', which is not\n    a problem as long as it is used for WIF/Base58Address/BIP32xkey\n    where the two networks share the same prefixes.\n    \"\"\"\n    index = _P2PKH_PREFIXES.index(prefix)\n    return _NETWORKS[index]\n\n\ndef network_from_p2sh_prefix(prefix: bytes) -> str:\n    \"\"\"Return network string from p2sh prefix.\n\n    Warning: 
when used on 'regtest' it returns 'testnet', which is not\n    a problem as long as it is used for WIF/Base58Address/BIP32xkey\n    where the two networks share the same prefixes.\n    \"\"\"\n    index = _P2SH_PREFIXES.index(prefix)\n    return _NETWORKS[index]\n\n\ndef network_from_xprv(xprvversion: bytes) -> str:\n    \"\"\"Return network string from xprv prefix.\n\n    Warning: when used on 'regtest' it returns 'testnet', which is not\n    a problem as long as it is used for WIF/Base58Address/BIP32xkey\n    where the two networks share the same prefixes.\n    \"\"\"\n    index = _XPRV_VERSIONS_ALL.index(xprvversion)\n    return _REPEATED_NETWORKS[index]\n\n\ndef network_from_xpub(xpubversion: bytes) -> str:\n    \"\"\"Return network string from xpub prefix.\n\n    Warning: when used on 'regtest' it returns 'testnet', which is not\n    a problem as long as it is used for WIF/Base58Address/BIP32xkey\n    where the two networks share the same prefixes.\n    \"\"\"\n    index = _XPUB_VERSIONS_ALL.index(xpubversion)\n    return _REPEATED_NETWORKS[index]\n\n\ndef network_from_p2w_prefix(prefix: str) -> str:\n    \"Return network string from p2w prefix.\"\n    index = _P2W_PREFIXES.index(prefix)\n    return _NETWORKS[index]\n","sub_path":"btclib/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538661847","text":"import discord\nimport discord.ext.commands as commands\n\nimport random\n\nclass Decide(commands.Cog):\n    def __init__(self, bot, timeouts, generic_responses):\n        self.bot = bot\n        self.timeouts = timeouts\n        self.generic_responses = generic_responses\n\n    @commands.command()\n    async def decide(self, ctx, *choices):\n        choices = \" \".join(choices)\n        choices = choices.split(\"|\")\n\n        if len(choices) == 1:\n            try:\n                choices = int(choices[0])\n            except:\n                await ctx.send(\"bro, you have to separate the options with |\\nlike this: jer!decide flexis|janos\\nor give a number, that works too\")\n                return\n\n            choice = str(random.randint(1, choices))\n\n        else:\n            choice = random.choice(choices)\n\n        await ctx.send(\"jrery bot says **\" + choice.replace(\"**\", \"\\\\*\\\\*\") + \"**\")\n\n","sub_path":"commands/decide.py","file_name":"decide.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457938630","text":"#!/usr/bin/env python\nimport csv\nimport igraph\nimport os\nimport textwrap\nfrom Bio import pairwise2\nfrom Bio.SubsMat import MatrixInfo as matrizes\nfrom matplotlib import pyplot\n\ngrafos_dir = \"./grafos\"\n\nselvagem = \"\".join([\n    \"MARTFFVGGNFKLNGSKQSIKEIVERLNTASIPENVEVVICPPATYLDYSVSLV\",\n    \"KKPQVTVGAQNAYLKASGAFTGENSVDQIKDVGAKWVILGHSERRSYFHED\",\n    \"DKFIADKTKFALGQGVGVILCIGETLEEKKAGKTLDVVERQLNAVLEEVKDW\",\n    \"TNVVVAYEPVWAIGTGLAATPEDAQDIHASIRKFLASKLGDKAASELRILYGG\",\n    \"SANGSNAVTFKDKADVDGFLVGGASLKPEFVDIINSRN\"\n])\n\nmutada = \"\".join([\n    \"MARTPFVGGNWKMNGTKAEAKELVEALKAKLPDDVEVVVAPPAVYLDTAREAL\",\n    \"KGSKIKVAAQNCYKEAKGAFTGEISPEMLKDLGADYVILGHSERRHYFGETDELV\",\n    \"AKKVAHALEHGLKVIACIGETLEEREAGKTEEVVFRQTKALLAGLGDEWKNVVIA\",\n    \"YEPVWAIGTGKTATPEQAQEVHAFIRKWLAENVSAEVAESVRILYGGSVKPANAK\",\n    \"ELAAQPDIDGFLVGGASLKPEFLDIINSRN\"\n])\n\nfasta_map = {\n    \"A\": \"ALA\",\n    #\"B\": \"\",\n    \"C\": \"CYS\",\n    \"D\": \"ASP\",\n    \"E\": \"GLU\",\n    \"F\": \"PHE\",\n    \"G\": \"GLY\",\n    \"H\": \"HIS\",\n    \"I\": \"ILE\",\n    #\"J\": \"\",\n    \"K\": \"LYS\",\n    \"L\": \"LEU\",\n    \"M\": \"MET\",\n    \"N\": \"ASN\",\n    #\"O\": \"\",\n    \"P\": \"PRO\",\n    \"Q\": \"GLN\",\n    \"R\": 
\"ARG\",\n \"S\": \"SER\",\n \"T\": \"THR\",\n #\"U\": \"\",\n \"V\": \"VAL\",\n \"W\": \"TRP\",\n #\"X\": \"\",\n \"Y\": \"TYR\",\n #\"Z\": \"\"\n}\n\n\ndef mutacoes():\n \"\"\"\n Retorna uma lista de mutacoes entre as duas proteinas acima no formato:\n [\n (X, Y),\n (X, Y),\n .\n .\n .\n ]\n\n Onde X eh o indice do aminoacido modificado e Y eh a abreviatura PDB\n \"\"\"\n\n print(\"Calculando mutacoes entre dTIM e 2YPI\")\n\n # Configuracoes do alinhamento\n matriz = matrizes.pam250\n gap_novo = -10\n gap_continuo = -0.5\n\n # Alinhamento global usando algoritmo pairwise2\n alinhamento = pairwise2.align.globalds(selvagem, mutada, matriz, gap_novo, gap_continuo)\n\n print(\"Alinhamento obtido:\")\n\n\n seqa = textwrap.wrap(alinhamento[0][0], 80, break_on_hyphens=False)\n seqb = textwrap.wrap(alinhamento[0][1], 80, break_on_hyphens=False)\n ret = \"\"\n for linea, lineb in zip(seqa, seqb):\n ret += \"A: {}\\n\".format(linea)\n ret += \"B: {}\\n\".format(lineb)\n\n print(ret)\n\n # Retornamos todos os residuos da 2YPI que sao diferentes na dTIM, junto com seu indice\n i = 1\n for residuo_selvagem, residuo_mutado in zip(alinhamento[0][0], alinhamento[0][1]):\n\n if residuo_selvagem != residuo_mutado and residuo_selvagem != \"-\":\n yield (i, fasta_map[residuo_selvagem])\n\n i += 1\n\n\n\ndef estatisticas(mutacoes):\n\n grafos = []\n\n # Convertendo as mutacoes para o formato dos ID's do grafo\n mutacoes_ids = [(r[0], \"{res}({id})\".format(res=r[1], id=r[0])) for r in mutacoes]\n\n print(\"Foram encontradas {i} mutacoes\".format(i=len(mutacoes_ids)))\n\n print(\"Carregando grafos\")\n\n # Lemos todos os grafos gerados e armazenamos em lista\n for arquivo in [ f for f in os.listdir(grafos_dir) if f.endswith(\".net\") ]:\n\n grafo = igraph.Graph.Read_Pajek(grafos_dir+\"/\"+arquivo)\n \n # Se der erro reportamos e seguimos em frente \n if len(grafo.vs) is 0:\n print(\"ERRO carregando grafo {file}\".format(file=arquivo))\n continue \n\n # Se carregou o grafo corretamente prosseguimos com a analise\n else:\n grafos.append(grafo)\n \n for grafo in grafos:\n\n #print(grafo.summary())\n\n closeness = grafo.closeness()\n betweenness = grafo.betweenness()\n\n # Para cada mutacao procuramos no grafo se o vertice daquela posicao corresponde \n # ao mesmo residuo\n for mutacao in mutacoes_ids:\n\n # Caso os residuos coincidam:\n #\n # Obs.: o -1 eh necessario porque os vertices sao indexados a partir de 0\n #\n try:\n if grafo.vs[mutacao[0]-1][\"id\"] == mutacao[1]:\n\n print(\"Encontrada mutacao coincidente {mut} X {gra}\".format(\n mut=mutacao[1], \n gra=grafo.vs[mutacao[0]-1][\"id\"])\n )\n\n # Vamos calcular suas propriedades e retorna-lo\n yield (\n grafo.vs[mutacao[0]-1][\"id\"],\n grafo.degree(mutacao[0]-1),\n closeness[mutacao[0]-1],\n betweenness[mutacao[0]-1]\n )\n except:\n # se o grafo tiver menos vertices, que estamos tentando indexar, pula\n continue\n\ndef acumula_resultados(stats):\n \"\"\"\n Cria um dicionario de resultados no formato\n {\n \"XXX(YY) : [\n (G1, CL1, BT1),\n (G2, CL2, BT2),\n ...\n ]\n \"XXX(YY) : [\n ...\n ]\n ...\n }\n Onde XXX(YY) eh o id do nodo ex: PHY[12] e GN, CLN, BTN sao os valores para grau, closeness\n e betweenness calculados para cada vez que esse residuo aparece na familia\n \"\"\"\n residuos = {}\n\n for stat in stats:\n\n # Se nosso dicionario ainda nao tem uma key para este residuo, criamos a key como lista\n if stat[0] not in residuos:\n residuos[stat[0]] = list()\n residuos[stat[0]].append(stat[1:])\n\n # Se ja existe uma key, appendamos as estatisticas 
na lista de ocorrencias\n else:\n residuos[stat[0]].append(stat[1:])\n\n return residuos\n\n\ndef calcula_medias(resultados):\n\n medias = {}\n\n for residuo in resultados:\n\n num_resultados = len(resultados[residuo])\n\n # Se so tem uma tupla de resultados, nao precisa calcular media\n if num_resultados == 1:\n medias[residuo] = resultados[residuo][0]\n\n else:\n medias[residuo] = (\n float(sum([ grau[0] for grau in resultados[residuo] ])) / float(num_resultados),\n float(sum([ closeness[1] for closeness in resultados[residuo] ])) / float(num_resultados),\n float(sum([ betweenness[2] for betweenness in resultados[residuo] ])) / float(num_resultados),\n )\n\n return medias\n\ndef plot(medias):\n\n print(\"Ao todo, {i} residuos foram selecionados para analise\".format(i=len(medias.keys())))\n\n try:\n grau_file = open(\"resultados_data_grau.csv\", \"wb\")\n bt_file = open(\"resultados_data_btns.csv\", \"wb\")\n cl_file = open(\"resultados_data_clns.csv\", \"wb\")\n\n grau_writer = csv.writer(grau_file)\n bt_writer = csv.writer(bt_file)\n cl_writer = csv.writer(cl_file)\n\n except:\n raise\n\n for residuo in medias.keys():\n\n grau_writer.writerow([residuo, medias[residuo][0]])\n bt_writer.writerow([residuo, medias[residuo][2]])\n cl_writer.writerow([residuo, medias[residuo][1]])\n\ndef main():\n \n muts = mutacoes()\n stats = estatisticas(muts)\n resultados = acumula_resultados(stats)\n medias = calcula_medias(resultados)\n\n # for media in medias:\n # print(\"{:8s}: GR: {:6.6f}, CL: {:6.6f}, BT: {:6.6f}\".format(\n # media, \n # medias[media][0],\n # medias[media][1],\n # medias[media][2]\n # ))\n\n plot(medias)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bioinfo/TP3/tp3_analisa_familia.py","file_name":"tp3_analisa_familia.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"366966574","text":"\"\"\"\nAttentive GRU Reader Model\n---------------------------\n\nAt a high level, this model reads both the story and the question forwards and backwards, and represents the document as a weighted sum of its token where each individual token weight is decided by an attention mechanism that reads the question.\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.models import Model\nfrom keras.engine import Input, Merge, merge\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.core import Activation, Dense, Dropout, RepeatVector, Lambda\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.layers import GRU\nmodule_home = os.environ['NEURAL_PATH']\nsys.path.insert(0, module_home)\nfrom custom import Reverse, masked_concat, masked_dot, masked_sum\n\n### MODEL\n\ndef get_model(\n data_path, #Path to dataset\n hid_dim, #Dimension of the hidden GRU layers\n optimizer='rmsprop', #Optimization function to be used\n loss='categorical_crossentropy' #Loss function to be used\n ):\n\n metadata_dict = {}\n f = open(os.path.join(data_path, 'metadata', 'metadata.txt'), 'r')\n for line in f:\n entry = line.split(':')\n metadata_dict[entry[0]] = int(entry[1])\n f.close()\n story_maxlen = metadata_dict['input_length']\n query_maxlen = metadata_dict['query_length']\n vocab_size = metadata_dict['vocab_size']\n entity_dim = metadata_dict['entity_dim']\n\n embed_weights = np.load(os.path.join(data_path, 'metadata', 'weights.npy'))\n word_dim = embed_weights.shape[1]\n\n########## MODEL ############\n\n story_input = 
Input(shape=(story_maxlen,), dtype='int32', name=\"StoryInput\")\n\n x = Embedding(input_dim=vocab_size+2,\n output_dim=word_dim,\n input_length=story_maxlen,\n mask_zero=True,\n weights=[embed_weights])(story_input)\n\n query_input = Input(shape=(query_maxlen,), dtype='int32', name='QueryInput')\n\n x_q = Embedding(input_dim=vocab_size+2,\n output_dim=word_dim,\n input_length=query_maxlen,\n mask_zero=True,\n weights=[embed_weights])(query_input)\n\n concat_embeddings = masked_concat([x_q, x], concat_axis=1)\n\n lstm = GRU(hid_dim, consume_less='gpu')(concat_embeddings)\n\n reverse_lstm = GRU(hid_dim, consume_less='gpu', go_backwards=True)(concat_embeddings)\n\n merged = merge([lstm, reverse_lstm], mode='concat')\n\n result = Dense(entity_dim, activation='softmax')(merged)\n\n model = Model(input=[story_input, query_input], output=result)\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=['accuracy'])\n print(model.summary())\n return model\n","sub_path":"models/simple_gru_model.py","file_name":"simple_gru_model.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"223779354","text":"from datetime import datetime\nfrom .classes import Poll, Schedule\nfrom .driver import Driver\nfrom .file import File, Google_File, Google_Folder\nfrom .settings import Settings\nfrom .user import User\nfrom .validators import NumberValidator, TimeValidator, DateValidator, DurationValidator, ExpirationValidator, ListValidator\nimport PyInquirer\nfrom PyInquirer import Validator, ValidationError\n\nclass Message():\n def __init__(self):\n self.text = None\n self.files = []\n ##\n self.keywords = []\n self.tags = []\n self.performers = []\n ## messages\n self.price = None\n self.recipients = [] # users to send to\n self.users = [] # prepared recipients\n ## posts\n self.expiration = None\n self.poll = None\n self.schedule = None\n ##\n self.gotten = False\n\n ###########################################################################\n\n def backup_files(self):\n for file in self.files:\n file.backup()\n\n def delete_files(self):\n for file in self.files:\n file.delete()\n\n def cleanup_files(self):\n self.backup_files()\n self.delete_files()\n\n @staticmethod\n def format_keywords(keywords):\n if len(keywords) > 0: return \" #{}\".format(\" #\".join(keywords))\n return \"\"\n\n @staticmethod\n def format_performers(performers):\n if len(performers) > 0: return \" w/ @{}\".format(\" @\".join(performers))\n return \"\"\n \n @staticmethod\n def format_tags(tags):\n if len(tags) > 0: return \" @{}\".format(\" @\".join(tags))\n return \"\"\n\n def format_text(self):\n return \"{}{}{}{}\".format(self.text, Message.format_performers(self.performers), Message.format_tags(self.tags),\n Message.format_keywords(self.keywords)).strip()\n\n def get_keywords(self):\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"): return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n answers = PyInquirer.prompt(question)\n keywords = answers[\"keywords\"]\n keywords = keywords.split(\",\")\n keywords = [n.strip() for n in keywords]\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords\n\n def get_performers(self):\n # if self.performers: 
return self.performers\n if len(self.performers) > 0: return self.performers\n performers = Settings.get_tags() or []\n if len(performers) > 0: return performers\n if not Settings.prompt(\"performers\"): return []\n question = {\n 'type': 'input',\n 'name': 'performers',\n 'message': 'Performers:',\n 'validate': ListValidator\n }\n answers = PyInquirer.prompt(question)\n performers = answers[\"performers\"]\n performers = performers.split(\",\")\n performers = [n.strip() for n in performers]\n if not Settings.confirm(performers): return self.get_performers()\n self.performers = performers\n return self.performers\n\n def get_tags(self):\n # if self.tags: return self.tags\n if len(self.tags) > 0: return self.tags\n tags = Settings.get_tags() or []\n if len(tags) > 0: return tags\n if not Settings.prompt(\"tags\"): return []\n question = {\n 'type': 'input',\n 'name': 'tags',\n 'message': 'Tags:',\n 'validate': ListValidator\n }\n answers = PyInquirer.prompt(question)\n tags = answers[\"tags\"]\n tags = tags.split(\",\")\n tags = [n.strip() for n in tags]\n if not Settings.confirm(tags): return self.get_tags()\n self.tags = tags\n return self.tags\n\n # ensures File references exist and are downloaded\n # files are File references\n # file references can be GoogleId references which need to download their source\n # files exist when checked for size\n # ?\n def get_files(self):\n if len(self.files) > 0: return self.files\n files = []\n if len(self.files) == 0 and len(Settings.get_input()) > 0:\n files.append(Settings.get_input_as_files())\n # elif len(self.files) == 0 and len(Google_File.get_files()) > 0:\n # files = Google_File.select_files()\n elif len(self.files) == 0:\n files = File.select_file_upload_method()\n filed = []\n for file in files:\n if isinstance(file, Google_Folder): filed.extend(file.get_files())\n else: filed.append(file)\n self.files = filed\n return self.files\n\n def get_expiration(self):\n if self.expiration: return self.expiration\n expires = Settings.get_expiration() or None\n if expires: return expires\n if not Settings.prompt(\"expiration\"): return None\n question = {\n 'type': 'input',\n 'name': 'expiration',\n 'message': 'Expiration [1, 3, 7, 99 (\\'No Limit\\')]',\n 'validate': ExpirationValidator\n }\n answers = PyInquirer.prompt(question)\n expiration = answers[\"expiration\"]\n if not Settings.confirm(expiration): return self.get_expiration()\n self.expiration = expiration\n return self.expiration\n\n def get_poll(self):\n if self.poll and self.poll.check(): return self.poll\n if not Settings.prompt(\"poll\"): return None\n poll = Settings.get_poll() or None\n if poll: return poll\n if not Settings.prompt(\"poll\"): return None\n poll = Poll()\n poll.get()\n if not poll.check(): return None\n self.poll = poll\n return poll\n\n def get_price(self):\n if self.price: return self.price\n price = Settings.get_price() or None\n if price: return price\n if not Settings.prompt(\"price\"): return \"\"\n question = {\n 'type': 'input',\n 'name': 'price',\n 'message': 'Price',\n 'validate': NumberValidator,\n 'filter': lambda val: int(val)\n }\n answers = PyInquirer.prompt(question)\n price = answers[\"price\"]\n if not Settings.confirm(price): return self.get_price()\n self.price = price\n return self.price\n\n # ensures listed recipients are users\n # Settings.USERS and self.recipients should be usernames\n # if includes [all, recent, favorite] & usernames it only uses the 1st found of [all,...]\n def get_recipients(self):\n if len(self.users) > 0: return 
self.users\n users = []\n if len(self.recipients) == 0 and len(Settings.get_users()) > 0: \n users = Settings.get_users()\n elif len(self.recipients) == 0 and Settings.get_user(): \n users = [Settings.get_user()]\n elif len(self.recipients) == 0:\n users = User.select_users()\n # users = []\n # for user in recipients:\n # if str(user.username).lower() == \"all\":\n # users = User.get_all_users()\n # break\n # elif str(user.username).lower() == \"recent\":\n # users = User.get_recent_users()\n # break\n # elif str(user.username).lower() == \"favorite\":\n # users = User.get_favorite_users()\n # break\n # else: users.append(user)\n self.users = users\n return self.users\n\n def get_schedule(self):\n if self.schedule: return self.schedule\n if not Settings.prompt(\"schedule\"): return None\n schedule = Settings.get_Schedule()\n if schedule: return schedule\n if not Settings.prompt(\"schedule\"): return None\n schedule = Schedule()\n schedule.get()\n if not schedule.check(): return None\n self.schedule = schedule\n return schedule\n \n def get_text(self):\n if self.text: return self.text\n text = Settings.get_text() or None\n if text: return text\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n answers = PyInquirer.prompt(question)\n text = answers[\"text\"]\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text\n\n def get(self):\n if self.gotten: return\n self.get_text()\n self.get_keywords()\n self.get_tags()\n self.get_price()\n self.get_poll()\n self.get_schedule()\n self.get_files()\n self.get_recipients()\n if not self.text:\n if len(self.files) > 0:\n self.text = self.files[0].get_title()\n self.gotten = True\n\n def get_post(self):\n if self.gotten: return\n self.get_text()\n self.get_keywords()\n self.get_tags()\n self.get_poll()\n self.get_schedule()\n self.get_files()\n if not self.text:\n if len(self.files) > 0:\n self.text = self.files[0].get_title()\n self.gotten = True\n\n def get_message(self):\n if self.gotten: return\n self.get_recipients()\n self.get_text()\n self.get_price()\n self.get_files()\n if not self.text:\n if len(self.files) > 0:\n self.text = self.files[0].get_title()\n self.gotten = True\n\n def post(self):\n self.get_post()\n if not Settings.prompt(\"Post\"): return\n successful = False\n try: successful = Driver.post(self)\n except Exception as e:\n Settings.dev_print(e)\n successful = False\n if successful: self.cleanup_files()\n\n # sends to recipients\n # 'post' as recipient will post message instead\n def send(self):\n self.get_message()\n if not Settings.prompt(\"Send\"): return\n successful = False\n try: \n # for user in self.get_recipients():\n for user in self.users:\n # if isinstance(user, str) and str(user) == \"post\": successful_ = Driver.post(self)\n # print(\"Messaging: {}\".format(user.username))\n if isinstance(user, User): successful_ = User.message_user(user.username, self)\n else: successful_ = User.message_user(user, self)\n if not successful_: continue\n successful_ = Driver.message(user.username)\n except Exception as e:\n Settings.dev_print(e)\n successful = False\n if successful: self.cleanup_files()","sub_path":"OnlySnarf/src/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":10859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"528924102","text":"class ActiviteDic:\n \n def __init__(self):\n self.devoirs = []\n self.autres = []\n\n def 
add(self, activite):\n if activite.estDevoir():\n if activite not in self.devoirs:\n self.devoirs.append(activite)\n self.devoirs.sort(key=str, reverse= True)\n else:\n if activite not in self.autres:\n self.autres.append(activite)\n self.autres.sort(key=str)\n","sub_path":"competences/datasAccess/ActiviteDic.py","file_name":"ActiviteDic.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526165456","text":"import cv2\n\nvideo = cv2.VideoCapture(\"videos/people.mp4\")\ncascadePeople = cv2.CascadeClassifier('recog/haarcascade_pedestrian.xml')\n\nwhile True:\n _, frame = video.read()\n peopleCinza = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n peopleRecog = cascadePeople.detectMultiScale(peopleCinza, 1.8, 7)\n for (x,y,w,h) in peopleRecog:\n frame = cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\n cv2.imshow(\"People Recognition\", frame)\n key = cv2.waitKey(60)\n if key == 27:\n break\ncv2.destroyAllWindows()","sub_path":"Reconhece Pessoas/PeopleRecog_haarcascade.py","file_name":"PeopleRecog_haarcascade.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577853745","text":"import binascii\nimport sys\nimport os\n\n\n\n\n\n# Initialize some variables\ndictionary = {}\nsp_dictionary = {'newline':'\\x0A\\x0A'}\ny = 0\nll = 0\nb = 0\n\n\n## Writes the uncompressed file into a new file within the same directory and same\n## name as the source file with a txt extension\n\ndef write_close_file():\n\n global file_block\n global file_path\n global txtexten\n global dictionary\n\n\n new_file_path = file_path[:-3] + 'txt'\n\n file = open(new_file_path, 'wb')\n file.write(file_block)\n file.close()\n dictionary = {}\n\n print ('File Written: ' + new_file_path)\n\n\n\n## Function that selects a certain search function (e.g. find_5_zeros) based on\n## file length and other criteria\ndef check_start():\n global file_cont\n global cmp_key_block_start\n global cmp_key_block_length\n\n\n if len(file_cont) < 250:\n if find_5_zeros() == True:\n return()\n\n if len(file_cont) > 1200:\n find_3_zeros()\n if find_3_zeros() != True:\n find_4_zeros()\n return()\n\n\n else:\n find_4_zeros()\n return()\n\n\n\n\n## Search function that goes through small ztr files looking for the start of the\n## compression key block\n\ndef find_5_zeros():\n\n global file_cont\n global cmp_key_block_start\n global cmp_key_block_length\n global chunk\n\n try:\n for i in range(len(file_cont)):\n chunk = file_cont[i:i+8]\n\n if chunk[0:1] != '\\x00':\n\n if chunk[1:6] == '\\x00\\x00\\x00\\x00\\x00':\n\n if chunk[6:7] != '\\x00':\n cmp_key_block_start = i + 6\n\n if chunk[7:8] < '\\x21':\n cmp_key_block_length = chunk[7]\n return(True)\n\n else:\n cmp_key_block_length = 0\n return(False)\n\n except:\n return(1)\n\n\n## Search function that goes through big ztr files looking for the start of the\n## compression key block\n\ndef find_3_zeros():\n\n global cmp_key_block_start\n global cmp_key_block_length\n global file_cont\n global chunk\n\n\n for i in range(len(file_cont)):\n chunk = file_cont[i:i+6]\n\n\n if chunk[0:1] != '\\x00':\n\n if chunk[1:4] == '\\x00\\x00\\x00':\n\n\n if chunk[4:5] != '\\x00':\n if chunk[4:5] >= '\\x02':\n continue\n\n else:\n cmp_key_block_length = sum(ord(c) << (i * 8) for i, c in enumerate(str(chunk[4:6])[::-1]))\n cmp_key_block_start = i + 6\n return(True)\n\n\n\n\n## Search function that goes through medium ztr 
files looking for the start of the\n## compression key block\n\n\ndef find_4_zeros():\n\n global cmp_key_block_start\n global cmp_key_block_length\n global file_cont\n global chunk\n\n\n\n try:\n for i in range(len(file_cont)):\n chunk = file_cont[i:i+7]\n\n\n if chunk[0:1] != '\\x00':\n if chunk[1:5] == '\\x00\\x00\\x00\\x00':\n if chunk[5:6] != '\\x00':\n if chunk[6:7] != '\\x00':\n \n\n\n cmp_key_block_start = i + 6\n cmp_key_block_length = chunk[5]\n break\n\n except:\n return()\n\n\n\n\n\n\n# File block is the block of txt that is to be uncompressed\n# cmp_key block is the block with the compression keys in it\n\ndef make_blocks():\n\n\n global file_cont\n global cmp_key_block_start\n global cmp_key_block_length\n global cmp_key_block\n global file_block\n\n\n file_block = str(file_cont[cmp_key_block_start + cmp_key_block_length:])\n cmp_key_block = file_cont[cmp_key_block_start:cmp_key_block_start + cmp_key_block_length]\n\n\n\n\n# Loop that creates a dictionary entry for each compression key\ndef create_dictionary():\n\n global chunk\n global cmp_key_block\n global dictionary\n global ll\n global b\n\n\n\n ll = 0\n for ii in range((len(cmp_key_block) +1) // 3):\n chunk = str(cmp_key_block[ll:ll+3])\n key = chunk[0:1]\n\n\n\n value = chunk[1:3]\n dictionary[key] = value\n ll = ll + 3\n\n\n\n\n\n\n# Loop that interates through each key value and rewrites the value uncompressed\ndef rewrite_dictionary():\n\n global dictionary\n global cmp_key_block\n\n ll = 0\n\n\n for k in dictionary:\n for ii in range((len(cmp_key_block) +1) // 3):\n chunk = str(cmp_key_block[ll:ll+3])\n key = chunk[0:1]\n\n\n\n if chunk[1:3] == '\\x00\\x00':\n lft_chunk = '\\n'\n rght_chunk = '\\n'\n break\n\n\n\n if chunk[1:2] == '\\x81':\n lft_chunk = chunk[1:2]\n\n if chunk[2:3] in dictionary:\n rght_chunk = dictionary[chunk[2:3]]\n break\n\n rght_chunk = chunk[2:3]\n break\n\n\n\n if chunk[1:2] == '\\x82':\n lft_chunk = chunk[1:2]\n\n if chunk[2:3] in dictionary:\n rght_chunk = dictionary[chunk[2:3]]\n break\n\n rght_chunk = chunk[2:3]\n break\n\n\n\n if chunk[1:2] == '\\x83':\n lft_chunk = chunk[1:2]\n\n if chunk[2:3] in dictionary:\n rght_chunk = dictionary[chunk[2:3]]\n break\n\n rght_chunk = chunk[2:3]\n #print ('starts with 83')\n break\n\n\n\n if chunk[1:3] == '\\x40\\x72':\n lft_chunk = '\\x0A'\n rght_chunk = '\\x0A'\n #print ('starts with 83')\n break\n\n\n if chunk[1:2] in dictionary:\n lft_chunk = dictionary[chunk[1:2]]\n\n if chunk[2:3] in dictionary:\n rght_chunk = dictionary[chunk[2:3]]\n break\n\n\n\n\n if chunk[2:3] == '\\x81':\n rght_chunk = chunk[2:3]\n #print ('ends with 81')\n break\n\n\n if chunk[2:3] == '\\x82':\n rght_chunk = chunk[2:3]\n #print ('ends with 82')\n break\n\n if chunk[2:3] == '\\x83':\n rght_chunk = chunk[2:3]\n #print ('ends with 83')\n break\n\n else:\n rght_chunk = chunk[2:3]\n break\n\n\n\n else:\n lft_chunk = chunk[1:2]\n rght_chunk = chunk[2:3]\n break\n\n\n\n\n value = lft_chunk + rght_chunk\n\n dictionary[key] = value\n ll = ll + 3\n\n\n\n\n\n## Progresses through the text file and tests each byte to see if it has an entry\n## as a key in the dictionary, and if it does it rewrites the value back to the\n## file. 
Thus uncompressing the text.\ndef uncompress_file():\n\n global dictionary\n global file_block\n global key\n y = 0\n\n\n while y in range(len(file_block) + 1,):\n chunk = file_block[y:y+1]\n\n\n\n if chunk not in dictionary:\n\n\n\n if chunk <= '\\x39':\n lblock = file_block[:y]\n rblock = file_block[y+1:]\n file_block = lblock + rblock\n y = y + 1\n continue\n\n\n if chunk == '\\x40':\n if file_block[y+1:y+2] == '\\x72':\n\n lblock = file_block[:y]\n\n mblock = sp_dictionary['newline']\n\n rblock = file_block[y+2:]\n\n file_block = lblock + mblock + rblock\n\n y = y + 2\n continue\n\n\n\n\n try:\n\n\n lblock = file_block[:y]\n\n mblock = dictionary[chunk[0:1]]\n\n rblock = file_block[y+1:]\n\n\n file_block = lblock + mblock + rblock\n\n\n\n nl = len(dictionary[chunk[0:1]])\n y = y + nl\n continue\n\n except:\n y = y + 1\n continue\n\n## Function that removes 'Junk' bytes\n\ndef cleanup():\n\n global y\n global file_block\n\n\n chunk = bytearray(file_block[:])\n s = 1\n m = 0\n\n while s == 1:\n try:\n junk = chunk.index('\\x00')\n if m < 1:\n del chunk[junk:junk+1]\n\n chunk.insert(junk,'\\x0A')\n chunk.insert(junk + 1,'\\x0A')\n\n m = 1\n continue\n\n del chunk[junk:junk+1]\n continue\n\n except:\n try:\n junk = chunk.index('\\x01')\n del chunk[junk:junk+1]\n continue\n\n except:\n try:\n junk = chunk.index('\\x02')\n del chunk[junk:junk+1]\n continue\n\n except:\n try:\n junk = chunk.index('\\x03')\n del chunk[junk:junk+1]\n continue\n\n except:\n try:\n junk = chunk.index('\\x04')\n del chunk[junk:junk+1]\n continue\n\n except:\n file_block = str(chunk)\n s = 2\n continue\n\n\n\n\n\n\n\nfor root, dirs, files in os.walk(\".\"):\n for file in files:\n if file.endswith('jp.ztr'):\n file_path = os.path.join(root, file)\n file = open(os.path.join(root, file), 'rb')\n file_cont = bytearray(file.read())\n\n\n\n\n check_start()\n make_blocks()\n create_dictionary()\n rewrite_dictionary()\n uncompress_file()\n write_close_file()\n continue\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test8.5.py","file_name":"test8.5.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"255385034","text":"# __author__: Stanley\n# date: 2018/3/8\n\n# if True:\n# x=3\n# print(x)\n\n# x = int(2.3) # built-in\n# g_count = 0 # global 全局变量\n#\n#\ndef outer():\n o_count = 1 # enclosing 嵌套局部变量。\n def inner():\n nonlocal o_count # 嵌套变量修改,关键字 nonlocal\n o_count = 5\n i_conut = 2 # local 局部变量\n print(i_conut)\n print(o_count)\n\n inner()\n\n\nouter()\n\n\ncount = 10\n\ndef outer():\n global count # 局部变量无法修改全局变量,如果要强行修改,就需要写关键字global\n count=count+1\n print(count)\n\nouter()\n\n\n\n","sub_path":"day14/函数作用域.py","file_name":"函数作用域.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"544794325","text":"import pickle\n\ndef data_save(data): # Save to File\n outfile = open('wire.wjc', 'wb')\n pickle.dump(data, outfile)\n outfile.close()\n\ndef data_load(): # Load From File\n infile = open('wire.wjc', 'rb')\n newfile = pickle.load(infile)\n infile.close()\n return newfile\n\ndef create_data_file(): # Create File\n load = {}\n outfile = open('wire.wjc', 'wb')\n pickle.dump(load, outfile)\n outfile.close()\n\ndef del_key(dict): # Delete key + Data of key\n key = input('delete what?: ')\n try:\n del dict[key]\n data_save(dict)\n except:\n print('No Key')\n finally:\n main()\n\ndef exit_code(): # Exit Loop/Program\n print('quit')\n 
quit()\n\ndef help(): # Help\n print('\\n Commands ')\n print('***********************************************')\n print(' edit: -> Edit the weight of the code')\n print(' [del]ete: -> Delete a code')\n print('[q]uit/exit: -> Back or Quit the program')\n print(' help/?: -> Provides Help information.')\n print('***********************************************\\n')\n\n\ndef new_code(code): # Add New Code\n\n def Wire_Weight():\n try:\n global wire_weight\n wire_weight = float(input('Weight(KG): '))\n except:\n print('Not Valid! Try Again\\n')\n Wire_Weight()\n finally:\n return wire_weight\n\n def Wire_Description():\n global wire_description\n wire_description = input('Description (' + str(code) + '): ')\n return wire_description\n\n\n wire_weight = Wire_Weight()\n wire_description = Wire_Description()\n\n return {code:[wire_weight,wire_description]}\n\n\ndef edit_code(code,weight,description): # Edit Code Weight\n def Wire_Weight():\n try:\n global edit_weight\n print('Old Weight: ' + str(weight) + 'kg')\n edit_weight = float(input('New Weight(KG)?: '))\n except:\n print('Not Valid! Try Again\\n')\n Wire_Weight()\n finally:\n return edit_weight\n\n edit_weight = Wire_Weight()\n\n return {code: [edit_weight, description]}\n\n\ndef main(): # Main Loop\n\n try:\n print('DataBase: ' + str(data_load().keys()))\n except:\n create_data_file()\n\n load = data_load()\n user = input('>>>')\n\n if user in load:\n print('\\ncode: ' + str(user))\n print('weight: ' + str(load[user][0]) + 'kg')\n print('description: ' + str(load[user][1]) + '\\n')\n\n elif user == 'edit':\n edit_q = input('Edit New Weight Of Code?: ')\n if edit_q not in load:\n pass\n else:\n load = load.copy()\n load.update(edit_code(edit_q,load[edit_q][0],load[edit_q][1]))\n data_save(load)\n\n elif user == 'q' or user == 'quit' or user == 'exit':\n exit_code()\n\n elif user == 'del' or user == 'Del' or user == 'delete':\n del_key(load)\n\n elif user == 'help' or user == '?':\n help()\n\n elif user != load:\n New_q = input('Add New Code?: [y]/[n]: ')\n if New_q == 'y':\n load = load.copy()\n load.update(new_code(user))\n data_save(load)\n\n main()\n\nif __name__ == '__main__':\n main()","sub_path":"PC/WJC/Wire/wjc_wire_test.py","file_name":"wjc_wire_test.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"172380056","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport io\nfrom orderedset import OrderedSet\nfrom shell import Shell\nimport time\n\ndef static_vars(**kwargs):\n def decorate(func):\n for k in kwargs:\n setattr(func, k, kwargs[k])\n return func\n return decorate\n\ndef scan_dir(path, filter_func):\n result_list = []\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if filter_func(name):\n filename = os.path.join(root, name)\n result_list.append(filename)\n return result_list\n\ndef bz2file(name):\n return name.lower().endswith('.tar.bz2')\n\n@static_vars(filters=[])\ndef bz2fileAndPlatform(name):\n for filter_name in bz2fileAndPlatform.filters:\n if name.find(filter_name) == -1:\n return False\n return bz2file(name)\n\ndef get_next_version_path(version_list, needDate=False):\n max_version = 0\n max_idx = -1\n prefix = ''\n for idx, path in enumerate(version_list):\n version = path.replace('.tar.bz2','').split('.')[-1]\n if not version.isdigit():\n prefix = ''\n number = None\n for i, s in enumerate(version):\n if s.isdigit():\n number = int(version[i:])\n break\n prefix += s\n version = number\n 
else:\n version = int(version)\n if max_version < version:\n max_version = version\n max_idx = idx\n base_path = '.'.join(version_list[max_idx].replace('.tar.bz2','').split('.')[:-1])\n\n if needDate:\n today = time.strftime('%Y%m%d', time.localtime())\n real_base_path = ''\n for context in base_path.split('-'):\n if context.isdigit():\n real_base_path += (today + '-')\n else:\n real_base_path += (context + '-')\n base_path = real_base_path[:-1]\n print('How to generate next version number(currentVersion : %d)?' % max_version)\n print('1. Auto(just +1)')\n print('2. Manual(Specitif Number)')\n no = int(input('> '))\n if no == 1:\n return base_path + ('.%s%d' % (prefix, (max_version+1))) + '.tar.bz2'\n else:\n next_version = int(input('Input version: '))\n return base_path + ('.%s%d' % (prefix, (next_version))) + '.tar.bz2'\n\n\n\nif __name__ == '__main__':\n current_dir_result_list = scan_dir(os.path.realpath('.'), bz2file)\n os.environ['MY_BINARY_PATH'] = os.path.realpath('.')\n# /home/hyeonsub.jung/work/updater/updater-h15-2.5.0-drd4id.signage.5.tar.bz2\n for path in current_dir_result_list:\n only_file_name = os.path.basename(path)\n\n module = only_file_name.split('-')[0]\n chip = only_file_name.split('-')[1]\n region = ''\n if chip == 'atsc' or chip == 'dvb':\n region = chip\n chip = only_file_name.split('-')[2]\n if module == 'tvservice':\n print('You must select platform')\n print('1. Signage')\n print('2. Hotel')\n no = int(input('> '))\n bz2fileAndPlatform.filters.append('hotel' if no == 2 else 'signage')\n if no == 2 and not chip.endswith('hotel'):\n chip += 'hotel'\n else:\n bz2fileAndPlatform.filters.append('hotel' if only_file_name.find('hotel') != -1 else 'signage')\n binary_server_path = os.path.expanduser('~') + '/work/binary_server/id_only_binary/starfish-drd4id/%s/official/%s' % (chip, module)\n if region:\n binary_server_path += '-' + region\n file_filter = None\n if len(sys.argv) > 1:\n bz2fileAndPlatform.filters.append(sys.argv[1])\n print(binary_server_path)\n binary_result_list = scan_dir(binary_server_path, bz2fileAndPlatform)\n next_version_path = get_next_version_path(binary_result_list, module == 'tvservice')\n print('Next version : %s' % next_version_path)\n answer = input('If you want to copy? (y/n) ')\n if answer.lower() == 'y':\n Shell.execute('mv %s %s' % (path, os.path.dirname(path) + '/' + os.path.basename(next_version_path)))\n print('Copy [%s] to [%s]..' 
% (path, next_version_path))\n Shell.execute('sudo cp %s %s' % (path, next_version_path))\n os.system('bash')\n\n","sub_path":"origin/copy_binaryserver.py","file_name":"copy_binaryserver.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381353528","text":"#coding:utf-8\n\nfrom tornado.web import RequestHandler\nimport json\nfrom util.convert import is_mobile\nfrom express import express\nimport logging\n\nLOG = logging.getLogger(__name__)\n\nclass InputHandler(RequestHandler):\n def post(self):\n merchant_code = self.get_argument('merchant_code', '')\n tracking_number = self.get_argument('tracking_number', '')\n phone_number = self.get_argument('phone_number', '')\n dhl_code = self.get_argument('dhl_code', '')\n remark = self.get_argument('remark', '')\n try:\n # Check the merchant_code\n\n # Validate the tracking_number\n\n # Validate the phone_number\n if not is_mobile(phone_number):\n self.finish(json.dumps({'state': 3, 'message': 'phone error'}))\n return\n express_op = express.Express()\n _ = express_op.intput(merchant_code, tracking_number, phone_number, dhl_code, remark)\n self.finish(json.dumps({'state': 0, 'message': 'input success'}))\n except Exception as ex:\n LOG.error(\"Input express error:%s\"%ex)\n self.finish(json.dumps({'state': 4, 'message': 'input error'}))\n\nclass OutputHandler(RequestHandler):\n def post(self):\n code = self.get_argument('code', '')\n pick_code = self.get_argument('pick_code', '')\n phone_number = self.get_argument('phone_number', '')\n tracking_number = self.get_argument('tracking_number', '')\n try:\n express_op = express.Express()\n _ = express_op.output(code, pick_code, tracking_number, phone_number)\n self.finish(json.dumps({'state': 0, 'message': 'output success'}))\n except Exception as ex:\n LOG.error(\"Output express error:%s\" % ex)\n self.finish(json.dumps({'state': 2, 'message': 'output error'}))\n\nclass InfosHandler(RequestHandler):\n def get(self):\n tracking_number = self.get_argument('tracking_number', '')\n phone_number = self.get_argument('phone_number', '')\n merchant_code = self.get_argument('merchant_code', '')\n pick_code = self.get_argument('pick_code', '')\n limit = int(self.get_argument('limit', 0))\n page = int(self.get_argument('page', 0))\n\n try:\n offset = 0 if page < 1 else (page-1)*limit\n express_op = express.Express()\n _ = express_op.infos(offset=offset, limit=limit, tracking_number=tracking_number,\n phone_number=phone_number, merchant_code=merchant_code,\n pick_code=pick_code)\n self.finish(json.dumps(_.update({'state': 0, 'message': 'output success'})))\n except Exception as ex:\n LOG.error(\"Infos express error:%s\" % ex)\n self.finish(json.dumps({'state': 1, 'message': 'get infos error'}))\n\n\n\n\n","sub_path":"api/express_api.py","file_name":"express_api.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487784926","text":"import numpy as np\n\nfrom .base import Filter\n\nclass Gabor2D(Filter):\n def __init__(self, size, lmbda, theta, sigma, gamma):\n Filter.__init__(self, size)\n\n self._lambda = lmbda\n self._theta = theta\n self._sigma = sigma\n self._gamma = gamma\n\n def _index_value_func(self):\n radius = self._size // 2\n def index_value(i, j):\n i -= radius\n j -= radius\n\n x = i * np.cos(self._theta) + j * np.sin(self._theta)\n y = -i * np.sin(self._theta) + j * np.cos(self._theta)\n\n part_1 = np.exp(-(x ** 2 + self._gamma 
** 2 * y ** 2) / (2 * self._sigma ** 2))\n part_2 = np.cos(2 * np.pi * x / self._lambda)\n return part_1 * part_2\n return index_value","sub_path":"neurokit/filters/gabor.py","file_name":"gabor.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"458357403","text":"'''\n\nConsider the fraction, n/d, where n and d are positive integers. If n n:\n\t\t\treturn True\n\treturn True\n\ndef getPrimeFactors(number,primeArray,primeFactors):\n\tprimeFactors[number] = []\n\tfor prime in primeArray:\n\t\tif number % prime == 0:\n\t\t\tprimeFactors[number].append(prime)\n\t\t\tif prime*prime != number and isPrime(number/prime,primeArray):\n\t\t\t\tprimeFactors[number].append(number/prime)\n\t\t\t\tbreak\n\t\t\tfor i in primeFactors[number/prime]:\n\t\t\t\tif i not in primeFactors[number]:\n\t\t\t\t\tprimeFactors[number].append(i)\n\t\t\tbreak\n\treturn primeFactors\n\ndef n_irreducible(number,primeFactors):\n\tif primeFactors[number] == []:\n\t\treturn number-1\n\tcount = number\n\tfor prime in primeFactors[number]:\n\t\tcount *= (1-1/prime)\n\treturn int(count)\n\n\ndef main():\n\tN=10**6\n\tprimeArray = getPrimes(N)\n\tprimeFactors = {}\n\tcount = 0\n\tfor i in range(1,N+1):\n\t\tgetPrimeFactors(i,primeArray,primeFactors)\n\t\tcount += n_irreducible(i,primeFactors)\n\tprint(count)\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n","sub_path":"Problem72.py","file_name":"Problem72.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371513623","text":"#-*-coding:utf-8 -*-\n'''\n检查获取到的住院日期是否完整\n'''\n\n#-*-coding:utf-8 -*-\nfrom operator import add\n\n'''\n统计每月的住院总费用,统筹费用支出,门诊费用,门诊统筹费用支出\n'''\nimport datetime\n\nimport re\nfrom pyspark import SparkContext\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\ndef hospitalProcessing(line):\n line = line.encode('utf-8').split(',')\n for i in range(6, 20):\n if (line[i] == \"\"):\n line[i] = '0'\n if (line[21] != \"\"): ###可能存在未记录出院时间和住院时间的住院记录\n inHospital = line[21]\n s = inHospital.strip(\"\").split('-')\n s[1] = re.sub(\"\\D\", \"\", s[1]) ##提取其中数字部分\n if len(s[0]) < 2:\n s[0] = '0' + s[0]\n if len(s[1]) < 2:\n s[1] = '0' + s[1]\n day='20'+s[2]+s[1]+s[0]\n return (day, (float(line[6]), float(line[17]), 1))\n else:\n return (str(999999),1)\n\nsc=SparkContext()\n# hospital=sc.textFile('/mif/data_new/worker_hospital.txt')\n# ####((日期,(总费用,统筹费用支出,住院次数次数))\n# hospital=hospital.map(hospitalProcessing)\\\n# .filter(lambda (key,value):(isinstance(value,int)==False and key>'2006')) \\\n# .reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1], a[2] + b[2])) \\\n# .sortByKey()\n\nmen_zhen=sc.textFile('/mif/data_new/worker_menzhen.txt')\n####((日期,(总费用,统筹费用支出,门诊人次))\nmen_zhen=men_zhen.map(lambda line:line.encode('utf-8').split(','))\\\n .filter(lambda line:line[5]!=\"\" and line[12]!=\"\")\\\n .map(lambda line:(line[15],(float(line[5]),float(line[12]),1)))\\\n .reduceByKey(lambda a,b:(a[0]+b[0],a[1]+b[1],a[2]+b[2]))\\\n .sortByKey()\n\n\n###(日期)\n#######通过检查可知,住院数据比较完整,门诊数据不完整\nout=open('output/m_dateCheck.csv','w+')\nfor (key,value)in men_zhen.collect():\n out.write(\"%s\\n\"%key)\nout.close()\n\n","sub_path":"timeseries/datecheck.py","file_name":"datecheck.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"604241513","text":"\"\"\"\n===============\n06. 
Evoked data\n===============\n\nThe evoked data sets are created by averaging different conditions.\n\"\"\"\n\nimport os.path as op\n\nimport mne\nfrom mne.parallel import parallel_func\n\nimport config\n\n\ndef run_evoked(subject):\n print(\"Processing subject: %s\" % subject)\n meg_subject_dir = op.join(config.meg_dir, subject)\n if config.use_ica or config.use_ssp:\n extension = '_cleaned-epo'\n else:\n extension = '-epo'\n fname_in = op.join(meg_subject_dir,\n config.base_fname.format(**locals()))\n extension = '-ave'\n fname_out = op.join(meg_subject_dir,\n config.base_fname.format(**locals()))\n\n print(\"Input: \", fname_in)\n print(\"Output: \", fname_out)\n\n print(' Creating evoked datasets')\n epochs = mne.read_epochs(fname_in, preload=True)\n\n evokeds = []\n for condition in config.conditions:\n evokeds.append(epochs[condition].average())\n mne.evoked.write_evokeds(fname_out, evokeds)\n\n if config.plot:\n ts_args = dict(gfp=True, time_unit='s')\n topomap_args = dict(time_unit='s')\n\n for condition, evoked in zip(config.conditions, evokeds):\n evoked.plot_joint(title=condition, ts_args=ts_args,\n topomap_args=topomap_args)\n\n\nparallel, run_func, _ = parallel_func(run_evoked, n_jobs=config.N_JOBS)\nparallel(run_func(subject) for subject in config.subjects_list)\n","sub_path":"07-make_evoked.py","file_name":"07-make_evoked.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633988428","text":"number1 = 4\nnumber2 = 5\nsone1 = \"10\"\nsone2 = \"3\"\nsumma = number1+number2\nprint(\"Summa on: \",summa)\nprint(int(sone1)+int(sone2))\nprint(str(number1)+str(number2))\n\n\n\n\nint1 = int(input(\"Sisesta oma pikkus: \"))\nint2 = int(input(\"Sisesta oma vanus: \"))\n\nidentifikaator = int1*int2\n\n\nprint(\"Teie identifikaator on on: \",int1*int2)\nprint(\"Teie identifikaator on on: \",identifikaator)","sub_path":"FirstOne.py","file_name":"FirstOne.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"498126532","text":"from normality import stringify\nimport phonenumbers\nfrom phonenumbers.phonenumberutil import NumberParseException\n\nPHONE_FORMAT = phonenumbers.PhoneNumberFormat.INTERNATIONAL\n\n\ndef parse_phone(number, country=None):\n \"\"\"Parse a phone number and return in international format.\n\n If no valid phone number can be detected, None is returned. 
If\n a country code is supplied, this will be used to infer the\n prefix.\n\n https://github.com/daviddrysdale/python-phonenumbers\n \"\"\"\n number = stringify(number)\n if number is None:\n return\n if country is not None:\n country = country.upper()\n try:\n num = phonenumbers.parse(number, country)\n if phonenumbers.is_possible_number(num):\n if phonenumbers.is_valid_number(num):\n num = phonenumbers.format_number(num, PHONE_FORMAT)\n return num.replace(' ', '')\n return\n except NumberParseException:\n return\n","sub_path":"dalet/phones.py","file_name":"phones.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309703183","text":"# Reference: https://docs.python.org/2/library/unittest.html\nimport unittest\nfrom mp_socket import mp_socket\n\nclass mp_socket_test(unittest.TestCase):\n def test_setupServer(self):\n print(\"Test Setup Server\")\n self.assertEqual(mp_socket().connection(\"test_setupServer\"), True)\n\n def test_ConnectToServer(self):\n print(\"-------------------------\")\n print(\"Test Connect to Server\")\n self.assertEqual(mp_socket().connection(\"test_ConnectToServer\"), True)\n\n # def test_isupper(self):\n # self.assertTrue(\"FOO\".isupper())\n # self.assertFalse(\"Foo\".isupper())\n\n # def test_split(self):\n # s = \"hello world\"\n # self.assertEqual(s.split(), [\"hello\", \"world\"])\n \n # # Check that s.split fails when the separator is not a string.\n # with self.assertRaises(TypeError):\n # s.split(2)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"mp/socket_test.py","file_name":"socket_test.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295896678","text":"import pymongo\n\nconnection = pymongo.MongoClient(\"mongodb://localhost\")\ndb = connection.school\nstudents = db.students\n\nquery = {'scores.type' : 'homework'}\n\ncursor = students.find(query)\n\nfor a_student in cursor:\n\tvalMin = 99999\n\t\t\n\tfor score in a_student['scores']:\n\t\tif score['type'] == 'homework' and score['score'] < valMin:\n\t\t\tvalMin = score['score']\n\tstudents.update( {'_id' : a_student['_id']}, {'$pull' : {'scores' : {'score' : valMin } } } )\n","sub_path":"c3/h3_1.py","file_name":"h3_1.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"340233118","text":"#/usr/bin/env python3\ndef multi_table(num=9):\n end=num\n for i in range(1,end+1):\n for j in range(1,i+1):\n if i*j<10:\n print('%d×%d=%d' % (j,i,i*j),end=' ')\n # print(j, '×', i, '=', i * j, sep='', end=' ')\n else:\n print('%d×%d=%d' % (j,i,i*j),end=' ')\n # print(j,'×',i,'=',i*j,sep='',end=' ')\n print('')\nmulti_table(4)","sub_path":"python100/python_100_35.py","file_name":"python_100_35.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"618726730","text":"import numpy as np\nfrom scipy.signal import savgol_filter\n\nmagic = -0.6\n\n# following codes get the elbow and wrist information from the kinect sensor\nclass Pointing:\n def __init__(self, pointing_mode='screen'):\n if pointing_mode == 'screen':\n self.screen_mode = True\n elif pointing_mode == 'desk':\n self.screen_mode = False\n else:\n raise ValueError('Pointing mode is not recognized!\\n Accepted: screen, desk\\n Received: %s' % pointing_mode)\n\n if 
not self.screen_mode:\n # use this if in desk mode\n self.WRISTLEFT = 6 # JointType specified by kinect\n self.WRISTRIGHT = 10\n self.ELBOWLEFT = 5\n self.ELBOWRIGHT = 9\n self.joint_interest_coded = [self.WRISTLEFT, self.WRISTRIGHT, self.ELBOWLEFT, self.ELBOWRIGHT]\n else:\n # use this if in screen mode\n self.HANDTIPLEFT = 21 # JointType specified by kinect\n self.HANDTIPRIGHT = 23\n self.SHOULDERLEFT = 4\n self.SHOULDERRIGHT = 8\n self.joint_interest_coded = [self.HANDTIPLEFT, self.HANDTIPRIGHT, self.SHOULDERLEFT, self.SHOULDERRIGHT]\n\n self.joint_info = {i: None for i in self.joint_interest_coded} # contains left/right wrists/elbows coordinates\n self.joint_info_buffer = {i: [] for i in self.joint_interest_coded}\n\n self.lpoint_buffer = []\n self.rpoint_buffer = []\n self.lpoint_tmp = (0.0, magic)\n self.rpoint_tmp = (0.0, magic)\n self.lpoint = (0.0, magic) # inferred pointing coordinate on the table from left arm\n self.rpoint = (0.0, magic) # inferred pointing coordinate on the table from right arm\n\n self.lpoint_var = (0, 0) # variance of left point, sent to Brandeis\n self.rpoint_var = (0, 0) # variance of right point, sent to Brandeis\n self.lpoint_stable = False # whether left hand pointing is stable\n self.rpoint_stable = False # whether right hand pointing is stable\n\n def get_pointing_main(self, src, is_smoothing_joint=True, is_smoothing_point=True):\n\n if not self._get_wrist_elbow(src):\n return\n\n if self.screen_mode:\n try:\n if is_smoothing_joint:\n self._smoothing_joint(5, 2)\n self._smoothing_joint_mean(5)\n self._get_pointing(True) # True is coordinates on screen\n if is_smoothing_point:\n pass\n self._smoothing_point_mean(5)\n self._smoothing_point(5, 2)\n self.lpoint = (self.lpoint_tmp[0] - 0.25, self.lpoint_tmp[1])\n self.rpoint = (self.rpoint_tmp[0] + 0.25, self.rpoint_tmp[1])\n except Exception as e:\n print(e)\n else:\n try:\n self._smoothing_joint_desk(3, 2)\n self._get_pointing(False)\n self._smoothing_point(3, 2)\n self.lpoint, self.rpoint = self.lpoint_tmp, self.rpoint_tmp\n except Exception as e:\n print(e)\n\n self.lpoint_var = np.std(self.lpoint_buffer, axis=0)\n self.rpoint_var = np.std(self.rpoint_buffer, axis=0)\n if np.any((np.amax(self.lpoint_buffer, axis=0) - np.amin(self.lpoint_buffer, axis=0)) > [0.005, 0.005]):\n self.lpoint_stable = False\n else:\n self.lpoint_stable = True\n if np.any((np.amax(self.rpoint_buffer, axis=0) - np.amin(self.rpoint_buffer, axis=0)) > [0.005, 0.005]):\n self.rpoint_stable = False\n else:\n self.rpoint_stable = True\n\n def _get_wrist_elbow(self, src):\n '''\n This function retrieves the coordinates for left/right wrists/elbows (4 sets of 3 values: x, y, z)\n @:param src: decoded frame retrieved from the decode_frame() function\n '''\n try:\n for i in range(25):\n if src[(i + 1) * 9] in self.joint_interest_coded:\n self.joint_info[src[(i + 1) * 9]] = src[(i + 1) * 9 + 2: (i + 2) * 9 + 5]\n return True\n except IndexError:\n print('Not enough coordinates to unpack')\n return False\n\n def _smoothing_joint(self, window_length=5, polyorder=2):\n for k, v in list(self.joint_info_buffer.items()):\n if len(v) >= window_length:\n self.joint_info_buffer[k].pop(0)\n self.joint_info_buffer[k].append(self.joint_info[k])\n joint_smoothed = savgol_filter(self.joint_info_buffer[k], window_length, polyorder, axis=0).tolist()\n self.joint_info[k] = joint_smoothed[window_length // 2]\n else:\n self.joint_info_buffer[k].append(self.joint_info[k])\n\n def _smoothing_joint_mean(self, window_length=5):\n for k, v in 
list(self.joint_info_buffer.items()):\n if len(v) >= window_length:\n self.joint_info_buffer[k].pop(0)\n self.joint_info_buffer[k].append(self.joint_info[k])\n self.joint_info[k] = np.mean(self.joint_info_buffer[k], axis=0)\n else:\n self.joint_info_buffer[k].append(self.joint_info[k])\n\n def _smoothing_point(self, window_length=5, polyorder=2):\n '''\n Smoothing function for left and right pointing coordinates\n :param window_length:\n :param polyorder:\n :return:\n '''\n if len(self.lpoint_buffer) >= window_length:\n self.lpoint_buffer.pop(0)\n self.lpoint_buffer.append(self.lpoint_tmp)\n self.lpoint_buffer = savgol_filter(self.lpoint_buffer, window_length, polyorder, axis=0).tolist()\n self.lpoint_tmp = self.lpoint_buffer[int(window_length / 2)]\n else:\n self.lpoint_buffer.append(self.lpoint_tmp)\n\n if len(self.rpoint_buffer) >= window_length:\n self.rpoint_buffer.pop(0)\n self.rpoint_buffer.append(self.rpoint_tmp)\n self.rpoint_buffer = savgol_filter(self.rpoint_buffer, window_length, polyorder, axis=0).tolist()\n self.rpoint_tmp = self.rpoint_buffer[int(window_length / 2)]\n else:\n self.rpoint_buffer.append(self.rpoint_tmp)\n\n def _smoothing_point_mean(self, window_length=5):\n if len(self.lpoint_buffer) >= window_length:\n self.lpoint_buffer.pop(0)\n self.lpoint_buffer.append(self.lpoint_tmp)\n self.lpoint_tmp = np.mean(self.lpoint_buffer, axis=0)\n else:\n self.lpoint_buffer.append(self.lpoint_tmp)\n\n if len(self.rpoint_buffer) >= window_length:\n self.rpoint_buffer.pop(0)\n self.rpoint_buffer.append(self.rpoint_tmp)\n self.rpoint_tmp = np.mean(self.rpoint_buffer, axis=0)\n else:\n self.rpoint_buffer.append(self.rpoint_tmp)\n\n def _get_pointing(self, screen=True):\n if not screen:\n l_coord1 = self.joint_info[self.WRISTLEFT]\n r_coord1 = self.joint_info[self.WRISTRIGHT]\n l_coord2 = self.joint_info[self.ELBOWLEFT]\n r_coord2 = self.joint_info[self.ELBOWRIGHT]\n else:\n l_coord1 = self.joint_info[self.HANDTIPLEFT]\n r_coord1 = self.joint_info[self.HANDTIPRIGHT]\n l_coord2 = self.joint_info[self.SHOULDERLEFT]\n r_coord2 = self.joint_info[self.SHOULDERRIGHT]\n\n self.lpoint_tmp = self._calc_coordinates(l_coord1, l_coord2, screen)\n self.rpoint_tmp = self._calc_coordinates(r_coord1, r_coord2, screen)\n\n def _calc_coordinates(self, wrist, elbow, screen=True):\n if screen:\n '''\n Both wrist and elbow should contain (x,y,z) coordinates\n screen plane: z=0, ie pz = 0\n Line equation:\n (ex - px)/(ex - wx) = (ez - pz)/(ez - wz) = ez/(ez - wz)\n (ey - py)/(ey - wy) = (ez - pz)/(ez - wz) = ez/(ez - wz)\n so:\n px = ex - ez(ex-wx) / (ez-wz)\n py = ey - ez(ey-wy) / (ez-wz)\n '''\n if (elbow[2] - wrist[2]) == 0:\n return -np.inf, -np.inf\n screen_x = elbow[0] - elbow[2] * (elbow[0] - wrist[0]) / (elbow[2] - wrist[2])\n screen_y = elbow[1] - elbow[2] * (elbow[1] - wrist[1]) / (elbow[2] - wrist[2])\n\n #print('******SCREEN X Y', '{:<10.2}'.format(screen_x), '{:<10.2}'.format(screen_y))\n return screen_x, screen_y\n else:\n\n '''\n Both wrist and elbow should contain (x,y,z) coordinates\n Table plane: y = -0.582\n Line equation: \n y = (y2-y1)/(x2-x1) * (x-x1) + y1\n z = (z2-z1)/(y2-y1) * (y-y1) + z1\n so:\n x = x1 - (y1-y) / (y2-y1) * (x2-x1)\n z = z1 - (y1-y) / (y2-y1) * (z2-z1)\n '''\n if (elbow[1] - wrist[1]) == 0:\n return -np.inf, -np.inf\n table_y = -0.582\n table_x = wrist[0] - (wrist[1] - table_y) / (elbow[1] - wrist[1]) * (elbow[0] - wrist[0])\n table_z = wrist[2] - (wrist[1] - table_y) / (elbow[1] - wrist[1]) * (elbow[2] - wrist[2])\n\n return table_x, table_z\n\n def 
_smoothing_joint_desk(self, window_length=3, polyorder=2):\n for k, v in list(self.joint_info_buffer.items()):\n if len(v) >= window_length:\n self.joint_info_buffer[k].pop(0)\n self.joint_info_buffer[k].append(self.joint_info[k])\n self.joint_info_buffer[k] = \\\n savgol_filter(self.joint_info_buffer[k], window_length, polyorder, axis=0).tolist()\n self.joint_info[k] = np.mean(self.joint_info_buffer[k], axis=0)\n else:\n self.joint_info_buffer[k].append(self.joint_info[k])","sub_path":"RealTime/components/skeletonRecognition/receiveAndShow.py","file_name":"receiveAndShow.py","file_ext":"py","file_size_in_byte":9783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"37389186","text":"'''\nAccept N numbers from user and display all such elements which are even and divisible by 5.\nInput : N : 6\nElements : 85 66 3 80 93 88\nOutput : 80 \n'''\n\ndef AcceptElements(arr,size):\n print(\"Enter elements: \");\n for i in range(0,size):\n arr.append(int(input()));\n \ndef DisplayElements(arr):\n for i in range(0,len(arr)):\n print(arr[i]);\n \ndef CheckDivisibility(arr,brr):\n for i in arr:\n if((i%5 == 0)and (i%2 == 0)):\n brr.append(i);\n\ndef main():\n size = int(input(\"Enter number of elements:\"));\n if(size<=0):\n print(\"Invalid size\");\n return;\n src = [];\n dest = [];\n AcceptElements(src,size);\n print(\"Elements of the array are:\");\n DisplayElements(src);\n CheckDivisibility(src,dest);\n print(\"Even elements divisible by 5 are:\");\n DisplayElements(dest);\n \n\nif __name__ == \"__main__\":\n main();","sub_path":"4 Python_Programs/5 Problems on N numbers/3_FindNumbers_EvenAndDivBy5/Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"333015058","text":"# Copyright (C) 2022 Intel Corporation\n# SPDX-License-Identifier: MIT License\n\nimport torch\nimport dgl\nfrom ocpmodels.models import DimeNetPP\n\ntorch.random.manual_seed(10)\ndgl.random.seed(10)\n\n\n@torch.no_grad()\ndef test_dimenet_pp_single():\n # use the default settings\n model = DimeNetPP()\n # construct a random graph\n single_graph = dgl.rand_graph(10, 50)\n single_graph = dgl.remove_self_loop(single_graph)\n # generate positions and atomic numbers\n single_graph.ndata[\"pos\"] = torch.rand(10, 3)\n single_graph.ndata[\"atomic_numbers\"] = torch.randint(0, 100, (10,)).long()\n # test distance computation\n single_graph = model.edge_distance(single_graph)\n # now test the full computation unit testing lol\n output = model(single_graph)\n assert torch.isfinite(output).all()\n\n\n@torch.no_grad()\ndef test_dimenet_pp_batch():\n # use the default settings\n model = DimeNetPP()\n # construct a random graph\n single_graph = dgl.rand_graph(10, 50)\n single_graph = dgl.remove_self_loop(single_graph)\n # generate positions and atomic numbers\n single_graph.ndata[\"pos\"] = torch.rand(10, 3)\n single_graph.ndata[\"atomic_numbers\"] = torch.randint(0, 100, (10,)).long()\n graphs = dgl.batch(\n [\n single_graph,\n ]\n * 10\n )\n # test distance computation\n graphs = model.edge_distance(graphs)\n # now test the full computation unit testing lol\n output = model(graphs)\n assert output.shape == (graphs.batch_size, 1)\n assert 
torch.isfinite(output).all()\n","sub_path":"ocpmodels/models/dgl/dpp/tests/test_dpp_dgl.py","file_name":"test_dpp_dgl.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"227207324","text":"# -*- coding: utf-8 -*-\nimport math\nn=int(input('Digite a:'))\ns=0\nfor i in range(1,n+1,1):\n if (i%2==1):\n s=s+(i/(i**2))\n else:\n s=s-(i/(i**2))\nprint('%.5f' %s)\n","sub_path":"moodledata/vpl_data/79/usersdata/174/43582/submittedfiles/serie1.py","file_name":"serie1.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"60601997","text":"class Solution:\n def canCross(self, stones) -> bool:\n if len(stones) == 0:\n return True\n if stones[1] > 1:\n return False\n ll = len(stones)\n j_val = {}\n for st in stones:\n j_val[st] = set()\n j_val[stones[1]].add(1)\n # print(j_val)\n for i in range(1, ll):\n for jump in j_val[stones[i]]:\n if jump > 0 and jump + stones[i] in j_val:\n # if not j_val[jump + stones[i]].__contains__(jump):\n j_val[jump + stones[i]].add(jump)\n # print('jump from', stones[i], 'to', jump + stones[i], jump)\n if jump - 1 > 0 and jump - 1 + stones[i] in j_val:\n # if not j_val[jump - 1 + stones[i]].__contains__(jump - 1):\n j_val[jump - 1 + stones[i]].add(jump - 1)\n # print('jump -1 from', stones[i], 'to', jump + stones[i], jump - 1)\n if jump + 1 + stones[i] in j_val:\n # if not j_val[jump + 1 + stones[i]].__contains__(jump + 1):\n j_val[jump + 1 + stones[i]].add(jump + 1)\n # print('jump +1 from', stones[i], 'to', jump + stones[i], jump + 1)\n # print(j_val)\n if j_val.get(stones[-1]):\n return True\n return False","sub_path":"leetcode/hard/done/frog_jump.py","file_name":"frog_jump.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627868446","text":"import os\nimport pygame\nimport sys\nimport random\nfrom pygame import *\n\n\npygame.init()\n\nscr_size = (width,height) = (1000,700)\n\nfps = 40;\n\nback =(0,0,0)\nwhite = (255,255,255)\ngreen = (0,255,0)\n\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\npygame.display.set_caption(\"mergeSort\")\n\n### variables\nn=200\nstart =1\nend = 500\nbarWidth = 2\n\n### function definitions\n\ndef randArray(n ,start ,end):\n arr = []\n for i in range(n):\n arr.append(random.randint(start,end))\n return arr\n\ndef mergeSort(arr,low,high):\n if low < high:\n mid = int((low +high)/2)\n mergeSort(arr,low,mid)\n mergeSort(arr,mid+1,high)\n merge(arr,low,mid,high)\n\ndef merge(arr,low,mid,high):\n n1 = mid-low +1\n n2 = high - mid\n L = [0]*n1\n R = [0]*n2\n for i in range(n1):\n L[i] = arr[low+i]\n for j in range(n2):\n R[j] = arr[mid+1+j]\n\n i = 0\n j = 0\n k = low\n while i \", next_node, \" -> \", feature_dict[next_node])\n\tif len(feature_dict[ tuple(next_node)]) != 2:\n\t\treturn next_node, dist, street_name\n\telse:\n\t\treturn crawl_2_way_node(current_node, tuple(next_node), feature_dict)\n\n\ndef get_other_side_of_two_way_node( visited_node, current_node, feature_dict ):\n\t# print(\"CURRENT NODE\", current_node)\n\t# print(\"VISITED NODE\", visited_node)\n\t# print(\"TO CHOOSE FROM 0\", tuple(feature_dict[current_node][0]['end'] ) == visited_node )\n\t# print(\"TO CHOOSE FROM 1\", tuple(feature_dict[current_node][1]['end'] ) == visited_node )\n\t# print(\"TO CHOOSE FROM 0\", tuple(feature_dict[current_node][0]['end'] ) 
)\n\t# print(\"TO CHOOSE FROM 1\", tuple(feature_dict[current_node][1]['end'] ) )\n\tif visited_node == tuple(feature_dict[current_node][0]['end']):\n\t\t#print(\"######street \", feature_dict[current_node][1]['st']) \n\t\t#print(\"returning \", feature_dict[current_node][1]['end']) \n\t\treturn [tuple(feature_dict[current_node][1]['end']),\n\t\t\t\t\t feature_dict[current_node][1]['dist'],\n\t\t\t\t\t feature_dict[current_node][1]['st']]\n\tif visited_node == tuple(feature_dict[current_node][1]['end']):\n\t\t#print(\"######street \", feature_dict[current_node][0]['st']) \n\t\t#print(\"returning \", feature_dict[current_node][0]['end']) \n\t\treturn [tuple(feature_dict[current_node][0]['end']),\n\t\t\t\t\t feature_dict[current_node][0]['dist'],\n\t\t\t\t\t feature_dict[current_node][0]['st']]\n\telse:\n\t\tprint(\"NEITHER\")\n\treturn\n\ndef sanity_check(network_graph):\n\tbig_points = 0\n\ttotal = 0\n\ttotal_streets = 0\n\tblank_streets = 0\n\tfor i in network_graph:\t\n\t\ttotal += 1\n\t\ttry:\n\t\t\tif len( network_graph[i] ) > 2:\n\t\t\t\tbig_points += 1\n\t\texcept:\n\t\t\tprint(i)\n\t\t\tpass\n\t\tfor end in network_graph[i]:\n\t\t\ttotal_streets += 1\n\t\t\tif end['st'] == \"\":\n\t\t\t\tblank_streets += 1\n\tprint(\"total \" , total) \n\tprint(\"big_points \", big_points)\n\tprint( \"total_streets \", total_streets )\n\tprint( \"blank_streets \", blank_streets )\n\treturn\n\ndef glance(compress_graph):\n\t\n\tfor n in compress_graph:\n\t\tprint( compress_graph[n] )\n\ndef write_to_file(compress_graph, outfile):\n\toutput_graph = {}\n\tfor g in compress_graph:\n\t\toutput_graph[str(g)] = compress_graph[g]\n\twith open(outfile, 'w') as write_file:\n\t\tjson.dump(output_graph, write_file)\n\treturn\n\ndef main(infile, outfile):\n\tfeatures = get_data(infile)\n\ttotal = len(features)\n\tprint( total )\n\tcompress_graph = {}\n\tcreate_compress_graph(features, compress_graph)\n\trewrite_nodes(features, compress_graph)\n\tsanity_check( compress_graph )\n\tprint( len(compress_graph) )\n\t#write_to_file(compress_graph, outfile)\n\treturn\n\n#sample input/output files\ninfile = '../../osmdata/delco/delcoosm_network-streetnames.json'\noutfile = '../../osmdata/delco/compress_delco-streetnames.json'\n\nmain(infile, outfile)\n","sub_path":"compressnetwork.py","file_name":"compressnetwork.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208934869","text":"\"\"\"\nThis module implements the calls related to the Politician entity\n\"\"\"\nimport requests\n\nfrom vigipy.model import Politician\n\n\ndef get_by_id(oid):\n url = \"https://vigilante-rest.herokuapp.com/politician/{oid}\" \\\n .format(oid=oid)\n response = requests.post(url)\n print(response)\n return Politician.from_dict(response)\n\n\ndef save(politician):\n url = \"https://vigilante-rest.herokuapp.com/politician\"\n if politician.id is not None:\n url += \"/\" + politician.id\n\n response = requests.put(url, data=politician.to_json())\n return politician.from_dict(response)\n","sub_path":"vigipy/politician.py","file_name":"politician.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272892219","text":"# coding:utf-8\r\nfrom common.db_dict_test import db_dict_test\r\n# 那么如何来控制警告错误的输出呢?很简单\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\nfor i in range(1700,1800):\r\n sql='insert into db_beaushare.tb_notices_info values 
('+str(i)+', 1,2,1,1000,10266,526,\"1000-01-01 00:00:00\",\"2018-06-11 14:42:31\",0,0,\"1000-01-01 00:00:00\")'\r\n db_auto = db_dict_test(sql)\r\n aaa=db_auto.get_db_item()\r\n # print(aaa)","sub_path":"common/db_auto_all.py","file_name":"db_auto_all.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"602621518","text":"from Bio import SeqIO\nimport sys\nimport os \nfrom math import floor\nimport argparse\nimport numpy as np\nimport string\n\ndef write_fragment(contig_id, fragment, contig_length, a_start, a_end):\n\n fragment_name = \"{}.l{}.s{}.e{}.fasta\".format(contig_id, contig_length, a_start, a_end)\n #naming scheme contig.contig_length.start.end\n \n with open(outdir+'/'+fragment_name,'w') as f:\n f.write('>{}\\n'.format(fragment_name.strip('.fasta')))\n f.write(str(fragment))\n\ndef slide_window(contig_id, contig_seq, original_contig_length):\n # need to check if this is actually writing the correct nt positions for the windows\n # check for circular and for linear\n # maybe do to functions\n # SOMETHING IS VERY WRONG HERE SOMEHOW\n # THE 0,5000 WINDOW DOES NOT MATCH ACTUAL\n # slide window every 500\n start = 0 # start at ORI\n end = 5000 \n slide = 500\n \n for actual_start,actual_end in zip(range(-4500,original_contig_length+1500,slide), range(500,original_contig_length+6000,slide)):\n start = actual_start+4500\n end = actual_end+4500\n if actual_end>original_contig_length:\n actual_end = -(actual_end%original_contig_length)\n if start<0:\n fragment = contig_seq[:start]+contig_seq[:end]\n \n else:\n fragment = contig_seq[start:end]\n print(len(fragment))\n\n write_fragment(contig_id, fragment, original_contig_length, actual_start, actual_end)\n \n\n\ndef window_seq_circular(contig_id, contig_seq):\n # how to check if assembly is circular?\n original_contig_length = len(contig_seq)\n \n first_4500 = str(contig_seq[:6000])\n last_4500 = str(contig_seq[-4500:])\n contig_seq = str(contig_seq)\n contig_seq = last_4500+contig_seq # repeat last 4500 bp at beginning\n contig_seq = contig_seq+first_4500 # repeat first 4500 bp at end\n slide_window(contig_id, contig_seq, original_contig_length)\n\n\ndef window_seq_linear(contig_id, contig_seq):\n original_contig_length = len(contig_seq)\n contig_seq = str(contig_seq)\n\n # create a random 4500 bp fragment of nt defined by distribution of ATCG in full sequence\n A_frac = sum([1 for x in contig_seq if 'A' in x.upper()])/len(contig_seq)\n T_frac = sum([1 for x in contig_seq if 'T' in x.upper()])/len(contig_seq)\n C_frac = sum([1 for x in contig_seq if 'C' in x.upper()])/len(contig_seq)\n G_frac = sum([1 for x in contig_seq if 'G' in x.upper()])/len(contig_seq)\n nt_probs = np.array([A_frac, T_frac, C_frac, G_frac])/(A_frac+T_frac+C_frac+G_frac)\n random_start = ''.join([np.random.choice(['A','T','C','G'], p=nt_probs) for i in range(4500)])\n random_end = ''.join([np.random.choice(['A','T','C','G'], p=nt_probs) for i in range(5000)])\n contig_seq = random_start+contig_seq \n contig_seq = contig_seq+random_end\n #print(len(contig_seq))\n\n slide_window(contig_id, contig_seq, original_contig_length)\n \ndef parse_records(fasta_file,linear):\n for record in SeqIO.parse(fasta_file,'fasta'):\n contig_id = record.id\n contig_seq = record.seq\n if linear==False:\n window_seq_circular(contig_id, contig_seq)\n else:\n window_seq_linear(contig_id, contig_seq)\n\ndef help():\n print('usage: window_genome.py fasta_file outdir')\n\nif __name__ == 
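\"__main__\":\n    # (added, illustrative) typical invocation, given the argparse flags defined below\n    # (file names are hypothetical):\n    #   python window_genome.py -f contigs.fasta -o windows_out --linear\n    # each 5000 bp window slides by 500 bp; circular mode pads both sequence ends first.\n    pass\n\nif __name__ == 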
\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-f','--fasta',help='Fasta file',required=True)\n parser.add_argument('-o','--outdir',help='Output directory',required=True)\n parser.add_argument('-l','--linear',help='Linear DNA',required=False, action='store_true')\n args = parser.parse_args()\n fasta_file = args.fasta\n outdir = args.outdir\n parse_records(fasta_file,args.linear)\n\n \n ","sub_path":"ISFinder/window_genome.py","file_name":"window_genome.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"24038721","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom .views import (\n EquipmentInspectionListView,\n EquipmentInspectionDetailView,\n EquipmentInspectionCreateView,\n EquipmentInspectionUpdateView,\n EquipmentInspectionQuickUpdateView,\n\n SprayPumproomInspectionListEditView,\n SprayPumproomInspectionListDisplayView,\n SprayPumproomInspectionDetailView,\n SprayPumproomInspectionUpdateView,\n SprayPumproomInspectionCreateView,\n\n SprayWarehouseInspectionListEditView,\n SprayWarehouseInspectionListDisplayView,\n SprayWarehouseInspectionDetailView,\n SprayWarehouseInspectionUpdateView,\n SprayWarehouseInspectionCreateView, \n\n HSSEKPIListEditView,\n HSSEKPIListDisplayView,\n HSSEKPIDetailView,\n HSSEKPIUpdateView,\n HSSEKPICreateView, \n)\n\nurlpatterns = [\n\n url(r'^equipmentinspection/quickupdate/$', EquipmentInspectionQuickUpdateView.as_view(), name='equipmentinsepction_quickupdate'),\n #url(r'^equipmentinspection/export/$', EquipmentInspectionQuickUpdateView.as_view(), name='equipmentinsepction_export'),\n url(r'^equipmentinspection/list/$', EquipmentInspectionListView.as_view(), name='equipmentinsepction_list'),\n url(r'^equipmentinspection/detail/(?P\\d+)/$', EquipmentInspectionDetailView.as_view(), name='equipmentinsepction_detail'),\n url(r'^equipmentinspection/update/(?P\\d+)/$', EquipmentInspectionUpdateView.as_view(), name='equipmentinsepction_update'),\n url(r'^equipmentinspection/create/(?P\\d+)/$', EquipmentInspectionCreateView.as_view(), name='equipmentinsepction_create'),\n\n url(r'^spraypumproominspection/listedit/$', SprayPumproomInspectionListEditView.as_view(), name='spraypumproominspection_list_edit'),\n url(r'^spraypumproominspection/listdisplay/$', SprayPumproomInspectionListDisplayView.as_view(), name='spraypumproominspection_list_display'),\n url(r'^spraypumproominspection/detail/(?P\\d+)/$', SprayPumproomInspectionDetailView.as_view(), name='spraypumproominspection_detail'),\n url(r'^spraypumproominspection/update/(?P\\d+)/$', SprayPumproomInspectionUpdateView.as_view(), name='spraypumproominspection_update'),\n url(r'^spraypumproominspection/create/(?P\\d+)/(?P\\d+)/$', SprayPumproomInspectionCreateView.as_view(), name='spraypumproominspection_create'), \n\n url(r'^spraywarehouseinspection/listedit/$', SprayWarehouseInspectionListEditView.as_view(),name='spraywarehouseinspection_list_edit'),\n url(r'^spraywarehouseinspection/listdisplay/$', SprayWarehouseInspectionListDisplayView.as_view(), name='spraywarehouseinspection_list_display'),\n url(r'^spraywarehouseinspection/detail/(?P\\d+)/$', SprayWarehouseInspectionDetailView.as_view(), name='spraywarehouseinspection_detail'),\n url(r'^spraywarehouseinspection/update/(?P\\d+)/$', SprayWarehouseInspectionUpdateView.as_view(), name='spraywarehouseinspection_update'),\n 
url(r'^spraywarehouseinspection/create/(?P\\d+)/(?P\\d+)/$', SprayWarehouseInspectionCreateView.as_view(), name='spraywarehouseinspection_create'), \n\n url(r'^hssekpi/listedit/$', HSSEKPIListEditView.as_view(), name='hssekpi_list_edit'),\n url(r'^hssekpi/listdisplay/$', HSSEKPIListDisplayView.as_view(), name='hssekpi_list_display'),\n url(r'^hssekpi/detail/(?P\\d+)/$', HSSEKPIDetailView.as_view(), name='hssekpi_detail'),\n url(r'^hssekpi/update/(?P\\d+)/$', HSSEKPIUpdateView.as_view(), name='hssekpi_update'),\n url(r'^hssekpi/create/(?P\\d+)/(?P\\d+)/$', HSSEKPICreateView.as_view(), name='hssekpi_create'), \n]","sub_path":"equipments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"439679897","text":"from socket import *\n\nip_port = ('127.0.0.1', 8080)\nbuffer_size=1024\nudp_client = socket(AF_INET, SOCK_DGRAM)\nwhile True:\n msg = input('请输入要发送的信息: ')\n udp_client.sendto(msg.encode('utf8'), ip_port)\n data, addr= udp_client.recvfrom(buffer_size)\n print('接收来自服务器的信息:',data.decode('utf8'))","sub_path":"PythonNetProgram/upd客户端.py","file_name":"upd客户端.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201377479","text":"# -*- coding: utf-8 -*-\n#include \nimport numpy as np\nimport random\nfrom matplotlib import pyplot\nimport os,time\n\ntimeList=[]\n\ndef timer():\n if len(timeList)%2 == 0:\n print(\"\\nTime elapsed: \"+str(round(timeList[-1]-timeList[-2],4))+\"seconds.\\n\")\n timeList.pop()\n timeList.pop()\n return True ;\n\n\nNx=Ny=40\ntab=[]\ntemp=[]\n\ndef periodic2D(nx,ny) :\n nd,ng,nh,nb=nx+1,nx-1,ny+1,ny-1\n if(nx==Nx-1):\n nd=0\n elif(nx==0):\n ng=Nx-1\n if(ny==Ny-1):\n nh=0\n elif(ny==0):\n nb=Ny-1\n return nd,ng,nh,nb\n\nfor j in range(Ny):\n for i in range(Nx):\n temp.append([i,i,i,i])\n tab.append(temp)\n temp=[]\n\ntimeList.append(time.time())\nfor i in range (5000000):\n nx,ny=int((Nx)*np.random.rand()),int((Ny)*np.random.rand())\n (periodic2D(nx,ny))\ntimeList.append(time.time())\ntimer()\n\ntimeList.append(time.time())\nfor i in range (5000000):\n nx,ny=int((Nx)*np.random.rand()),int((Ny)*np.random.rand())\n (tab[nx][ny])\ntimeList.append(time.time())\ntimer()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"643240841","text":"Sexo = input(\"Digite: \\n M - Mulhuer; \\n H - Homem \\n Escolha: \")\nAltura = float(input(\"Digite sua altura: \"))\n\nif Sexo == \"M\":\n PesoI = (62.1 * Altura) - 44.7\n print(\"Seu peso ideal é: \", PesoI)\nelif Sexo == \"H\":\n PesoI = (72.7 * Altura) - 58\n print(\"Seu peso ideal é: \", PesoI)\nelse:\n print(\"Letra do sexo inválida ou digitou em minisculo\")","sub_path":"EstruturaCondiçao/list-exercicio2/exer4.py","file_name":"exer4.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"477396547","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat jun 5 20:57:51 2021\n\n@author: kwan\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport time\n \nstockList = ['BNB', 'BTC', 'ETH']\nlen (stockList)\n \noptions = 
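Options()\n# (added, optional and untested here) Chrome can also run without a visible window:\n# options.add_argument(\"--headless\")\noptions = 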
Options()\noptions.add_argument(\"--disable-notifications\")\noptions.add_argument(\"disable-infobars\")\noptions.add_argument(\"--disable-extensions\")\n\ni = 0\n\nwhile i <3:\n \n Chrome = webdriver.Chrome('C:/Program Files (x86)/Google/Chrome/Application/chromedriver', chrome_options=options)\n\n Chrome.get(\"https://finance.yahoo.com/\")\n\n time.sleep(3)\n\n SB = Chrome.find_element_by_id('yfin-usr-qry')\n\n SB.send_keys(\"{0}\".format(stockList[i]))\n\n time.sleep(5)\n\n stockPg = SB.send_keys(Keys.RETURN)\n\n time.sleep(5)\n \n histData = Chrome.find_element(By.XPATH,'//span[text()=\"Historical Data\"]').click()\n \n time.sleep(5)\n \n grabTimePeriod = Chrome.find_element_by_css_selector('[data-icon=CoreArrowDown]').click()\n \n ClickMax = Chrome.find_element_by_css_selector('[data-value=\"5_Y\"]').click() \n \n #Outdate clickDone= Chrome.find_element(By.XPATH, '//span[text()=\"Done\"]').click()\n \n clickApply = Chrome.find_element(By.XPATH, '//span[text()=\"Apply\"]').click()\n \n downloadData = Chrome.find_element(By.XPATH, '//span[text()=\"Download\"]').click()\n \n time.sleep(5)\n \n Chrome.close()\n \n i +=1\n \n \n \n\n","sub_path":"selenium_stocks_versionB.py","file_name":"selenium_stocks_versionB.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473033281","text":"''' Given a string, find the longest palindromic substring '''\n''' O(n) '''\ndef get_palindrome_length(string, index):\n length = 1\n while index + length < len(string) and index - length >= 0:\n if string[index + length] == string[index - length]:\n length += 1\n else:\n break\n return length - 1\n\ndef interleave(string):\n ret = []\n for s in string:\n ret.extend(['#', s])\n ret.append('#')\n return ''.join(ret)\n''' Find longest palindrome number '''\ndef manacher(string):\n right = 0\n center = 0\n string = interleave(string)\n P = map(lambda e: 0, xrange(len(string)))\n for i in xrange(1, len(string)):\n mirror = 2*center - i\n if i + P[mirror] <= right and mirror >= len(string) - i:\n P[i] = P[mirror]\n else:\n plength = get_palindrome_length(string, i)\n P[i] = plength\n if plength > 1:\n center = int(i)\n right = center + plength\n return [e/2 for e in P]\n''' Return the palindrome sub-string '''\ndef get_palindrome_number(string):\n return sum(manacher(string))\n","sub_path":"Competitive Coding/Strings/String Search/Manachar_algorithm/manachar_algorithm.py","file_name":"manachar_algorithm.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635689028","text":"from django.shortcuts import render, redirect, HttpResponseRedirect\r\nfrom django.contrib.auth import login, authenticate, logout\r\nfrom django.contrib import auth\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .forms import TransferMoneyForm\r\nfrom django.views.generic import TemplateView\r\nfrom account_management.models import Account\r\nfrom user_management.models import ExternalUser\r\n\r\n# incomplete\r\ndef transfer(request):\r\n context = {}\r\n # print(request.user.get_username())\r\n # form = TransferMoneyForm(request.POST)\r\n if request.POST:\r\n # print(\"POST\")\r\n # print(form)\r\n # print(\"from account:\",request.POST['from_account'])\r\n context = {}\r\n\r\n try:\r\n from_account = Account.objects.get(account_number=request.POST['from_account'])\r\n except:\r\n\r\n 
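# (added note) a bare except hides the real failure; catching Account.DoesNotExist\r\n            # explicitly would make the intent clearer.\r\n            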
# form = TransferMoneyForm\r\n            context['from_account_not_exist'] = True\r\n            username = request.user.username\r\n            current_user = ExternalUser.objects.get(username=username)\r\n            context['user_type'] = current_user.user_type\r\n            return render(request, 'transfer.html', context)\r\n\r\n        try:\r\n            to_account = Account.objects.get(account_number=request.POST['to_account'])\r\n        except:\r\n            context['to_account_not_exist'] = True\r\n            username = request.user.username\r\n            current_user = ExternalUser.objects.get(username=username)\r\n            context['user_type'] = current_user.user_type\r\n            return render(request, 'transfer.html', context)\r\n        # print(\"from account balance:\", from_account.account_balance)\r\n        # print(\"to account balance\", to_account.account_balance)\r\n        if float(request.POST['amount']) > float(from_account.account_balance):\r\n            context['from_account_not_enough_money'] = True\r\n            username = request.user.username\r\n            current_user = ExternalUser.objects.get(username=username)\r\n            context['user_type'] = current_user.user_type\r\n            return render(request, 'transfer.html', context)\r\n        # print(\"dif:\", float(from_account.account_balance) - float(request.POST['amount']))\r\n        from_account.account_balance = str(float(from_account.account_balance) - float(request.POST['amount']))\r\n        to_account.account_balance = str(float(to_account.account_balance) + float(request.POST['amount']))\r\n        from_account.save()\r\n        to_account.save()\r\n        # print(\"after:\")\r\n        # print(\"from account balance:\", from_account.account_balance)\r\n        # print(\"to account balance\", to_account.account_balance)\r\n        username = request.user.username\r\n        current_user = ExternalUser.objects.get(username=username)\r\n        context['user_type'] = current_user.user_type\r\n        # form = TransferMoneyForm\r\n        return render(request, 'transfer.html', context)\r\n    else:\r\n        #context = {}\r\n        username = request.user.username\r\n        current_user = ExternalUser.objects.get(username=username)\r\n        user_type = current_user.user_type\r\n        form = TransferMoneyForm\r\n\r\n        return render(request, 'transfer.html', {'form': form, 'user_type': user_type})\r\n","sub_path":"transaction_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204476329","text":"import sqlite3\nfrom logs import logger\n\n\n\nCOFFEE_DB = \"CoffeeForMeDB.db\"\nBEVERAGE_TYPES = 'select bev_type from beverage_type'\nADDITIONALS_TYPES = 'select addit_type from additionals_type'\nCOUNT_SALESMANS = 'select count(*) from employees where position = 1'\nSALESMAN_NAMES = 'SELECT name from employees where position = 1'\nBEVERAGE_PRICE = 'SELECT price_bev from beverage_type where bev_type = ?'\n\nclass DataBase(object):\n\n    def __init__(self, db=COFFEE_DB):\n        self.database = db\n        try:\n            self.conn = sqlite3.connect(db, check_same_thread=False)\n        except sqlite3.Error as error:\n            logger.error(\"Cannot connect to db {}\".format(error))\n        self.cursor = self.conn.cursor()\n\n    def execute_query_get(self, query, param=None):\n        info = 'Executing get query: {}, {}'.format(query, param)\n        logger.info(info)\n        with self.conn:\n            if param:\n                self.cursor.execute(query, param)\n            else:\n                self.cursor.execute(query)\n            result = self.cursor.fetchall()\n            logger.info('query successful')\n            return result\n\n    def get_beverage_types(self):\n        query = BEVERAGE_TYPES\n        res = self.execute_query_get(query)\n        logger.info(\"On get_beverage_types -- result {}\".format(res))\n        return res\n\n    def get_additionals_types(self):\n        query = ADDITIONALS_TYPES\n        res = self.execute_query_get(query)\n        logger.info(\"On 
get_additionals_types -- result {}\".format(res))\n        return res\n\n    def get_beverage_price(self, bev):\n        query = 'SELECT price_bev from beverage_type where bev_type = ?'\n        param = bev,\n        res = self.execute_query_get(query, param)\n        logger.info(\"On get_beverage_price -- result {}\".format(res))\n        return res\n\n    def get_additinal_price(self, addit):\n        query = 'SELECT price_addit from additionals_type where addit_type = ?'\n        param = addit,\n        res = self.execute_query_get(query, param)\n        logger.info(\"On get_additinal_price -- result {}\".format(res))\n        return res\n\n    def get_salesmans_names(self):\n        query = SALESMAN_NAMES\n        res = self.execute_query_get(query)\n        logger.info(\"On get_salesmans_names -- result {}\".format(res))\n        return res\n\n    def execute_query_post(self, query, param):\n        info = 'Executing post query: {}, {}'.format(query, param)\n        logger.info(info)\n        self.cursor.execute(query, param)\n        logger.info('post query successful')\n        self.conn.commit()\n\n    def check_user_in_db(self, user_name):\n        name = user_name\n        query = 'SELECT * FROM employees WHERE name = ?'\n        param = name,\n        result = self.execute_query_get(query, param)\n        logger.info('User {} {}'.format(user_name, 'exists' if result else 'does not exist'))\n        return bool(result)\n\n    def check_salesman_in_db(self, salesman_name):\n        name = salesman_name\n        query = 'SELECT * FROM employees WHERE name = ? and position = 1'\n        param = name,\n        result = self.execute_query_get(query, param)\n        logger.info('Salesman {} {}'.format(salesman_name, 'exists' if result else 'does not exist'))\n        return bool(result)\n\n    def check_manager_in_db(self, manager_name):\n        name = manager_name\n        query = 'SELECT * FROM employees WHERE name = ? and position = 2'\n        param = name,\n        result = self.execute_query_get(query, param)\n        logger.info('Manager {} {}'.format(manager_name, 'exists' if result else 'does not exist'))\n        return bool(result)\n\n    def add_user(self, user_info):\n        if not self.check_user_in_db(user_info[0]):\n            query = 'INSERT INTO employees (name, position) VALUES (?,?)'\n            param = user_info\n            self.execute_query_post(query, param)\n            logger.info('User {} position: {} was successfully added to db!'.format(user_info[0], user_info[1]))\n        else:\n            print('User {} already exists'.format(user_info[0]))\n\n\n    def send_bill_to_db(self, bill_info):\n        query = 'INSERT INTO sales (name, bill, salesdate) VALUES (?, ?, ?)'\n        self.execute_query_post(query, bill_info)\n        logger.info('Salesman {}, bill {}.'.format(bill_info[0], bill_info[1]))\n\n    def count_salesmans(self):\n        query = COUNT_SALESMANS\n        res = self.execute_query_get(query)\n        logger.info(\"On count_salesmans -- result {}\".format(res))\n        return res\n\n    def salesnumber_of_salesman(self, salesman_name):\n        query = 'SELECT COUNT(*) FROM sales where name = ?'\n        param = salesman_name,\n        res = self.execute_query_get(query, param)\n        logger.info(\"On salesnumber_of_salesman -- result {}\".format(res))\n        return res\n\n    def total_number_of_sales(self):\n        query = 'SELECT COUNT(*) FROM sales'\n        res = self.execute_query_get(query)\n        logger.info(\"On total_number_of_sales -- result {}\".format(res))\n        return res\n\n    def salessum_of_salesman(self, salesman_name):\n        query = 'SELECT SUM(bill) FROM sales where name = ?'\n        param = salesman_name,\n        res = self.execute_query_get(query, param)\n        if res[0][0] is None:\n            res = [(0,)]\n        logger.info(\"On salessum_of_salesman -- result {}\".format(res))\n        return res\n\n    def total_sum(self):\n        query = 'SELECT SUM(bill) FROM sales'\n        res = self.execute_query_get(query)\n        
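# (added) note: SUM(bill) is NULL on an empty table, which is why salessum_of_salesman above falls back to [(0,)].\n        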
logger.info(\"On total sum -- result {}\".format(res))\n return res\n\n","sub_path":"final_coffee_db.py","file_name":"final_coffee_db.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"118720732","text":"from itertools import count\nfrom dataclasses import dataclass\nfrom math import inf as INF\nimport sys\n\nimport pytest\n\n@dataclass\nclass Player:\n health: int = 200\n attack: int = 3\n\nclass Elf(Player):\n disp: str = 'E'\n\nclass Goblin(Player):\n disp: str = 'G'\n\ndef parse(lines, elf_attack=3):\n return {x+y*1j: {'E': lambda s: Elf(attack=elf_attack),\n 'G': lambda s: Goblin()}.get(char, str)(char)\n for (y, line) in enumerate(lines)\n for (x, char) in enumerate(line)}\n\ndef distances(src, open_set):\n distance = {src: 0}\n while open_set:\n cur = min(open_set, key=lambda v: distance.get(v, INF))\n open_set.remove(cur)\n if cur not in distance:\n continue\n for nxt in [cur+ofs for ofs in [-1j, -1, +1, +1j] if (cur+ofs) in open_set]:\n if distance[cur]+1 < distance.get(nxt, INF):\n distance[nxt] = distance[cur]+1\n return distance\n\ndef day15(grid):\n for rnd in count(0):\n yield rnd, grid\n for (pos, player) in sorted((t for t in grid.items() if isinstance(t[1], Player)),\n key=lambda t: (t[0].imag, t[0].real)):\n Enemy = {Elf: Goblin, Goblin: Elf}[type(player)]\n if player.health <= 0:\n print(f\"{pos} {player} is already dead\")\n continue\n if {type(p) for p in grid.values()} != {Elf, Goblin, str}:\n print(f\"only one team left, bailing out!!\")\n return\n adj = [pos+o for o in [-1j, -1, +1, +1j] if isinstance(grid[pos+o], Enemy)]\n if not adj:\n dist = distances(pos, {c for c in grid if grid[c] == '.'} | {pos})\n target = min((c+o for c in grid if isinstance(grid[c], Enemy)\n for o in [-1j, -1, +1, +1j] if grid[c+o] == '.'),\n key=lambda c: (dist.get(c, INF), c.imag, c.real),\n default=None)\n if dist.get(target, INF) == INF:\n print(f\"{pos} {player} waiting...\")\n continue\n dist = distances(target, {c for c in grid if grid[c] == '.'} | {target})\n step = min((pos+o for o in [-1j, -1, 1, 1j] if grid[pos+o] == '.'),\n key=lambda c: dist.get(c, INF))\n print(f\"{pos} {player} move from {pos} to {step}\")\n grid[pos], grid[step] = grid[step], grid[pos]\n pos = step\n adj = [pos+o for o in [-1j, -1, +1, +1j] if isinstance(grid[pos+o], Enemy)]\n if adj:\n atk = min(adj, default=None, key=lambda c: grid[c].health)\n print(f\"{pos} {player} attacks {grid[atk]}\")\n grid[atk].health -= player.attack\n if grid[atk].health <= 0:\n print(f\"{grid[atk]} has died!\")\n grid[atk] = '.'\n\ndef day15a(grid):\n for rnd, grid in day15(grid):\n print(f\"\\nAfter {rnd} round{'s' if rnd != 1 else ''}:\")\n draw(grid)\n print()\n h = sum(v.health for v in grid.values() if isinstance(v, Player))\n return (rnd, h, rnd * h)\n\ndef day15b(lines):\n for elf_attack in count(4):\n grid = parse(lines, elf_attack)\n elfs = [v for v in grid.values() if isinstance(v, Elf)]\n for rnd, grid in day15(grid):\n if any(e.health <= 0 for e in elfs):\n break\n else:\n if any(e.health <= 0 for e in elfs):\n continue\n health = sum(v.health for v in grid.values() if isinstance(v, Player))\n return (elf_attack, rnd, health, rnd * health)\n\ndef draw(G):\n P = lambda s: sys.stdout.write(s)\n w, h = max(int(c.real) for c in G), max(int(c.imag) for c in G)\n for y in range(h+1):\n for x in range(w+1):\n P(G[x+y*1j].disp if isinstance(G[x+y*1j], Player) else G[x+y*1j])\n P(' ')\n P(', 
'.join(f\"{G[c].disp}({G[c].health})\" for c in sorted(\n (c for c in G if isinstance(G[c], Player) and c.imag == y),\n key=lambda c: c.real)))\n P('\\n')\n\nEX15A = '#######|#E..G.#|#...#.#|#.G.#G#|#######'.split('|')\nEX15B = '#######|#.G...#|#...EG#|#.#.#G#|#..G#E#|#.....#|#######'.split('|')\nEX15C = '#######|#G..#E#|#E#E.E#|#G.##.#|#...#E#|#...E.#|#######'.split('|')\nEX15D = '#######|#E..EG#|#.#G.E#|#E.##E#|#G..#.#|#..E#.#|#######'.split('|')\nEX15E = '#######|#E.G#.#|#.#G..#|#G.#.G#|#G..#.#|#...E.#|#######'.split('|')\nEX15F = '#######|#.E...#|#.#..G#|#.###.#|#E#G#G#|#...#G#|#######'.split('|')\nEX15G = '#########|#G......#|#.E.#...#|#..##..G#|#...##..#|#...#...#|#.G...G.#|#.....G.#|#########'.split('|')\n\ndef test_15a_ex0(): assert isinstance(parse(EX15A)[2+3j], Goblin)\n\ndef test_15a_ex1(): assert day15a(parse(EX15B)) == (47, 590, 27730)\ndef test_15a_ex2(): assert day15a(parse(EX15C)) == (37, 982, 36334)\ndef test_15a_ex3(): assert day15a(parse(EX15D)) == (46, 859, 39514)\ndef test_15a_ex4(): assert day15a(parse(EX15E)) == (35, 793, 27755)\ndef test_15a_ex5(): assert day15a(parse(EX15F)) == (54, 536, 28944)\ndef test_15a_ex6(): assert day15a(parse(EX15G)) == (20, 937, 18740)\n\ndef test_15b_ex1(): assert day15b(EX15B) == (15, 29, 172, 4988)\ndef test_15b_ex2(): assert day15b(EX15D) == (4, 33, 948, 31284)\ndef test_15b_ex3(): assert day15b(EX15E) == (15, 37, 94, 3478)\ndef test_15b_ex5(): assert day15b(EX15F) == (12, 39, 166, 6474)\ndef test_15b_ex6(): assert day15b(EX15G) == (34, 30, 38, 1140)\n\ndef test_15a(day15_lines): assert day15a(parse(day15_lines)) == (82, 2624, 215168)\n\ndef test_15a(day15_lines): assert day15b(day15_lines) == (16, 42, 1247, 52374)\n","sub_path":"python/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197422841","text":"import json\nimport argparse\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom tqdm import tqdm\n\nfrom data import MelSpectrogramFixed, AudioDataset\nfrom utils import ConfigWrapper, parse_filelist\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', required=True, type=str, help='configuration file')\n parser.add_argument('-f', '--filelist', required=True, type=str, help='audios filelist path')\n args = parser.parse_args()\n\n with open(args.config) as f:\n config = ConfigWrapper(**json.load(f))\n \n mel_fn = MelSpectrogramFixed(\n sample_rate=config.data_config.sample_rate,\n n_fft=config.data_config.n_fft,\n win_length=config.data_config.win_length,\n hop_length=config.data_config.hop_length,\n f_min=config.data_config.f_min,\n f_max=config.data_config.f_max,\n n_mels=config.data_config.n_mels,\n window_fn=torch.hann_window\n ).cuda()\n\n dataset = AudioDataset(config, training=True)\n dataset.filelist_path = args.filelist\n dataset.audio_paths = parse_filelist(dataset.filelist_path)\n\n loader = DataLoader(dataset, batch_size=48)\n\n nans, infs = [], []\n for batch in tqdm(loader, total=int(np.ceil(len(dataset)/48))):\n batch = batch.cuda()\n mels = mel_fn(batch)\n\n nan_mask = torch.isnan(mels)\n inf_mask = torch.isinf(mels)\n\n nans.append(nan_mask.sum().cpu())\n infs.append(inf_mask.sum().cpu())\n \n print(f'Dataset has nans: {any([item != 0 for item in nans])}')\n print(f'Dataset has infs: {any([item != 0 for item in 
infs])}')\n","sub_path":"check_data.py","file_name":"check_data.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"638269410","text":"#La Tarta de Bulma\ngramos_de_la_tarta_de_bulma = 900\nporciones_para_goku = 3\nporciones_para_vegeta = porciones_para_goku\nporciones_para_beerus = 4\n\nporciones_a_dividir_la_tarta = (porciones_para_goku \n + porciones_para_vegeta \n + porciones_para_beerus)\n\nprint(\"Habrá que dividir la tarta en las siguientes porciones:\")\nprint(porciones_a_dividir_la_tarta)\nprint(\"Y para que el sr. Beerus no se enfade, cada porción deberá pesar:\")\nprint(gramos_de_la_tarta_de_bulma / porciones_a_dividir_la_tarta)","sub_path":"Variables/VAR_numeros_04_01.py","file_name":"VAR_numeros_04_01.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"196758335","text":"import collections\n\nimport numpy as np\n\nclass QCAspect(collections.namedtuple('QCAspect', 'lbl units data comment doi glossary')):\n \"\"\"Facilitates the storage of quantum chemical results by labeling them with basic metadata.\n\n Attributes\n ----------\n lbl : str\n Official label for `data`, often qcvar. May contain spaces.\n units : str\n ASCII, LaTeX-like representation of units, without square brackets.\n data : float or :py:class:`numpy.ndarray`\n Value for `lbl`.\n comment : str, optional\n Additional notes.\n doi : str, optional\n Literature citation or definition DOI link.\n glossary : str, optional\n Extended description or definition.\n\n \"\"\"\n def __new__(cls, lbl, units, data, comment='', doi=None, glossary=''):\n return super(QCAspect, cls).__new__(cls, lbl, units, data, comment, doi, glossary)\n\n def __str__(self, label=''):\n width = 40\n text = []\n text.append('-' * width)\n text.append('{:^{width}}'.format('QCAspect ' + self.lbl, width=width))\n if label:\n text.append('{:^{width}}'.format(label))\n text.append('-' * width)\n text.append('Data: {}'.format(self.data))\n text.append('Units: [{}]'.format(self.units))\n text.append('doi: {}'.format(self.doi))\n text.append('Comment: {}'.format(self.comment))\n text.append('Glossary: {}'.format(self.glossary))\n text.append('-' * width)\n return ('\\n'.join(text))\n\n\n def to_dict(self):\n dicary = dict(self._asdict()) # dict, not OrderedDict\n for d in ['doi', 'comment', 'glossary']:\n dicary.pop(d)\n if isinstance(self.data, (np.ndarray, np.number)):\n if self.data.dtype == np.complex:\n dicary['data'] = [dicary['data'].real.tolist(), dicary['data'].imag.tolist()]\n else:\n dicary['data'] = dicary['data'].tolist()\n elif isinstance(self.data, (complex, np.complex)):\n dicary['data'] = [self.data.real, self.data.imag]\n\n return dicary\n","sub_path":"qcdb/datastructures.py","file_name":"datastructures.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"583711932","text":"from binascii import hexlify\n\nimport hashlib\n\n\nBASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n\n\ndef hash160(s):\n return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()\n\n\ndef double_sha256(s):\n return hashlib.sha256(hashlib.sha256(s).digest()).digest()\n\n\ndef encode_base58(s):\n # determine how many 0 bytes (b'\\x00') s starts with\n count = 0\n for c in s:\n if c == 0:\n count += 1\n else:\n break\n prefix = b'1' * 
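count\n    # (added) Base58Check convention: every leading 0x00 byte is preserved as a literal '1' in the encoded string.\n    prefix = b'1' * 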
count\n # convert from binary to hex, then hex to integer\n num = int(hexlify(s), 16)\n result = bytearray()\n while num > 0:\n num, mod = divmod(num, 58)\n result.insert(0, BASE58_ALPHABET[mod])\n\n return prefix + bytes(result)\n","sub_path":"session2/complete/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"44894846","text":"import json\nimport jsonschema\nfrom pathlib import Path\nfrom base64 import b64decode\n\nfrom odoo.addons.edi_sale.tests.common import EdiSaleCase\n\n\nclass TestEdiSaleJSON(EdiSaleCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n EdiDocumentType = cls.env[\"edi.document.type\"]\n IrModel = cls.env[\"ir.model\"]\n Partner = cls.env[\"res.partner\"]\n Title = cls.env[\"res.partner.title\"]\n State = cls.env[\"res.country.state\"]\n cls.rec_type_sale_forward = cls.env.ref(\"edi_sale_json.sale_forward_record_type\")\n cls.rec_type_sale_line_forward = cls.env.ref(\"edi_sale_json.sale_line_forward_record_type\")\n cls.doc_type_sale_forward_type = cls.env.ref(\"edi_sale_json.sale_forward_document_type\")\n\n cls.dame_title = Title.create({\"name\": \"Dame\", \"shortcut\": \"Dm\"})\n cls.usa = cls.env[\"res.country\"].create({\"name\": \"USA\"})\n cls.new_york = State.create(\n {\n \"name\": \"New York\",\n \"country_id\": cls.usa.id,\n \"code\": \"NY\",\n }\n )\n\n cls.alice = Partner.create(\n {\n \"name\": \"Alice\",\n \"title\": cls.dame_title.id,\n \"street\": \"Central Park\",\n \"street2\": \"East 74th Street\",\n \"city\": \"New York\",\n \"zip\": \"10021\",\n \"state_id\": cls.new_york.id,\n }\n )\n cls.sale = cls.create_sale(cls.alice)\n cls.create_sale_line(cls.sale, cls.apple, 5)\n cls.create_sale_line(cls.sale, cls.banana, 7)\n\n cls.sale_json_schema = json.load(\n Path(__file__).parent.joinpath(\"../data/sale_request_schema.json\").resolve().open()\n )\n\n def _get_sale_forward_docs(self):\n return self.env[\"edi.document\"].search(\n [(\"doc_type_id\", \"=\", self.doc_type_sale_forward_type.id)]\n )\n\n def _get_json_from_output(self, output):\n return json.loads(b64decode(output.datas).decode(\"utf-8\"))\n\n def test_confirm_creates_document(self):\n prev_docs = self._get_sale_forward_docs()\n self.sale.action_confirm()\n docs = self._get_sale_forward_docs() - prev_docs\n self.assertTrue(docs)\n\n def test_confirm_with_no_doc_types(self):\n prev_docs = self._get_sale_forward_docs()\n self.sale.edi_doc_type_ids = [(5, 0, 0)] # Remove all doc types\n self.sale.action_confirm()\n docs = self._get_sale_forward_docs() - prev_docs\n self.assertFalse(docs)\n\n def test_check_json_output(self):\n prev_docs = self._get_sale_forward_docs()\n self.sale.action_confirm()\n docs = self._get_sale_forward_docs() - prev_docs\n self.assertEqual(len(docs.output_ids), 1)\n jsonschema.validate(self._get_json_from_output(docs.output_ids), self.sale_json_schema)\n","sub_path":"addons/edi_sale_json/tests/test_sale_json.py","file_name":"test_sale_json.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252074803","text":"import boto3\nimport sys\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# --------------------------------------------------------------------------------------------------------------------\n\n# This script shows and example of Boto3 integration with Zadara zCompute.\n\n# The 
scenario is as such:\n# 1. Instantiate an instance from an AMI\n# 2. Create a volume\n# 3. Attach the volume to the created AMI\n\n# This example was tested on versions:\n# - zCompute version 5.5.3\n# - boto3 1.4.7\n\n# ---------------------------------------------------------------------------------------------------------------------\n\n\n# Replace following parameters with your IP and credentials\nCLUSTER_IP = ''\n\n\n# Creating a connection to zCompute AWS Compatible region\ndef create_ec2_client():\n return boto3.Session.client(\n boto3.session.Session(),\n service_name=\"ec2\",\n region_name=\"zCompute\",\n endpoint_url=\"https://%s/api/v2/aws/ec2/\" % CLUSTER_IP,\n verify=False\n )\n \n\n# Finding our Centos image, grabbing its image ID\ndef import_centos_image(client):\n images = client.describe_images()\n image_id = next(image['ImageId'] for image in images['Images'] if 'centos' in image['Tags'][0]['Value'])\n waiter = client.get_waiter('image_available')\n waiter.wait(ImageIds=[image_id,])\n print(\"Found desired image with ID:{0}\".format(image_id))\n return image_id\n\n\n# Running a new instance using our Centos image ID\ndef run_instance(client, image_id):\n ec2_instance = client.run_instances(\n ImageId=image_id,\n MinCount=1,\n MaxCount=1\n )\n instance_id = ec2_instance['Instances'][0]['InstanceId']\n # check if EC2 instance was created successfully\n waiter = client.get_waiter('instance_running')\n waiter.wait(InstanceIds=[instance_id,])\n client.create_tags(\n Resources=[\n instance_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': 'Centos_instance'\n },\n ]\n )\n print(\"Successfully created instance!{0} \".format(instance_id))\n return instance_id\n\n\n# Create an EBS volume, 20G size\ndef create_ebs_volume(client):\n ebs_vol = client.create_volume(\n Size=20,\n AvailabilityZone='zCompute'\n )\n volume_id = ebs_vol['VolumeId']\n # check that the EBS volume had been created successfully\n waiter = client.get_waiter('volume_available')\n waiter.wait(VolumeIds=[volume_id,])\n client.create_tags(\n Resources=[\n volume_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': 'centos_volume'\n },\n ]\n )\n print(\"Successfully created Volume!{0} \".format(volume_id))\n return volume_id\n\n\n# Attaching EBS volume to our EC2 instance\ndef attach_ebs(client, instance_id, volume_id):\n client.attach_volume(\n VolumeId=volume_id,\n InstanceId=instance_id,\n Device='/dev/sdm'\n )\n print(\"Attached EBS: {0} to instance {1}:\" .format(volume_id,instance_id))\n\n\ndef main():\n client = create_ec2_client()\n image_id = import_centos_image(client)\n instance_id = run_instance(client, image_id)\n volume_id = create_ebs_volume(client)\n attach_ebs(client, instance_id, volume_id)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"boto3/EC2.py","file_name":"EC2.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"88006120","text":"from pi3bar.plugins.base import Plugin\ntry:\n import netifaces\nexcept ImportError:\n netifaces = None\n\n\nclass NetIFace(Plugin):\n \"\"\"\n :class:`pi3bar.app.Pi3Bar` plugin to show your current IP address.\n\n .. Note:: requires `netifaces`_ (install with ``pip install netifaces``)\n\n .. _netifaces: https://pypi.python.org/pypi/netifaces\n\n :param interface: :class:`str` - Network interface name to watch.\n :param connected_color: :class:`str` - Hex color to use when connected. 
(default: '#00ff00')\n :param connected_background: :class:`str` - Hex background color to use when connected. (default: None)\n :param disconnected_color: :class:`str` - Hex color to use when disconnected. (default: None)\n :param disconnected_background: :class:`str` - Hex background color to use when disconnected. (default: None)\n\n Examples:\n\n .. code-block:: python\n\n NetIface('eth0')\n \"\"\"\n\n #: Refresh every 5 seconds\n ticks = 5\n\n def __init__(self, interface, **kwargs):\n self.instance = interface\n self.interface = interface\n self.short_text = interface\n self.connected_color = kwargs.pop('connected_color', '#00ff00')\n self.connected_background = kwargs.pop('connected_background', None)\n self.disconnected_color = kwargs.pop('disconnected_color', None)\n self.disconnected_background = kwargs.pop('disconnected_background', None)\n super(NetIFace, self).__init__(**kwargs)\n\n def cycle(self):\n if netifaces is None:\n self.error('netifaces is not installed')\n self.full_text = 'not installed'\n self.color = None\n self.background = '#ff0000'\n return\n\n interfaces = netifaces.interfaces()\n\n if self.interface not in interfaces:\n self.error(\"'%s' not found in %s\" % (self.interface, interfaces))\n self.full_text = '%s not found' % (self.interface)\n self.color = None\n self.background = '#ff0000'\n return\n\n addresses = netifaces.ifaddresses(self.interface)\n\n try:\n addresses = addresses[netifaces.AF_INET]\n except KeyError:\n self.color = self.disconnected_color\n self.background = self.disconnected_background\n self.full_text = self.interface\n return\n\n ips = ', '.join([address['addr'] for address in addresses])\n\n self.color = self.connected_color\n self.background = self.connected_background\n self.full_text = '%s %s' % (self.interface, ips)\n\n","sub_path":"pi3bar/plugins/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"119245219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# import modules\nfrom shutil import copyfile\nfrom optparse import OptionParser\nfrom netCDF4 import Dataset as nc\nfrom numpy.ma import masked_array\nfrom numpy import zeros, ones, where, resize, logical_not\n\n# parse inputs\nparser = OptionParser()\nparser.add_option(\"-i\", \"--inputfile\", dest = \"inputfile\", default = \"Campaign.nc4\", type = \"string\",\n help = \"Input netcdf4 file\", metavar = \"FILE\")\nparser.add_option(\"-m\", \"--maskfile\", dest = \"maskfile\", default = \"mask.all.0.01.nc4\", type = \"string\",\n help = \"Mask file\", metavar = \"FILE\")\nparser.add_option(\"-v\", \"--variable\", dest = \"variable\", default = \"cult_p1\", type = \"string\",\n help = \"Variable name\")\nparser.add_option(\"--wlat\", dest = \"wlat\", default = 1, type = \"float\",\n help = \"Weight assigned to latitude in distance metric\")\nparser.add_option(\"--wlon\", dest = \"wlon\", default = 1, type = \"float\",\n help = \"Weight assigned to longitude in distance metric\")\nparser.add_option(\"-o\", \"--outputfile\", dest = \"outputfile\", default = \"Campaign.extrap.nc4\", type = \"string\",\n help = \"Output netcdf4 file\", metavar = \"FILE\")\noptions, args = parser.parse_args()\n\ninputfile = options.inputfile\nmaskfile = options.maskfile\nvariable = options.variable\nwlat = options.wlat\nwlon = options.wlon\noutputfile = options.outputfile\n\nwith nc(inputfile) as f:\n lats, lons = f.variables['lat'][:], f.variables['lon'][:]\n var = 
f.variables[variable][:]\n\n# variable mask\nsh = var.shape\nif len(sh) == 2:\n vmask = var.mask\nelif len(sh) == 3:\n vmask = var[0].mask\nelif len(sh) == 4:\n vmask = var[0, 0].mask\nelse:\n raise Exception('Unknown dimension size')\n\n# get lat/lon map\nlatd = resize(lats, (len(lons), len(lats))).T\nlond = resize(lons, (len(lats), len(lons)))\n\n# convert to 1D arrays\nlatd = latd[~vmask]\nlond = lond[~vmask]\n\nlatidx, lonidx = where(logical_not(vmask))\nif len(sh) == 2:\n var = var[latidx, lonidx]\nelif len(sh) == 3:\n var = var[:, latidx, lonidx]\nelse:\n var = var[:, :, latidx, lonidx]\n\n# load mask\nwith nc(maskfile) as f:\n mlats, mlons = f.variables['lat'][:], f.variables['lon'][:]\n mask = f.variables['mask'][:]\n\n# find unmasked points\nlatidx, lonidx = where(logical_not(mask.mask))\n\n# extrapolate\nvar2 = masked_array(zeros(sh), mask = ones(sh))\nfor i in range(len(latidx)):\n l1, l2 = latidx[i], lonidx[i]\n\n totd = wlat * (latd - mlats[l1]) ** 2 + wlon * (lond - mlons[l2]) ** 2\n midx = totd.argmin()\n\n if len(sh) == 2:\n var2[l1, l2] = var[midx]\n elif len(sh) == 3:\n var2[:, l1, l2] = var[:, midx]\n else:\n var2[:, :, l1, l2] = var[:, :, midx]\n\ncopyfile(inputfile, outputfile)\n\nwith nc(outputfile, 'a') as f:\n vvar = f.variables[variable]\n vvar[:] = var2","sub_path":"bin/campaign/extrapolateGrid.py","file_name":"extrapolateGrid.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"443720193","text":"#!/usr/bin/env python3\nimport os\nimport time\nimport numpy as np\nimport signal\nimport sys\n\ncore = int(sys.argv[1])\nsamples = 0\nif len(sys.argv) > 2:\n samples = int(sys.argv[2])\n\nos.sched_setaffinity(0, {0})\nsignal.signal(signal.SIGINT, signal.default_int_handler)\n\ndef get_temps():\n result = int(int(file_temp.read()) / 1000)\n file_temp.seek(0)\n return np.array(result, dtype='uint8')\n\n\ndef save_results():\n out_name = 'raw_temps_' + str(core) + '.npy'\n np.save(out_name, temps)\n print('Saved', temps.size, 'samples for 1 core to', out_name)\n\n\nfile_temp = open('/sys/class/hwmon/hwmon2/temp' + str(core + 2) + '_input', 'r')\n\n\ntry:\n temps = get_temps()\n time.sleep(0.001)\n while samples == 0 or temps.size < samples:\n temps = np.append(temps, get_temps())\n time.sleep(0.001)\n save_results()\nexcept KeyboardInterrupt:\n save_results()\n","sub_path":"recordRawTemp.py","file_name":"recordRawTemp.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"319369991","text":"# Hint: You may not need all of these. 
Remove the unused functions.\nfrom hashtables import (HashTable,\n                        hash_table_insert,\n                        hash_table_retrieve)\n\n\ndef get_indices_of_item_weights(weights, length, limit):\n    ht = HashTable(16)\n    #Loop through\n\n    for i in range(length):\n        \n        # weight is the weights at i\n        weight = weights[i]\n\n\n        # weight_limit is the limit - weight\n\n        weight_limit = limit - weight\n\n        # retrieve the weight_limit and set it as index_pair\n\n        index_pair = hash_table_retrieve(ht, weight_limit)\n\n        # if index_pair is not None and higher than i it'll be index_pair first, else it'll be i first\n\n        if index_pair is not None:\n            group = (index_pair, i) if i < index_pair else (i, index_pair)\n\n            # return the group once we know what order it should go in \n            return group\n\n        # finally insert everything into the hash table \n        # \n        hash_table_insert(ht, weight, i)\n        \n\n    return None\n\n\ndef print_answer(answer):\n    if answer is not None:\n        print(str(answer[0]) + \" \" + str(answer[1]))\n    else:\n        print(\"None\")\n","sub_path":"hashtables/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"197982544","text":"import re\nfrom fuzzywuzzy import fuzz\nimport datetime\n\ndef start(ser):\n    \"\"\"\n    Processing file\n    \"\"\"\n    time_processing = datetime.datetime.now()\n    Name = ser['Name']\n    Version = []\n    Rep = []\n    Aplic = []\n    All_rep = []\n    Partya = []\n    Ser_num = []\n    Where = []\n    As_a = []\n    Tec_con = []\n    words = ''\n    for i in range(len(Name)):\n        Name[i] = re.sub(r'\\(\\)', '', str(Name[i]))\n        Name[i] = re.sub(r'(,(\\s)*$)', '', str(Name[i]))\n        \"\"\"\n        Other thing\n        \"\"\"\n        words = str(Name[i])\n        Version0, Where0, Partya0, Ser_num0, Aplic0, As_a0, Tec_con0, words = other(\n            words)\n        \n        if not Version0 == []:\n            Version += Version0\n        else:\n            Version.append(str(''))\n        if not Where0 == []:\n            Where += Where0\n        else:\n            Where.append(str(''))\n        if not Partya0 == []:\n            Partya += Partya0\n        else:\n            Partya.append(str(''))\n        if not Ser_num0 == []:\n            Ser_num += Ser_num0\n        else:\n            Ser_num.append(str(''))\n        if not Aplic0 == []:\n            Aplic += Aplic0\n        else:\n            Aplic.append(str(''))\n        if not As_a0 == []:\n            As_a += As_a0\n        else:\n            As_a.append(str(''))\n        if not Tec_con0 == []:\n            Tec_con += Tec_con0\n        else:\n            Tec_con.append(str(''))\n        words = re.sub(r'(\\(\\)|\\((\\s)*$)', '', str(words))\n        words = words.replace(' ) ', ' ')\n        \"\"\"\n        End of other thing\n        \"\"\"\n        Iskl = re.findall(\n            r'^[а-яА-Я-,\\s/]+?(?=[А-Я]{3,}|:|\\(|\\d|[a-zA-Z]|\\\"|«|$)', str(words), re.M)\n        if not Iskl == []:\n            if len(Iskl[0]) == 1:\n                Rep.append(str(''))\n            else:\n                Rep.append(Iskl[0])\n            words = words.replace(Iskl[0], '')\n        else:\n            Rep.append(str(''))\n        All_rep.append(words)\n        # Name[i] = re.sub(\n        #     r'(\\\"|\\s\\(|\\)\\s|\\)$|«|»|\\s-|\\:|\\,\\s$|\\s$|\\.$|\\,$|^[:\\s]*)',\n        #     ' ', str(Name[i]), re.M)\n    ser = {\n        'Название': Name,\n        'Без лишнего': All_rep,\n        'Модели': Rep,\n        'Версия': Version,\n        'в составе согласно Приложению': Aplic,\n        'Партия': Partya,\n        'Номера': Ser_num,\n        'Расшифровки': Where,\n        'В качестве': As_a,\n        'Технические условия': Tec_con\n    }\n    print('Process => ' + str(datetime.datetime.now() - time_processing))\n    return ser\n\ndef other(words):\n    Version0 = []\n    Where0 = []\n    Partya0 = []\n    Ser_num0 = []\n    Aplic0 = []\n    As_a0 = []\n    Tec_con0 = []\n    # version\n    vers = []\n    vers = re.findall(r'(ПО:? 
не классифицируется( по версиям)?)', str(words))\n if not vers == []:\n vers = vers[0][0]\n if vers == []:\n vers = re.findall(\n r'''(верси.+?(?=верси|cерийные|с функ|в составе|технич|идентифи|завод|зав.|\n партия|по заявке|выполняю|реализу|и медиаш|где|ПО (\\\"|«)|\\)|\\(|$))''', \n str(words))\n if not vers == []:\n vers2 = vers.copy()\n vers = []\n for v in vers2:\n vers.append(v[0])\n for i in range(len(vers)):\n vers[i] = re.sub(\n r'(верси(я|и)( ПО(:| -|))|ПО|версия( программного обеспечения)?)',\n '', str(vers[i]))\n vers[i] = re.sub(r'(^(\\s)*|(\\s)*$|\\)(\\s)*$|;(\\s)*$)', '', str(vers[i]), re.M)\n # print(vers)\n if vers == []:\n vers = re.findall(r'программное обеспечение отсутствует', str(words))\n if not vers == []:\n vers = vers[0]\n if vers == []:\n vers = re.findall(r'ПО:.+?(?=\\)|$)', str(words))\n if not vers == []:\n Version0.append(vers)\n words = re.sub(r'ПО(:)? не классифицируется по версиям', '', words)\n words = re.sub(r'''(верси.+?)(?=верси|cерийные|с функ|в составе|технич|идентифи|завод|зав.|\n партия|по заявке|выполняю|реализу|и медиаш|где|ПО (\\\"|«)|\\)|\\(|$)''', '', words)\n words = re.sub(r'программное обеспечение отсутствует', '', words)\n words = re.sub(r'ПО:.+?(?=\\)|$)', '', words)\n # print(Version0)\n # print(Version0)\n # в составе согласно приложению\n hhh = re.findall(\n r'''(в составе(,|) (согласно|приведенном) ((П|п)риложению|в (п|П)риложении)|\n В СОСТАВЕ пРИЛОЖЕНИЯ).+?(.|,|$)''', words)\n if not hhh == []:\n Aplic0.append(True)\n words = re.sub(\n r'''(в составе(,|) (согласно|приведенном) ((П|п)риложению|в (п|П)риложении)|\n В СОСТАВЕ пРИЛОЖЕНИЯ)''', '', words)\n # где... 177 857\n where = re.findall(r'где.+?(?=\\)|;|$)', words)\n if not where == []:\n Where0.append(where)\n words = re.sub(r'где.+?(?=\\)|;|$)', '', words)\n # партия\n partya0 = re.findall(r'партия.+?(?=с серийными|$)', words)\n par2 = re.findall(r'Партия', words)\n if not par2 == []:\n partya0 = re.findall(r'(?=в количестве).+$', words)\n if not partya0 == []:\n Partya0.append(partya0)\n words = re.sub(r'в количестве.+$', '', words)\n words = re.sub(r'партия.+?(?=с серийными|$)', '', words)\n # номера 5874 5875 7242 7243\n other_num = re.findall(\n r'((зав(одск(ие|ой номер))|(с |)идентификацион).+?(?=\\)|;|$))', words)\n ser_num = re.findall(r'((с |)серийны.+?(?=\\)|;|$))', words)\n if not ser_num == []:\n ser_num = ser_num[0][0]\n rep_num = []\n rep_num = re.findall(r'(серий.+?серий){1}', words)\n if not rep_num == []:\n ser_num = re.findall(r'серийны.+?(?=\\,|;|\\)|$)', words)\n if not other_num == []:\n ser_num.append(other_num[0][0])\n words = re.sub(\n r'((зав(одск(ие|ой номер))|(с |)идентификацион).+?(?=\\)|;|$))', '', words)\n if not ser_num == []:\n Ser_num0.append(ser_num)\n words = re.sub(r'(с |)серийны.+?(?=\\)|;|$)', '', words)\n # в качестве\n asA = re.findall(r'в качестве.+$', words)\n if not asA == []:\n As_a0.append(asA)\n words = re.sub(r'в качестве.+$', '', words)\n # технические условия\n tec_con = re.findall(r'технические условия.+?(?=;|$)', words)\n if not tec_con == []:\n Tec_con0.append(tec_con)\n words = re.sub(r'технические условия.+?(?=;|$)', '', words)\n \n return Version0, Where0, Partya0, Ser_num0, Aplic0, As_a0, Tec_con0, words\n\n\ndef comma(words0):\n Iskl = []\n words = words0.replace(';', ',').split(', ')\n for i, word in enumerate(words):\n\n if(not i == len(words)-1):\n if(abs(len(words[i]) - len(words[i+1])) > 5):\n lenn = min(len(words[i]), len(words[i+1]))\n words[i] = words[i][slice(-lenn-1, len(words[i]))]\n words[i+1] = 
words[i+1][slice(-lenn-1, len(words[i+1]))]\n for word2 in words:\n fuz = fuzz.token_sort_ratio(word, word2)\n if fuz > 70 and fuz < 100:\n Iskl.append(word)\n Iskl.append(word2)\n Iskl = list(set(Iskl))\n return Iskl\n","sub_path":"Работа/Equip/code/modules/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"14133071","text":"from django.test import TestCase\n\nfrom mock import patch\nfrom money import Money\n\nfrom ..models import MangoPayPayIn\n\nfrom .factories import MangoPayPayInFactory\nfrom .client import MockMangoPayApi\n\n\nclass MangoPayPayInTests(TestCase):\n\n def setUp(self):\n self.pay_in = MangoPayPayInFactory()\n\n @patch(\"mangopay.models.get_mangopay_api_client\")\n def test_create(self, mock_client):\n id = 76\n mock_client.return_value = MockMangoPayApi(pay_in_id=id)\n self.assertIsNone(self.pay_in.mangopay_id)\n self.pay_in.create(debited_funds=Money(100, \"EUR\"),\n fees=Money(5, \"EUR\"),\n secure_mode_return_url=\"http://test.com\")\n MangoPayPayIn.objects.get(id=self.pay_in.id, mangopay_id=id)\n\n @patch(\"mangopay.models.get_mangopay_api_client\")\n def test_get(self, mock_client):\n mock_client.return_value = MockMangoPayApi()\n self.assertIsNone(self.pay_in.secure_mode_redirect_url)\n self.assertIsNone(self.pay_in.status)\n self.pay_in.get()\n self.pay_in = MangoPayPayIn.objects.get(id=self.pay_in.id)\n self.assertIsNotNone(self.pay_in.status)\n self.assertIsNotNone(self.pay_in.secure_mode_redirect_url)\n","sub_path":"mangopay/tests/payin.py","file_name":"payin.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"363268983","text":"from django.template.response import TemplateResponse\nfrom django.utils.functional import cached_property\nfrom django.views.generic.edit import FormView\n\nfrom core.helpers import api_client, get_session_request_id\nfrom eligibility import forms, helpers\nfrom sso.utils import SSOLoginRequiredMixin\n\n\nclass CheckIsCompanyOfficerView(SSOLoginRequiredMixin, FormView):\n http_method_names = ['post']\n form_class = forms.SAMLResponseForm\n success_template_name = 'eligibility/gov-verify-success.html'\n failure_template_name = 'eligibility/gov-verify-failure.html'\n\n def get_template_name(self, is_success):\n if is_success:\n return self.success_template_name\n return self.failure_template_name\n\n def form_invalid(self, form):\n return TemplateResponse(\n request=self.request,\n template=self.get_template_name(is_success=False),\n context=self.get_context_data(\n company_details=self.company_details,\n form=form,\n ),\n )\n\n def form_valid(self, form):\n user_attributes = form.get_user_attributes()\n is_success = helpers.is_probably_company_officer(\n first_name=user_attributes['first_name'],\n surname=user_attributes['surname'],\n birth_date=user_attributes['birth_date'],\n company_number=self.company_details['number'],\n )\n return TemplateResponse(\n request=self.request,\n template=self.get_template_name(is_success=is_success),\n context=self.get_context_data(company_details=self.company_details)\n )\n\n def get_form_kwargs(self, *args, **kwargs):\n form_kwargs = super().get_form_kwargs(*args, **kwargs)\n form_kwargs['request_id'] = get_session_request_id(self.request)\n return form_kwargs\n\n @cached_property\n def company_details(self):\n response = api_client.company.retrieve_private_profile(\n 
sso_session_id=self.request.sso_user.session_id,\n )\n response.raise_for_status()\n return response.json()\n","sub_path":"eligibility/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"68333913","text":"from setuptools import setup, find_packages\nimport os\n\n\n# extract version\nwith open(os.path.join(os.path.dirname(__file__),\n \"pypesto\", \"version.py\")) as f:\n version = f.read().split('\\n')[0].split('=')[-1].strip(' ').strip('\"')\n\n\n# read a file\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\n# project metadata\nsetup(name='pypesto',\n version=version,\n description=\"python Parameter EStimation TOolbox\",\n long_description=read('README.md'),\n long_description_content_type=\"text/markdown\",\n author=\"The pyPESTO developers\",\n author_email=\"yannik.schaelte@gmail.com\",\n url=\"https://github.com/icb-dcm/pypesto\",\n packages=find_packages(exclude=[\"doc*\", \"test*\"]),\n install_requires=['numpy>=1.15.1',\n 'scipy>=1.1.0',\n 'pandas>=0.23.4',\n 'matplotlib>=2.2.3',\n 'seaborn>=0.10.0',\n 'cloudpickle>=0.7.0',\n 'h5py>=2.10.0',\n 'tqdm>=4.46.0'],\n tests_require=['pytest>=5.4.2',\n 'flake8>=3.7.2',\n 'gitpython>=3.1.2'],\n extras_require={'amici': ['amici>=0.11.7'],\n 'petab': ['petab>=0.1.7'],\n 'ipopt': ['ipopt>=0.1.9'],\n 'dlib': ['dlib>=19.19.0'],\n 'pyswarm': ['pyswarm>=0.6'],\n 'cmaes': ['cma>=3.0.3'],\n 'pymc3': ['arviz>=0.8.1, <0.9.0',\n 'theano>=1.0.4',\n 'packaging>=20.0',\n 'pymc3>=3.8, <3.9.2']},\n python_requires='>=3.6',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103535968","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.homepage, name='homepage'),\n path('search/', views.search, name='search'),\n path('product//', views.product, name='product'),\n path('favorite/', views.favorite, name='favorite'),\n path('save-favorite//', views.save_favorite,\n name='save_favorite'),\n path('favorite-detail//', views.favorite_detail,\n name='favorite-detail'),\n path('contact/', views.contact, name='contact'),\n path('legal/', views.legal, name='legal'),\n path('vote/', views.vote, name='vote'),\n\n]\n","sub_path":"substitute/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"163535668","text":"import random\n\nfrom states import *\n\nCLEAN_WEIGHT = 1\nSTEPS_WEIGHT = 0.2\n\nclass Environment:\n Grid = []\n Width = 0\n Height = 0\n NumCollisions = 0\n NumTurns = 0\n InitialDirtyAmount = 0\n\n def __init__(self,width,height):\n self.Width = width\n self.Height = height\n self.Grid = []\n for x in range(width):\n newCol = []\n for y in range(height):\n newCol.append(CLEAN)\n self.Grid.append(newCol)\n\n def Reset(self):\n self.NumCollisions = 0\n self.NumTurns = 0\n self.RandomizeWithoutWalls()\n\n def GetTile(self,x,y):\n if x >= self.Width or y >= self.Height or x < 0 or y < 0:\n return WALL\n else:\n return self.Grid[x][y]\n\n def SetTile(self,x,y,val):\n #assert self.Grid[x][y] == WALL and not val == WALL\n self.Grid[x][y] = val\n\n def SetTileByNumber(self,tileNumber,val):\n y = int(tileNumber/self.Width)\n x = int(tileNumber%self.Width)\n self.SetTile(x,y,val)\n\n def SetGridFromBinary(self,bin):\n self.NumCollisions = 0\n self.NumTurns = 0\n number = bin\n binDigits = self.Width * self.Height\n for b in range(0,binDigits):\n if (number%2 != 0):\n self.SetTileByNumber(b,DIRTY)\n number = number // 2\n self.InitialDirtyAmount = self.CountDirty()\n return\n\n def SetWallsFromBinary(self,bin):\n self.NumCollisions = 0\n self.NumTurns = 0\n number = bin\n binDigits = self.Width * self.Height\n for b in range(0,binDigits):\n if (number%2 != 0):\n self.SetTileByNumber(b,WALL)\n number = number // 2\n self.InitialDirtyAmount = self.CountDirty()\n return\n\n def Collide(self,x=0,y=0):\n self.NumTurns += 1\n #assert self.NumTurns > (self.Width * self.Height * 4)\n #print(\"Turns: {}\".format(self.NumTurns))\n if self.GetTile(x,y) == WALL:\n self.NumCollisions += 1\n return True\n else:\n return False\n\n def CountDirty(self):\n numDirty = 0\n for x in range(self.Width):\n for y in range(self.Height):\n if self.GetTile(x,y) == DIRTY:\n numDirty += 1\n return numDirty\n\n def GetTileState(self, tile):\n if self.Grid[tile[0]][tile[1]] == DIRTY:\n return DIRTY\n else:\n return CLEAN\n\n def GetPercentClean(self):\n assert self.CountDirty() <= self.InitialDirtyAmount\n if self.InitialDirtyAmount <= 0:\n return 100\n return (1-(self.CountDirty()/self.InitialDirtyAmount)) * 100\n\n def RandomizeWithoutWalls(self):\n #assert False\n for x in range(self.Width):\n for y in range(self.Height):\n self.SetTile(x=x,y=y,val=random.randint(CLEAN,DIRTY))\n self.InitialDirtyAmount = self.CountDirty()\n if self.InitialDirtyAmount == 0:\n self.RandomizeWithoutWalls()\n\n def RandomizeWithWalls(self):\n #assert False\n for x in range(self.Width):\n for y in range(self.Height):\n self.SetTile(x,y,random.randint(CLEAN,WALL))\n self.InitialDirtyAmount = self.CountDirty()\n if self.InitialDirtyAmount == 0:\n self.RandomizeWithWalls()\n\n def GetPerformanceMeasure(self):\n 
perfmeasure = {\"collisions\":self.NumCollisions,\"numTurns\":self.NumTurns,\"percentClean\":self.GetPercentClean(),\"score\":((self.GetPercentClean() * CLEAN_WEIGHT) - (max(self.NumTurns,1)* STEPS_WEIGHT))}\n return perfmeasure\n\n def Visualize(self):\n print(\"visualizing w:{}, h:{}\".format(self.Width,self.Height))\n for y in range (self.Height):\n for x in range (self.Width):\n print(\"[{}] \".format(self.GetTile(x,y)),end=\"\")\n print(\"\")\n perf = self.GetPerformanceMeasure()\n print(\"Collisions: {}, Steps: {}, Percent Cleaned: {}% Score: {}\".format(perf['collisions'],perf['numTurns'],perf['percentClean'],perf['score']))\n return\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"550815314","text":"#! /usr/bin/env python\n# -*- coding: utf8 -*-\n\n\"\"\"\nRabbitMQ 运行详情\n\n检测机器运行状态:fab -w -f check_pushiphone_fabfile.py server_status\n获取当天推送数量:fab -w -f check_pushiphone_fabfile.py push_num\n获取当天推送成功:fab -w -f check_pushiphone_fabfile.py push_success_num\n获取当天推送失败:fab -w -f check_pushiphone_fabfile.py push_failure_num\n清零统计日志: fab -w -f check_pushiphone_fabfile.py clean_log\n查看日志详情: fab -w -f check_pushiphone_fabfile.py view_log\n\n\"\"\"\n\nfrom fabric.api import run, env, roles, execute, sudo\nfrom fabric.colors import red, green\nfrom fabric.context_managers import cd\n\nenv.roledefs = {\n 'rabbitmq_server':['10.10.10.66', '10.10.10.81'],\n 'rabbitmq_consumer':[ '10.10.10.61', '10.10.10.81', '10.10.10.66']\n }\nenv.user = ''\nenv.password = ''\nenv.port = 50022\n\ncount = 0\n\n@roles('rabbitmq_server')\ndef server_status():\n with cd('/tmp'):\n print(green('[检测机器运行状态……]'))\n sudo('netstat -nutlp')\n\n@roles('rabbitmq_consumer')\ndef push_success_num():\n with cd('/data/logs/push'):\n print(green('[扫描推送成功的消息……]'))\n num = run('grep -c success iphone-info.log')\n global count\n count += int(num)\n print(green('累积推送消息数: %d ' % count))\n\n@roles('rabbitmq_consumer')\ndef push_failure_num():\n with cd('/data/logs/push'):\n print(green('[扫描推送失败的消息……]'))\n num = run('grep -c failure iphone-error.log')\n global count\n count += int(num)\n print(green('累积推送消息数: %d ' % count))\n \n@roles('rabbitmq_consumer')\ndef clean_log():\n with cd('/data/logs/push'):\n print(green('[清除日志……]'))\n run('ls -lh *')\n run('wc -l *')\n run('echo -n \"\" > iphone-info.log')\n run('echo -n \"\" > iphone-error.log')\n run('rm -f iphone-info.log.*')\n run('rm -f iphone-error.log.*')\n\n@roles('rabbitmq_consumer')\ndef view_log():\n with cd('/data/logs/push'):\n print(green('[查看日志……]'))\n run('ls -ilh *')\n \ndef push_num():\n global count\n execute(push_success_num)\n print(red('累积推送成功消息数: %d ' % count, bold=True))\n count = 0\n execute(push_failure_num)\n print(red('累积推送失败消息数: %d ' % count, bold=True))\n \n \n","sub_path":"apps/fabric/check_pushiphone_fabfile.py","file_name":"check_pushiphone_fabfile.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"647576020","text":"from django.core.exceptions import EmptyResultSet\n\nfrom bot import bot\nfrom bot.models import User, Lesson\nfrom messages import no_lessons_notification, not_enough_lessons, not_enough_lessons_number\n\n\ndef send_lessons(user: User):\n amount = user.lessons_amount\n last_lesson = user.last_sent_lesson\n if not last_lesson:\n last_lesson = Lesson.objects.order_by('id').first()\n if not 
last_lesson:\n bot.send_message(user.id, no_lessons_notification)\n raise EmptyResultSet()\n else:\n last_lesson.id -= 1\n lessons = Lesson.objects.filter(id__gt=last_lesson.id).order_by('id')\n for lesson in lessons:\n if amount > 0:\n bot.send_message(user.id, lesson.link)\n amount -= 1\n last_lesson = lesson\n else:\n bot.send_message(user.id, not_enough_lessons_number)\n break\n\n if amount > 0:\n bot.send_message(user.id, not_enough_lessons % amount)\n\n user.last_sent_lesson = last_lesson\n user.lessons_amount = amount\n user.save()","sub_path":"bot/services/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165801980","text":"import numpy as np\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\nn = 10000\nr = norm.rvs(size=n)\nmin, max, len = -5, 5, 40\nbins = np.linspace(min, max, len)\ncount, bins, ignored = plt.hist(r, bins=bins, rwidth=0.9) # 統計長條圖\nplt.plot(bins, norm.pdf(bins)*n*(max-min)/len) # 曲線圖\nplt.show()","sub_path":"python/08-scientific/probability/normal3.py","file_name":"normal3.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"45428577","text":"import math\n\n\ndef calc_distance(start_point, end_point):\n distance = math.sqrt(\n (start_point[0] - end_point[0]) ** 2 +\n (start_point[1] - end_point[1]) ** 2 +\n (start_point[2] - end_point[2]) ** 2\n )\n return distance\n\n\ndef calc_speed(start_point, end_point, fps=10):\n time_delta = 1/fps\n distance = calc_distance(start_point, end_point)\n return distance / time_delta\n\n\ndef get_position(drone):\n return drone.matrix_world.to_translation()\n\n\ndef get_distance(drone1, drone2):\n point1 = get_position(drone1)\n point2 = get_position(drone2)\n\n return calc_distance(point1, point2)\n\n\ndef get_drone_properties(drone):\n return dict(filter(lambda x: x[0].lower().startswith(\"drone_\"), drone.items()))\n\n\ndef add_bool_property(obj, name, description=\"bool property\"):\n rna_ui = obj.get('_RNA_UI')\n if rna_ui is None:\n rna_ui = obj['_RNA_UI'] = {}\n obj[name] = 0\n\n rna_ui[name] = {\"description\": description,\n \"default\": False,\n \"min\": 0,\n \"max\": 1,\n \"soft_min\": 0,\n \"soft_max\": 1}\n\n # def insert_prop_keyframe(obj, prop_path: str, value):\n # obj.keyframe_insert(data_path='[\"prop\"]')","sub_path":"operators/general_functions.py","file_name":"general_functions.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58621773","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py as h5\nimport scipy.ndimage.measurements as snm\n\n\n\nmode=\"dpc\" #\"stxm\" or \"dpc\"\nimport pyqtgraph as pg\nimport sys\n#scanno=sys.argv[1]\nscanno=59597\ndim=(50,50)\nR=(-1,1,-1,1)\n#roi=None\nroi=(100,130,80,110)\n#vmax=700000\nvmax=None\nvmin=None\n#vmin=800000\n#scanno=str(sys.argv[1])\n#dim=sys.argv[2].astype(np.int)\n#R=sys.argv[3].astype(np.float)\n\n\ndef fill_edge_pixels(data, edge_ss=202, edge_fs=172):\n d = np.zeros((data.shape[0], data.shape[1] + 4, data.shape[2] + 4), dtype=data.dtype)\n nx1 = edge_ss\n ny1 = edge_fs\n for i in range(data.shape[0]):\n tt = data[i]\n nx, ny = np.shape(tt)\n t = np.zeros((nx + 4, ny + 4))\n\n t[:nx1, :ny1] = tt[:nx1, :ny1]\n t[:nx1, ny1 + 6:] = tt[:nx1, ny1 + 2:]\n t[nx1 + 6:, :ny1] = tt[nx1 + 2:, :ny1]\n t[nx1 + 6:, ny1 
+ 6:] = tt[nx1 + 2:, ny1 + 2:]\n\n a = np.rint(tt[nx1, :ny1] / 3).astype(data.dtype)\n t[nx1, :ny1] = a\n t[nx1 + 1, :ny1] = a\n t[nx1 + 2, :ny1] = a\n\n a = np.rint(tt[nx1 + 1, :ny1] / 3).astype(data.dtype)\n t[nx1 + 3, :ny1] = a\n t[nx1 + 4, :ny1] = a\n t[nx1 + 5, :ny1] = a\n\n a = np.rint(tt[nx1, ny1 + 2:] / 3).astype(data.dtype)\n t[nx1, ny1 + 6:] = a\n t[nx1 + 1, ny1 + 6:] = a\n t[nx1 + 2, ny1 + 6:] = a\n\n a = np.rint(tt[nx1 + 1, ny1 + 2:] / 3).astype(data.dtype)\n t[nx1 + 3, ny1 + 6:] = a\n t[nx1 + 4, ny1 + 6:] = a\n t[nx1 + 5, ny1 + 6:] = a\n\n a = np.rint(tt[:nx1, ny1] / 3).astype(data.dtype)\n t[:nx1, ny1] = a\n t[:nx1, ny1 + 1] = a\n t[:nx1, ny1 + 2] = a\n\n a = np.rint(tt[:nx1, ny1 + 1] / 3).astype(data.dtype)\n t[:nx1, ny1 + 3] = a\n t[:nx1, ny1 + 4] = a\n t[:nx1, ny1 + 5] = a\n\n a = np.rint(tt[nx1 + 2:, ny1] / 3).astype(data.dtype)\n t[nx1 + 6:, ny1] = a\n t[nx1 + 6:, ny1 + 1] = a\n t[nx1 + 6:, ny1 + 2] = a\n\n a = np.rint(tt[nx1 + 2:, ny1 + 1] / 3).astype(data.dtype)\n t[nx1 + 6:, ny1 + 3] = a\n t[nx1 + 6:, ny1 + 4] = a\n t[nx1 + 6:, ny1 + 5] = a\n\n t[nx1:nx1 + 3, ny1:ny1 + 3] = int(round(tt[nx1, ny1] / 9))\n t[nx1:nx1 + 3, ny1 + 3:ny1 + 6] = int(round(tt[nx1, ny1 + 1] / 9))\n t[nx1 + 3:nx1 + 6, ny1:ny1 + 3] = int(round(tt[nx1 + 1, ny1] / 9))\n t[nx1 + 3:nx1 + 6, ny1 + 3:ny1 + 6] = int(round(tt[nx1 + 1, ny1 + 1] / 9))\n\n d[i] = t\n\n return d\n\n\ndef load_plot_stxm(scanno, dim, R, roi=None ,transpose=False, path=\"/gpfs/cfel/cxi/scratch/user/murrayke/Raw_Data/Brookhaven/March_2019/\",\n savepath=\"/gpfs/cfel/cxi/scratch/user/murrayke/Processed_Data/Brookhaven/Mar_2019/STXM_images/\", edge_ss=202, edge_fs=172, vmin=None,\n vmax=None):\n # Dim:(ss,fs)\n # R: (ssmin,ssmax,fsmin,fsmax) [µm]\n\n # Make_STXM_array\n stxm = np.zeros(dim)\n\n # Load Data\n print(\"Loading Data\")\n f = h5.File(path + \"scan_\" + str(scanno) + \".h5\", \"r\")\n print(\"Loading finished\")\n if roi==None:\n data = fill_edge_pixels(f[\"entry/instrument/detector/data\"][()], edge_ss=202, edge_fs=172)\n roitxt=\"\"\n else:\n data = fill_edge_pixels(f[\"entry/instrument/detector/data\"][()], edge_ss=202, edge_fs=172)[:,roi[0]:roi[1],roi[2]:roi[3]]\n roitxt = \"_roi_%s_%s_%s_%s\"%(roi[0],roi[1],roi[2],roi[3])\n print(data.shape)\n # Get all ionchamber readings\n # ionchamber_readings=np.zeros((data.shape[0]))\n print(\"Processing and plotting\")\n name_ionchamber = \"sclr1_ch3\"\n txtfnam = path + \"scan_\" + str(scanno) + \".txt\"\n f2 = open(txtfnam, \"r\")\n f2_line_list = []\n for line in f2:\n f2_line_list.append(line)\n list_motornames = f2_line_list[0].split(\"\\t\")\n motorpositions = np.zeros((len(f2_line_list) - 1, len(list_motornames)))\n # Looking which entries are not numbers\n test_list_now = f2_line_list[1].split(\"\\t\")\n\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n list_nonnumbers = []\n for i in range(0, len(test_list_now), 1):\n # print(test_list_now[i], is_number(test_list_now[i]), i)\n if is_number(test_list_now[i]) is False:\n list_nonnumbers.append(i)\n # print('list:', list_nonnumbers)\n #####################################\n for i in range(0, motorpositions.shape[0], 1):\n list_now = f2_line_list[i + 1].split(\"\\t\")\n # setting non numbers to zero\n for i2 in list_nonnumbers:\n list_now[i2] = 0\n motorpositions[i, :] = np.asarray(list_now)\n i_ion = None\n for i in range(0, len(list_motornames), 1):\n if list_motornames[i] == str(name_ionchamber):\n i_ion = i\n if i_ion is None:\n raise IOError(\"Couldn't find the ionchamber motorname\")\n ionchamber_readings = motorpositions[:, i_ion]\n ionchamber_readings = ionchamber_readings.astype(np.float) / ionchamber_readings.max()\n\n # Now go through STXM\n if mode==\"stxm\":\n for i1 in range(0, dim[0]):\n for i2 in range(0, dim[1]):\n if (i1*dim[1]+i2)<(dim[0]*dim[1]):\n stxm[i1, i2] = np.sum(data[i1 * dim[1] + i2, :, :]) / ionchamber_readings[i1 * dim[1] + i2]\n else:\n stxm[i1,i2]=None\n elif mode==\"dpc\":\n dpc_map=stxm\n for i1 in range(0,dim[0]):\n for i2 in range(0,dim[1]):\n if (i1 * dim[1] + i2) < (dim[0] * dim[1]):\n dpc_r=snm.center_of_mass(data[i1 * dim[1] + i2, :, :] / ionchamber_readings[i1 * dim[1] + i2])\n dpc_map[i1,i2]=np.sqrt(dpc_r[0]**2+dpc_r[1]**2)\n else:\n dpc_map[i1,i2]=None\n stxm=dpc_map\n else:\n raise IOError(\"Bad mode!\")\n extent = np.roll(R, 2)\n print(np.amin(stxm),np.median(stxm),np.amax(stxm))\n if transpose == True:\n stxm = np.rot90(stxm)\n extent = np.roll(extent, 2)\n # Now save image\n f.close()\n if vmax == None and vmin == None:\n plt.imshow(stxm, extent=extent,cmap=\"gray\")\n else:\n if vmax == None:\n plt.imshow(stxm, extent=extent, cmap=\"gray\", vmin=vmin)\n if vmin == None:\n plt.imshow(stxm, extent=extent, cmap=\"gray\", vmax=vmax)\n if vmin != None and vmax != None:\n plt.imshow(stxm, extent=extent, cmap=\"gray\", vmin=vmin, vmax=vmax)\n plt.xlabel(\"Distance (µm)\")\n plt.ylabel(\"Distance (µm)\")\n if mode==\"stxm\":\n plt.savefig(savepath + \"image_\"+roitxt+\"_\" + str(scanno) + \".png\", dpi=600)\n if mode==\"dpc\":\n plt.savefig(savepath + \"dpc_\" + roitxt + \"_\" + str(scanno) + \".png\", dpi=600)\n \n \n #configarr=(\"scanno=\"+ str(scanno),\"dim=\"+str(dim), \"R=\"+str(R), \"roi=\"+str(roi) ,\"transpose=\"+str(transpose), \"path=\"+str(path),\"savepath=\"+str(savepath), \"edge_ss=\"+str(edge_ss),\"edge_fs=\"+str(edge_fs), \"vmin=\" +str(vmin),\"vmax=\"+str(vmax))\n #write(savepath + \"image_\"+roitxt+\"_\" + str(scanno) + \".txt\",configarr)\nload_plot_stxm(scanno,dim,R,roi=roi,vmax=vmax,vmin=vmin)","sub_path":"process_stxm.py","file_name":"process_stxm.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241529808","text":"from random import choice\n\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Q\nfrom rest_framework import viewsets, status, mixins, permissions, authentication\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom rest_framework_jwt.serializers import jwt_payload_handler, jwt_encode_handler\n\nfrom .serializers import SmsSerializer, EmailSerializer, UserRegSerializer, UserDetailSerializer, \\\n UpdatePasswordSerializer, ChangSmsSerializer, SmsSerializer1\nfrom lib.yunpian import YunPian\nfrom djangoreactredux.settings.base import APIKEY, EMAIL_FROM\nfrom .models import VerifyCode, EmailVerifyRecord\n\nUser = get_user_model()\n\n\nclass CustomBackend(ModelBackend):\n \"\"\" 自定义用户验证 \"\"\"\n\n def authenticate(self, request, username=None, password=None, **kwargs):\n try:\n user = User.objects.get(Q(username=username) | Q(email=username) | Q(mobile=username))\n if user.check_password(password):\n return user\n except Exception as e:\n return None\n\n\nclass SmsCodeViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):\n \"\"\" 发送短信验证码 \"\"\"\n serializer_class = SmsSerializer\n\n def 
generate_code(self):\n \"\"\" 生成四位数字的验证码 \"\"\"\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n mobile = serializer.validated_data[\"mobile\"]\n yun_pian = YunPian(APIKEY)\n code = self.generate_code()\n sms_status = yun_pian.send_sms(code=code, mobile=mobile)\n if sms_status[\"code\"] != 0:\n return Response({\n \"mobile\": sms_status[\"msg\"]\n }, status=status.HTTP_400_BAD_REQUEST)\n else:\n code_record = VerifyCode(code=code, mobile=mobile)\n code_record.save()\n return Response({\n \"mobile\": mobile\n }, status=status.HTTP_201_CREATED)\nclass SmsCodeViewSet1(mixins.CreateModelMixin, viewsets.GenericViewSet):\n \"\"\" 发送短信验证码 \"\"\"\n serializer_class = SmsSerializer1\n\n def generate_code(self):\n \"\"\" 生成四位数字的验证码 \"\"\"\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n mobile = serializer.validated_data[\"mobile\"]\n yun_pian = YunPian(APIKEY)\n code = self.generate_code()\n sms_status = yun_pian.send_sms(code=code, mobile=mobile)\n if sms_status[\"code\"] != 0:\n return Response({\n \"mobile\": sms_status[\"msg\"]\n }, status=status.HTTP_400_BAD_REQUEST)\n else:\n code_record = VerifyCode(code=code, mobile=mobile)\n code_record.save()\n return Response({\n \"mobile\": mobile\n }, status=status.HTTP_201_CREATED)\n\nclass EmailCodeViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):\n \"\"\" 发送邮箱验证码 \"\"\"\n serializer_class = EmailSerializer\n\n def generate_code(self):\n \"\"\" 生成四位数字的验证码 \"\"\"\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n email = serializer.validated_data[\"email\"]\n code = self.generate_code()\n email_title = \"YaK芽课 验证码\"\n email_body = \"您的邮箱验证码为:{0}\".format(code)\n email_status = send_mail(email_title, email_body, EMAIL_FROM, [email])\n if email_status == 1:\n code_record = EmailVerifyRecord(code=code, email=email)\n code_record.save()\n return Response({\n \"email\": email\n }, status=status.HTTP_201_CREATED)\n else:\n return Response({\n \"mobile\": email_status[\"msg\"]\n }, status=status.HTTP_400_BAD_REQUEST)\n\"\"\"\nmixins.CreateModelMixin, mixins.UpdateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet\n\"\"\"\n\nclass UserViewSet(mixins.CreateModelMixin, mixins.UpdateModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet ):\n \"\"\" 用户 \"\"\"\n serializer_class = UserRegSerializer\n queryset = User.objects.all()\n authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)\n # def list(self,request,*args,**kwargs):\n # print(request.user)\n # super(UserViewSet, self).list(request,*args,**kwargs)\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return UserDetailSerializer\n elif self.action == 'create':\n return UserRegSerializer\n elif self.action == \"update\":\n return UpdatePasswordSerializer\n return UserDetailSerializer\n\n # permission_classes = (permissions.IsAuthenticated, )\n 
def get_permissions(self):\n if self.action == 'retrieve':\n return [permissions.IsAuthenticated()]\n elif self.action == 'update':\n return [permissions.IsAuthenticated()]\n elif self.action == 'create':\n return []\n\n return [] # 返回默认值为空一定要加,否则会出错的\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = self.perform_create(serializer)\n\n re_dict = serializer.data\n payload = jwt_payload_handler(user)\n re_dict[\"token\"] = jwt_encode_handler(payload)\n re_dict[\"name\"] = user.name if user.name else user.username\n\n headers = self.get_success_headers(serializer.data)\n return Response(re_dict, status=status.HTTP_201_CREATED, headers=headers)\n\n def get_object(self):\n return self.request.user\n\n def perform_update(self, serializer):\n serializer.save()\n\n\n def perform_create(self, serializer):\n return serializer.save()\n\n\n","sub_path":"src/apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303185464","text":"from django.apps import apps\nfrom django.conf.urls import url\nfrom django.contrib import admin, messages\nfrom django.contrib.admin.options import IncorrectLookupParameters, TO_FIELD_VAR\nfrom django.contrib.admin.utils import unquote\nfrom django.contrib.admin.views.main import ChangeList\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.template.loader import render_to_string\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n\nfrom .constants import DRAFT, PUBLISHED\nfrom .forms import grouper_form_factory\nfrom .models import Version\n\n\nGROUPER_PARAM = 'grouper'\n\n\nclass VersioningAdminMixin:\n \"\"\"Mixin providing versioning functionality to admin classes of\n content models.\n \"\"\"\n def save_model(self, request, obj, form, change):\n \"\"\"\n Overrides the save method to create a version object\n when a content object is created\n \"\"\"\n super().save_model(request, obj, form, change)\n if not change:\n # create a new version object and save it\n Version.objects.create(content=obj, created_by=request.user)\n\n def get_queryset(self, request):\n \"\"\"Limit query to most recent content versions\n \"\"\"\n queryset = super().get_queryset(request)\n versioning_extension = apps.get_app_config('djangocms_versioning').cms_extension\n versionable = versioning_extension.versionables_by_content[queryset.model]\n return queryset.filter(pk__in=versionable.distinct_groupers())\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n # Raise 404 if the version associated with the object is not\n # a draft\n to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))\n content_obj = self.get_object(request, unquote(object_id), to_field)\n if content_obj is not None:\n version = Version.objects.get_for_content(content_obj)\n if version.state != DRAFT:\n raise Http404\n return super().change_view(\n request, object_id, form_url='', extra_context=None)\n\n\nclass VersionChangeList(ChangeList):\n\n def get_filters_params(self, params=None):\n lookup_params = super().get_filters_params(params)\n lookup_params.pop(GROUPER_PARAM, None)\n return lookup_params\n\n def get_queryset(self, request):\n \"\"\"Adds support for 
querying the version model by content grouper\n field using ?grouper={id}.\n\n Filters by the value of grouper field (specified in VersionableItem\n definition) of content model.\n\n Functionality is implemented here, because list_filter doesn't allow\n for specifying filters that work without being shown in the UI\n along with filter choices.\n \"\"\"\n qs = super().get_queryset(request)\n try:\n grouper = int(request.GET.get(GROUPER_PARAM))\n except (TypeError, ValueError):\n raise IncorrectLookupParameters(\"Missing grouper\")\n versioning_extension = apps.get_app_config(\n 'djangocms_versioning').cms_extension\n versionable = versioning_extension.versionables_by_content[\n self.model_admin.model._source_model]\n return qs.filter_by_grouper(versionable, grouper)\n\n\nclass VersionAdmin(admin.ModelAdmin):\n \"\"\"Admin class used for version models.\n \"\"\"\n\n class Media:\n js = ('djangocms_versioning/js/actions.js',)\n css = {\n 'all': ('djangocms_versioning/css/actions.css',)\n }\n\n\n # disable delete action\n actions = None\n\n list_display = (\n 'nr',\n 'created',\n 'created_by',\n 'state',\n 'state_actions',\n )\n list_display_links = None\n\n def get_queryset(self, request):\n return super().get_queryset(request).prefetch_related('content')\n\n def get_changelist(self, request, **kwargs):\n return VersionChangeList\n\n def nr(self, obj):\n \"\"\"Get the identifier of the version. Might be something other\n than the pk eventually.\n \"\"\"\n return obj.pk\n nr.admin_order_field = 'pk'\n\n def _get_archive_link(self, obj):\n \"\"\"Helper function to get the html link to the archive action\n \"\"\"\n if not obj.state == DRAFT:\n # Don't display the link if it can't be archived\n return ''\n archive_url = reverse('admin:{app}_{model}_archive'.format(\n app=obj._meta.app_label, model=self.model._meta.model_name,\n ), args=(obj.pk,))\n return render_to_string(\n 'djangocms_versioning/admin/archive_icon.html',\n {'archive_url': archive_url}\n )\n\n def _get_publish_link(self, obj):\n \"\"\"Helper function to get the html link to the publish action\n \"\"\"\n if not obj.state == DRAFT:\n # Don't display the link if it can't be published\n return ''\n publish_url = reverse('admin:{app}_{model}_publish'.format(\n app=obj._meta.app_label, model=self.model._meta.model_name,\n ), args=(obj.pk,))\n return render_to_string(\n 'djangocms_versioning/admin/publish_icon.html',\n {'publish_url': publish_url}\n )\n\n def _get_unpublish_link(self, obj):\n \"\"\"Helper function to get the html link to the unpublish action\n \"\"\"\n if not obj.state == PUBLISHED:\n # Don't display the link if it can't be unpublished\n return ''\n unpublish_url = reverse('admin:{app}_{model}_unpublish'.format(\n app=obj._meta.app_label, model=self.model._meta.model_name,\n ), args=(obj.pk,))\n return render_to_string(\n 'djangocms_versioning/admin/unpublish_icon.html',\n {'unpublish_url': unpublish_url}\n )\n\n def _get_edit_link(self, obj):\n \"\"\"Helper function to get the html link to the edit action\n \"\"\"\n if obj.state == PUBLISHED:\n pks_for_grouper = obj.versionable.for_grouper(\n obj.grouper).values_list('pk', flat=True)\n drafts = Version.objects.filter(\n object_id__in=pks_for_grouper, content_type=obj.content_type,\n state=DRAFT)\n if drafts.exists():\n return ''\n elif not obj.state == DRAFT:\n # Don't display the link if it's a draft\n return ''\n edit_url = reverse('admin:{app}_{model}_edit_redirect'.format(\n app=obj._meta.app_label, model=self.model._meta.model_name,\n ), args=(obj.pk,))\n return 
render_to_string(\n 'djangocms_versioning/admin/edit_icon.html',\n {'edit_url': edit_url}\n )\n\n def state_actions(self, obj):\n \"\"\"Display links to state change endpoints\n \"\"\"\n archive_link = self._get_archive_link(obj)\n publish_link = self._get_publish_link(obj)\n unpublish_link = self._get_unpublish_link(obj)\n edit_link = self._get_edit_link(obj)\n return format_html(\n edit_link + publish_link + unpublish_link + archive_link)\n state_actions.short_description = 'actions'\n\n def grouper_form_view(self, request):\n \"\"\"Displays an intermediary page to select a grouper object\n to show versions of.\n \"\"\"\n context = dict(\n self.admin_site.each_context(request),\n opts=self.model._meta,\n form=grouper_form_factory(self.model._source_model)(),\n )\n return render(request, 'djangocms_versioning/admin/grouper_form.html', context)\n\n def archive_view(self, request, object_id):\n \"\"\"Archives the specified version and redirects back to the\n version changelist\n \"\"\"\n # FIXME: We should be using POST only for this, but some frontend\n # issues need to be solved first. The code below just needs to\n # be uncommented and a test is also already written (but currently\n # being skipped) to handle the POST-only approach\n\n # This view always changes data so only POST requests should work\n # if request.method != 'POST':\n # raise Http404\n\n # Check version exists\n version = self.get_object(request, unquote(object_id))\n if version is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, object_id)\n # Raise 404 if not in draft status\n if version.state != DRAFT:\n raise Http404\n # Archive the version\n version.archive(request.user)\n # Display message\n messages.success(request, \"Version archived\")\n # Redirect\n url = reverse('admin:{app}_{model}_changelist'.format(\n app=self.model._meta.app_label,\n model=self.model._meta.model_name,\n )) + '?grouper=' + str(version.grouper.pk)\n return redirect(url)\n\n def publish_view(self, request, object_id):\n \"\"\"Publishes the specified version and redirects back to the\n version changelist\n \"\"\"\n # FIXME: We should be using POST only for this, but some frontend\n # issues need to be solved first. The code below just needs to\n # be uncommented and a test is also already written (but currently\n # being skipped) to handle the POST-only approach\n\n # This view always changes data so only POST requests should work\n # if request.method != 'POST':\n # raise Http404\n\n # Check version exists\n version = self.get_object(request, unquote(object_id))\n if version is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, object_id)\n # Raise 404 if not in draft status\n if version.state != DRAFT:\n raise Http404\n # Publish the version\n version.publish(request.user)\n # Display message\n messages.success(request, \"Version published\")\n # Redirect\n url = reverse('admin:{app}_{model}_changelist'.format(\n app=self.model._meta.app_label,\n model=self.model._meta.model_name,\n )) + '?grouper=' + str(version.grouper.pk)\n return redirect(url)\n\n def unpublish_view(self, request, object_id):\n \"\"\"Unpublishes the specified version and redirects back to the\n version changelist\n \"\"\"\n # FIXME: We should be using POST only for this, but some frontend\n # issues need to be solved first. 
The code below just needs to\n # be uncommented and a test is also already written (but currently\n # being skipped) to handle the POST-only approach\n\n # This view always changes data so only POST requests should work\n # if request.method != 'POST':\n # raise Http404\n\n # Check version exists\n version = self.get_object(request, unquote(object_id))\n if version is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, object_id)\n # Raise 404 if not in published status\n if version.state != PUBLISHED:\n raise Http404\n # Unpublish the version\n version.unpublish(request.user)\n # Display message\n messages.success(request, \"Version unpublished\")\n # Redirect\n url = reverse('admin:{app}_{model}_changelist'.format(\n app=self.model._meta.app_label,\n model=self.model._meta.model_name,\n )) + '?grouper=' + str(version.grouper.pk)\n return redirect(url)\n\n def edit_redirect_view(self, request, object_id):\n \"\"\"Redirects to the admin change view and creates a draft version\n if no draft exists yet.\n \"\"\"\n # FIXME: We should be using POST only for this, but some frontend\n # issues need to be solved first. The code below just needs to\n # be uncommented and a test is also already written (but currently\n # being skipped) to handle the POST-only approach\n\n # This view always changes data so only POST requests should work\n # if request.method != 'POST':\n # raise Http404\n\n version = self.get_object(request, unquote(object_id))\n if version is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, object_id)\n # If published then there's extra things to do...\n if version.state == PUBLISHED:\n # First check there is no draft record for this grouper\n # already.\n pks_for_grouper = version.versionable.for_grouper(\n version.grouper).values_list('pk', flat=True)\n content_type = ContentType.objects.get_for_model(version.content)\n drafts = Version.objects.filter(\n object_id__in=pks_for_grouper, content_type=content_type,\n state=DRAFT)\n if drafts.exists():\n # There is a draft record so people should be editing\n # the draft record not the published one. Return 404.\n raise Http404\n # If there is no draft record then create a new version\n # that's a draft with the content copied over\n version = version.copy(request.user)\n # Raise 404 if the version is neither draft or published\n elif version.state != DRAFT:\n raise Http404\n # Redirect\n url = reverse('admin:{app}_{model}_change'.format(\n app=version.content._meta.app_label,\n model=version.content._meta.model_name,\n ), args=(version.content.pk,))\n return redirect(url)\n\n def compare_view(self, request, object_id):\n \"\"\"Compares two versions\n \"\"\"\n # Get version 1 (the version we're comparing against)\n v1 = self.get_object(request, unquote(object_id))\n if v1 is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, object_id)\n v1_preview_url = reverse(\n 'admin:cms_placeholder_render_object_preview',\n args=(v1.content_type_id, v1.object_id))\n # Get the list of versions for the grouper. 
This is for use\n # in the dropdown to choose a version.\n version_list = Version.objects.filter_by_grouper(\n v1.versionable, v1.grouper)\n # Add the above to context\n context = {\n 'version_list': version_list,\n 'v1': v1,\n 'v1_preview_url': v1_preview_url,\n }\n # Now check if version 2 has been specified and add to context\n # if yes\n if 'compare_to' in request.GET:\n v2 = self.get_object(request, unquote(request.GET['compare_to']))\n if v2 is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.model._meta, request.GET['compare_to'])\n else:\n context['v2'] = v2\n context['v2_preview_url'] = reverse(\n 'admin:cms_placeholder_render_object_preview',\n args=(v2.content_type_id, v2.object_id))\n return TemplateResponse(\n request, 'djangocms_versioning/admin/compare.html', context)\n\n def changelist_view(self, request, extra_context=None):\n if not request.GET:\n # redirect to grouper form when there's no GET parameters\n opts = self.model._meta\n return redirect(reverse('admin:{}_{}_grouper'.format(\n opts.app_label,\n opts.model_name,\n )))\n return super().changelist_view(request, extra_context)\n\n def get_urls(self):\n info = self.model._meta.app_label, self.model._meta.model_name\n return [\n url(\n r'^grouper/$',\n self.admin_site.admin_view(self.grouper_form_view),\n name='{}_{}_grouper'.format(*info),\n ),\n url(\n r'^(.+)/archive/$',\n self.admin_site.admin_view(self.archive_view),\n name='{}_{}_archive'.format(*info),\n ),\n url(\n r'^(.+)/publish/$',\n self.admin_site.admin_view(self.publish_view),\n name='{}_{}_publish'.format(*info),\n ),\n url(\n r'^(.+)/unpublish/$',\n self.admin_site.admin_view(self.unpublish_view),\n name='{}_{}_unpublish'.format(*info),\n ),\n url(\n r'^(.+)/edit-redirect/$',\n self.admin_site.admin_view(self.edit_redirect_view),\n name='{}_{}_edit_redirect'.format(*info),\n ),\n url(\n r'^(.+)/compare/$',\n self.admin_site.admin_view(self.compare_view),\n name='{}_{}_compare'.format(*info),\n ),\n ] + super().get_urls()\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Return True for changelist and False for change view.\n \"\"\"\n return obj is None\n\n def has_delete_permission(self, request, obj=None):\n return False\n","sub_path":"djangocms_versioning/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":17329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"206022254","text":"# Queue implementation using an array\n\nclass Queue:\n\n def __init__(self):\n self.queue = []\n\n def check_empty(self):\n return self.queue == []\n\n def queue_size(self):\n return len(self.queue)\n\n def enqueue(self, data):\n self.queue.append(data)\n\n def dequeue(self):\n if self.check_empty():\n print('Oops! 
Queue is empty')\n return\n else:\n data = self.queue[0]\n del self.queue[0]\n return data\n\n def view(self):\n print(self.queue)\n\n\nif __name__ == '__main__':\n\n queue = Queue()\n\n queue.enqueue(12)\n queue.enqueue(52)\n queue.enqueue(25)\n queue.enqueue(35)\n queue.enqueue(76)\n queue.enqueue(24)\n queue.enqueue(43)\n\n print('Queue size:', queue.queue_size())\n queue.view()\n\n print('Dequeue begins:')\n print(queue.dequeue())\n print('Queue size:', queue.queue_size())\n print(queue.dequeue())\n print('Queue size:', queue.queue_size())\n print(queue.dequeue())\n print('Queue size:', queue.queue_size())\n print(queue.dequeue())\n\n queue.view()\n\n","sub_path":"stacks/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540407421","text":"\"\"\"\nAutomatically update the docstring of parameter_sets.py\n\"\"\"\n\nfrom collections import defaultdict\nimport os\nimport re\n\nimport pybtex\n\nimport pybamm\nfrom pybamm.parameters import parameter_sets\n\n\nDOC_INTRO = \"\"\"\nParameter sets from papers. The 'citation' entry provides a reference to the appropriate\npaper in the file \"pybamm/CITATIONS.txt\". To see which parameter sets have been used in\nyour simulation, add the line \"pybamm.print_citations()\" to your script.\"\"\"\n\n\ndef get_ps_dict():\n \"\"\"\n Returns {chemistry:(author_year, [citations])} from all dictionaries in\n parameter_sets.py\n \"\"\"\n parameter_set_dict = defaultdict(list)\n for ps_name, ps_dict in parameter_sets.__dict__.items():\n if not isinstance(ps_dict, dict):\n continue\n elif \"citation\" not in ps_dict or \"chemistry\" not in ps_dict:\n continue\n\n chemistry = ps_dict[\"chemistry\"]\n citation = ps_dict[\"citation\"]\n\n # Enclose citation in a list if not already enclosed\n if not isinstance(citation, list):\n citation = [citation]\n\n parameter_set_dict[chemistry].append((ps_name, citation))\n return parameter_set_dict\n\n\ndef generate_ps_doc(parameter_set_dict):\n \"\"\"\n Generates docstring of parameter_sets.py from the given dictionary\n \"\"\"\n output_list = [DOC_INTRO]\n citations_file = os.path.join(pybamm.root_dir(), \"pybamm\", \"CITATIONS.txt\")\n\n for ps_chemistry in sorted(parameter_set_dict.keys()):\n output_list.append(\"\")\n ps_citations = parameter_set_dict[ps_chemistry]\n chem_name = ps_chemistry.capitalize() + \" \" + \"parameter sets\"\n output_list.append(chem_name)\n dashes = \"-\" * len(ps_chemistry) + \"-\" * 15\n output_list.append(dashes)\n\n for ps_name, ps_citation in sorted(ps_citations):\n citations = pybtex.format_from_file(\n citations_file,\n style=\"plain\",\n output_backend=\"plaintext\",\n citations=ps_citation,\n nocite=True,\n )\n # Remove citation labels \"[3]\"\n citations = re.split(r\"(?:^|\\n)\\[\\d+\\]\\s\", citations)\n # Remove empty strings\n citations = filter(bool, citations)\n fmt_citations = []\n for citation in citations:\n # Break line at the first space before 80 characters\n citation_parts = re.findall(r\"(.{1,79})(?:\\s|$)\", citation)\n # first_line = citation.split('\\n')\n\n indent_citation_parts = []\n for idx, citation_part in enumerate(citation_parts):\n if idx == 0:\n citation_part = \"- \" + citation_part\n else:\n citation_part = \" \" + citation_part\n indent_citation_parts.append(\" \" * 7 + citation_part)\n\n # Join to create a single citation paragraph\n citation = \"\\n\".join(indent_citation_parts)\n fmt_citations.append(citation)\n 
fmt_citations = \"\\n\".join(fmt_citations)\n ps_doc = f\" * {ps_name:} :\\n{fmt_citations}\"\n output_list.append(ps_doc)\n\n output = \"\\n\".join(output_list)\n output += \"\\n\"\n return output\n\n\ndef update_doc(generated_doc):\n \"\"\"\n Opens parameter_sets.py, replaces the docstring and then writes it\n \"\"\"\n with open(\n os.path.join(pybamm.root_dir(), \"pybamm\", \"parameters\", \"parameter_sets.py\"),\n \"r+\",\n ) as ps_fp:\n ps_output = ps_fp.read()\n ps_output = ps_output.replace(parameter_sets.__doc__, generated_doc)\n ps_fp.truncate(0)\n ps_fp.seek(0)\n ps_fp.write(ps_output)\n\n\ndef main():\n parameter_set_dict = get_ps_dict()\n generated_doc = generate_ps_doc(parameter_set_dict)\n update_doc(generated_doc)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pybamm/parameters/update_parameter_sets_doc.py","file_name":"update_parameter_sets_doc.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635437514","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nfrom books.models import Book\nfrom books.forms import BookForm\n\n\ndef root(request):\n return redirect('books:index')\n\ndef all_books(request):\n books = Book.objects.all()\n context = {\n 'description' : {\n 'title' : 'Book list',\n 'h1' : 'Book list',\n },\n 'books' : books,\n }\n\n return render(request, 'books/book-list.html', context=context)\n\ndef get_book(request, isbn):\n try:\n book = Book.objects.get(isbn=isbn)\n except ObjectDoesNotExist:\n book = None\n\n context = {\n 'description': {\n 'title' : 'Book detail',\n 'h1' : 'Book detail',\n },\n 'book' : book,\n }\n\n return render(request, 'books/book-detail.html', context=context)\n\n\ndef add_book(request):\n context = {\n 'description': {\n 'title' : 'Add book',\n 'h1' : 'Add book',\n },\n }\n\n if request.method == 'POST':\n form = BookForm(request.POST)\n if form.is_valid():\n book = form.save()\n return redirect('books:get_book', isbn=book.isbn)\n\n else:\n form = BookForm()\n context['form'] = form\n return render(request, 'books/add-book.html', context=context)\n\ndef edit_book(request, isbn):\n context = {\n 'description': {\n 'title' : 'Edit book',\n 'h1' : 'Edit book',\n },\n }\n\n try:\n book = Book.objects.get(isbn=isbn)\n except ObjectDoesNotExist:\n book = None\n return render(request, 'books/edit-book.html', context=context)\n \n if request.method == 'POST':\n form = BookForm(request.POST, instance=book)\n if form.is_valid():\n book = form.save()\n return redirect('books:get_book', isbn=book.isbn)\n\n else:\n form = BookForm(instance=book)\n context['form'] = form\n context['book'] = book\n return render(request, 'books/edit-book.html', context=context)\n\ndef delete_book(request, isbn):\n try:\n book = Book.objects.get(isbn=isbn)\n except ObjectDoesNotExist:\n book = None\n\n if book:\n book.delete()\n \n return redirect('books:index')\n","sub_path":"django-bookstore-1/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"29291220","text":"t=int(input())\nfor i in range(t):\n d=int(input())\n print(\"Case {}:\".format(i+1))\n total=1\n for i in range(d):\n if i==0:\n new=0\n total+=new\n print(\"Day = {0}, New = {1}, Total = {2}\".format(i+1,new,total))\n\n\n else:\n new=total*2\n total+=new\n print(\"Day = 
{0}, New = {1}, Total = {2}\".format(i+1,new,total))","sub_path":"code for pandemic.py","file_name":"code for pandemic.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316688932","text":"from pathlib import Path\r\nfrom unittest import TestCase\r\nfrom unittest.mock import (\r\n Mock,\r\n MagicMock,\r\n)\r\n\r\nfrom folders import Folders\r\nfrom mp3_tagger_manager import Mp3TaggerManager\r\n\r\n\r\nclass TestMp3TaggerManager(TestCase):\r\n folder_manager = Folders(Folders.OS_MODULE)\r\n tagger_manager = Mp3TaggerManager()\r\n\r\n def test_cant_open_file_as_mp3(self):\r\n if not self.folder_manager.exists('filename.txt'):\r\n file = open('filename.txt','w+')\r\n file.close()\r\n\r\n not_mp3_file_path = Path('./filename.txt')\r\n\r\n with self.assertRaises(Exception):\r\n self.tagger_manager.open_mp3_file(not_mp3_file_path)\r\n\r\n def test_can_open_file_as_mp3(self):\r\n mp3_file_source_path = Path('./files/jax-jones-years-years-play.mp3')\r\n\r\n assert self.tagger_manager.open_mp3_file(str(mp3_file_source_path))\r\n\r\n def test_read_version_1_tags(self):\r\n mock_file = Mock(artist='artist', song='song')\r\n\r\n tags = self.tagger_manager.read_version_tags(1, mock_file)\r\n\r\n self.assertEqual('artist', tags['artist'])\r\n self.assertEqual('song', tags['song'])\r\n\r\n def test_read_version_2_tags(self):\r\n mock_file = Mock(artist='artist', song='song')\r\n\r\n tags = self.tagger_manager.read_version_tags(2, mock_file)\r\n\r\n self.assertEqual('artist', tags['artist'])\r\n self.assertEqual('song', tags['song'])\r\n\r\n\r\n def tearDown(self):\r\n if self.folder_manager.exists('filename.txt'):\r\n self.folder_manager.remove_dir('filename.txt')\r\n","sub_path":"mp3_tagger_manager_test.py","file_name":"mp3_tagger_manager_test.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"183755710","text":"\"\"\"Setup for all tests.\"\"\"\n\nfrom typing import Dict, List\n\nfrom traiter.util import clean_text, shorten\n\nfrom myrsidea.pylib.pipeline import pipeline\n\nNLP = pipeline() # Singleton for testing\n\n# Translate characters resulting from PDF madness\nTRANS = str.maketrans({'¼': '=', '⫻': '×', '#': '♂', '$': '♀'})\n\n\ndef test_traits(text: str) -> List[Dict]:\n \"\"\"Find entities in the doc.\"\"\"\n text = shorten(text)\n text = clean_text(text, trans=TRANS)\n\n doc = NLP(text)\n\n traits = [e._.data for e in doc.ents]\n\n # from pprint import pp\n # pp(traits)\n\n # from spacy import displacy\n # options = {'collapse_punct': False, 'compact': True}\n # displacy.serve(doc, options=options)\n\n return traits\n","sub_path":"tests/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"210900631","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nimport os.path\n\ndef plot(location, file_name):\n\tpath = location + file_name\n\tif os.path.exists(path):\n\t\twith open(path) as f:\n\t\t\tdata = [int(x) for x in next(f).split()]\n\t\t\t# data = data[50:-1]\n\t\t\t# plt.figure(file_name)\n\t\t\tplt.title(location[3:-1] + \" \" + file_name[:-4])\n\t\t\tplt.plot(data)\n\t\t\t# plt.show()\n\t\t\tplt.savefig(path[:-3] + \"png\")\n\t\t\tplt.clf()\n\telse:\n\t\tprint('\"'+ path + '\" does not exist')\n\ndef plot2(location, x, y):\n\tfile_name = str(x) + 
\"-\" + str(y) + \".txt\"\n\tplot(location, file_name)\n\ndef plotAll():\n\tfolder_name = [\"no_leak_new\", \"leak_new\"]\n\tfor location in [\"../\" + str(x) + \"/\" for x in folder_name]:\n\t\tfor x, y in itertools.product(range(1,8), range(0, 100, 20)):\n\t\t\tplot2(location, x, y)\n\t\tplot(location, \"open_close.txt\");\n\nplotAll()\n# plot(\"../no_leak_new/\", \"open_close.txt\")","sub_path":"code/plotWave.py","file_name":"plotWave.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"99236438","text":"from datetime import datetime\nimport logging\nfrom lmpd.lemon.lemon.spiders.query_spider import QuerySpider\nfrom urllib.parse import quote\n\nfrom twisted.internet import defer, reactor\nfrom lmpd.cosmos.service import CosmosService\nfrom scrapy.crawler import CrawlerRunner\n\noffer_types = [\n 'D', # Demo\n 'J', # Jahreswagen\n 'O', # Oldtimer\n 'U' # Gebraucht\n]\n\nquery_by_county = False\n\ncountries = [\n 'A', # Österreich\n 'B', # Belgien\n 'D', # Deutschland\n 'E', # Spanien\n 'F', # Frankreich\n 'I', # Italien\n 'L', # Luxemburg\n 'NL' # Niederlande\n]\n\ndef run():\n logging.basicConfig(\n filename='Logs\\\\query.log',\n level=logging.ERROR,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filemode='w')\n\n logger = logging.getLogger('RunQuerySpider')\n logger.setLevel(logging.INFO)\n\n service = CosmosService()\n makers = list(service.get_all_makers())\n\n runner = CrawlerRunner()\n query_jobs = []\n\n for maker in makers:\n query_jobs.append({\n 'maker_id': maker.get('id'),\n 'maker': maker.get('name'),\n 'start_urls': build_start_urls(maker)\n })\n\n crawl(query_jobs, logger, runner, service)\n reactor.run()\n\n@defer.inlineCallbacks\ndef crawl(query_jobs, logger: logging.Logger, runner: CrawlerRunner, service: CosmosService):\n for job in query_jobs:\n logger.info(f'Start getting queries for {job.get(\"maker\")}')\n QuerySpider.result.hits = 0\n QuerySpider.result.urls = []\n QuerySpider.result.time = datetime.now().isoformat()\n yield runner.crawl(QuerySpider, start_urls=job.get('start_urls'))\n service.update_maker_query(job.get('maker_id'), QuerySpider.result)\n reactor.stop()\n\ndef build_start_urls(maker):\n urls = []\n for m in maker.get('models'):\n if maker.get('is_top'):\n for c in countries:\n urls.append(f'https://www.autoscout24.de/lst/{quote(maker.get(\"name\"))}/{quote(m.get(\"name\"))}?size=20&offer={\",\".join(offer_types)}&cy={c}')\n else:\n urls.append(f'https://www.autoscout24.de/lst/{quote(maker.get(\"name\"))}/{quote(m.get(\"name\"))}?size=20&offer={\",\".join(offer_types)}')\n return urls\n\nif __name__ == '__main__': run()","sub_path":"run_query_spider.py","file_name":"run_query_spider.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"337207779","text":"\nimport json\nimport sys\nfrom adapt.intent import IntentBuilder\nfrom adapt.engine import IntentDeterminationEngine\n\nengine = IntentDeterminationEngine()\n\ncard_keyword = [\n\"card\",\n\"wallet\",\n\"purse\"\n]\n\nfor card in card_keyword:\n engine.register_entity(card, \"Card\")\n\ncard_related_issue = [\n \"lost\",\n \"missed\",\n \"stolen\"\n]\n\nfor cri in card_related_issue:\n engine.register_entity(cri, \"CardLost\")\n\ncard_type = [\n \"credit\",\n \"debit\",\n \"cash card\"\n]\n\nfor ct in card_type:\n engine.register_entity(cri, 
\"CardType\")\n\ncard_lost_intent = IntentBuilder(\"Card_Lost_Intent\")\\\n .require(\"Card\")\\\n .require(\"CardLost\")\\\n .optionally(\"CardType\")\\\n .build()\n\nengine.register_intent_parser(card_lost_intent)\n\nif __name__ == '__main__':\n text = input(\"Enter Your Text: \")\n for intent in engine.determine_intent(text):\n if intent.get('confidence') > 0:\n print(json.dumps(intent, indent=4))\n","sub_path":"intentFinder.py","file_name":"intentFinder.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"514790320","text":"import socket\n\ndef print_error(command, error):\n if error == 'ERROR 500':\n print (\"Unrecognized command: '\" + command + \"'\\n\")\n\n if error == 'ERROR 501':\n print (\"This command is reserved for servers\\n\")\n \n if error == 'ERROR 502':\n print (\"Syntax error, please check command!\\n\")\n\n\ndef create_file(response):\n mark = response.find(b';') # The spot of first semicolon in response\n\n file_name = str( response[ response.find(b' ')+1 : mark ], 'utf-8' )\n data_start = mark + 1 # Start index of \n data_end = response.find(b';\\r\\n', data_start+1) # End index of \n data = response[ data_start : data_end ]\n\n # Write downloaded data to file\n try:\n with open (file_name, 'wb') as f:\n f.write(data)\n except Exception as e:\n print (e)\n else:\n print (\"Downloaded file\", file_name)\n \n\ndef handle_response(response):\n if response == b'ERROR 403;\\r\\n':\n print (\"You are unauthorized to download that file!\\n\") \n\n if response == b'ERROR 404;\\r\\n':\n print (\"File was not found from the server\\nCheck available files with LIST command\\n\")\n\n if response[:4] == b'FILE':\n create_file(response)\n \n if response[:2] == b'LS':\n print( str(response[3:-3], 'utf-8') ) # Prints files separated with SP\n\n\ndef main(HOST, PORT):\n while True:\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.connect((HOST, PORT))\n command = input(\"\\nGive command: \")\n bytes_to_server = bytes(command + '\\r\\n', 'utf-8')\n\n sock.sendall(bytes_to_server)\n \n # Get ACK or ERROR message from server\n recv_ack = sock.recv(16).decode('utf-8')\n msg_type = 'acknowledgement:' if recv_ack == 'ACK 200;\\r\\n' else 'error:'\n print (\"\\nServer sent\" , msg_type, recv_ack[:-3])\n\n if command == 'QUIT;':\n break\n \n # Handle ERROR messages from server\n if recv_ack != 'ACK 200;\\r\\n':\n print_error( command, recv_ack[:-3] )\n \n else:\n response = b''\n while True:\n response += sock.recv(1024)\n if response[-2:] == b'\\r\\n':\n break\n \n handle_response(response)\n\n except ConnectionRefusedError:\n print (\"Server is not active at\", HOST, \"port\", PORT)\n exit(1)\n\n except socket.error as e:\n print (\"Error with socket:\", e)\n exit(1)\n\n\nif __name__ == '__main__':\n main('localhost', 13337)\n","sub_path":"protocol_programming/assignments/A5/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"501595918","text":"from bson.json_util import dumps\r\nimport json\r\n\r\nfrom base.mongo_engine import MongoEngine\r\n\r\n\r\nclass Item:\r\n collection_name = ''\r\n collection_schema = {}\r\n data = {}\r\n\r\n def __init__(self):\r\n pass\r\n\r\n \"\"\"\r\n Returns the cursor to the collection.\r\n \"\"\"\r\n def cursor(self):\r\n if self.collection_name != '':\r\n return 
MongoEngine().get_client()[self.collection_name]\r\n return None\r\n\r\n \"\"\"\r\n Finds all the elements by the given criteria. Only returns the\r\n parameters specified in the projection.\r\n \"\"\"\r\n def find(self, criteria={}, projection={}):\r\n _criteria = criteria if criteria else {'deleted': False}\r\n _projection = projection if projection else self.collection_schema\r\n data = json.loads(\r\n dumps(self.cursor().find(_criteria, _projection)))\r\n data_length = len(data)\r\n if data_length == 0:\r\n self.data = None\r\n elif data_length == 1:\r\n self.data = data[0]\r\n else:\r\n self.data = data\r\n\r\n return self\r\n\r\n \"\"\"\r\n Inserts an item.\r\n \"\"\"\r\n def insert(self, data=None):\r\n\r\n if data is None:\r\n return False\r\n elif type(data) is dict:\r\n _operation = 'insert_one'\r\n elif type(data) is list:\r\n _operation = 'insert_many'\r\n else:\r\n return False\r\n\r\n try:\r\n getattr(self.cursor(), _operation)(data)\r\n return True\r\n except Exception:\r\n return False\r\n\r\n \"\"\"\r\n Completely removes an item by default. If force is false it marks the\r\n item as removed.\r\n \"\"\"\r\n def remove(self, criteria={}):\r\n\r\n try:\r\n self.cursor().delete_one(filter=criteria)\r\n return True\r\n except Exception:\r\n return False\r\n\r\n \"\"\"\r\n Updates the item that fits the criteria with the new data.\r\n \"\"\"\r\n def update(self, criteria, data):\r\n try:\r\n self.cursor().update_one(filter=criteria, update={'$set': data})\r\n return True\r\n except Exception:\r\n return False\r\n","sub_path":"base/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"205101908","text":"import os\nfrom PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, QTimer\n\nimport src.utils as utils\nfrom src.folderReader import FolderReader\nfrom src.file_saver import FileSaver\nfrom src.file_reader import FileReader\nfrom src.trackerFactory import create_tracker\n\nimport cv2\n\n\nclass Controller(QObject):\n\n change_color_index = pyqtSignal(int)\n request_bboxes = pyqtSignal()\n request_and_init_bboxes = pyqtSignal()\n remove_rectangle_signal = pyqtSignal(int)\n update_filename = pyqtSignal(str)\n rectangles_signal = pyqtSignal(list, list, list, list, list, list)\n update_image_folder = pyqtSignal(str)\n\n def __init__(self, parent=None, extension='.png'):\n super().__init__(parent)\n self.mode = utils.MODE_TRACK\n self.tracker_name = 'default'\n self.trackers = []\n self.current_class = 'default'\n self.current_color_index = 0\n self.class_colors = {}\n\n self.prev_index_frame = 0\n self.current_index_frame = 0\n\n self.folder_reader = FolderReader()\n\n self.image_directory = './imagen'\n self.extension = extension\n self.file_saver = FileSaver(self.image_directory)\n self.file_reader = FileReader()\n\n # self.filenames = ['out2.png', 'out19.png']\n self.filenames = utils.read_files(self.image_directory, self.extension)\n self.filenames = utils.sort_files(self.filenames)\n\n # Timer for run button\n self.run_timer = QTimer()\n\n\n @pyqtSlot(list, list, list, list, list, list)\n def process_rectangles(self, xs, ys, widths, heights, color_indices, recent_draw):\n self.save_rectangles(self.get_prev_filename(), xs, ys, widths, heights, color_indices)\n if self.mode == utils.MODE_TRACK:\n xs, ys, widths, heights, color_indices, recent_draw = self.update_trackers(recent_draw, xs, ys, widths, heights, color_indices)\n recent_draw = [False for i in 
recent_draw]\n self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, recent_draw)\n elif self.mode == utils.MODE_RTRACK:\n self.send_saved_bboxes()\n elif self.mode == utils.MODE_COPYBBOX:\n recent_draws = [True for i in recent_draw]\n self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, recent_draws)\n elif self.mode == utils.MODE_EMPTY:\n pass\n elif self.mode == utils.MODE_NOTHING:\n self.send_saved_bboxes()\n\n @pyqtSlot(int)\n def remove_rectangle_slot(self, index):\n self.remove_rectangle(index)\n\n def remove_rectangle(self, index):\n if index < len(self.trackers):\n self.trackers.pop(index)\n\n def update_trackers(self, recent_draws, xs, ys, widths, heights, color_indices):\n xs_out = []\n ys_out = []\n widths_out = []\n heights_out = []\n color_indices_out = []\n curr_draws = []\n trackers_to_remove = []\n prev_image = cv2.imread(self.get_prev_filepath() + self.extension)\n current_image = cv2.imread(self.get_current_filepath() + self.extension)\n for index, (recent_draw, x, y, w, h, color_idx) in enumerate(zip(recent_draws, xs, ys, widths, heights, color_indices)):\n if recent_draw:\n tracker = create_tracker(self.tracker_name)\n tracker.init(prev_image, (x, y, w, h))\n self.trackers.append(tracker)\n ret, bbox = tracker.update(prev_image)\n ret, bbox = self.trackers[index].update(current_image)\n if ret:\n xs_out.append(bbox[0])\n ys_out.append(bbox[1])\n widths_out.append(bbox[2])\n heights_out.append(bbox[3])\n color_indices_out.append(color_idx)\n curr_draws.append(True)\n else:\n trackers_to_remove.append(index)\n\n for index in trackers_to_remove[::-1]:\n self.remove_rectangle_signal.emit(index)\n\n return xs_out, ys_out, widths_out, heights_out, color_indices_out, curr_draws\n\n def send_saved_bboxes(self):\n xs, ys, widths, heights, color_indices = self.file_reader.read_bboxes(self.get_current_filepath())\n self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, [False for i in color_indices])\n\n def save_rectangles(self, filename, xs, ys, widths, heights, color_indices):\n image = cv2.imread(self.get_prev_filepath() + self.extension)\n h, w = image.shape[:2]\n c = 1\n if len(image.shape) > 2:\n c = image.shape[2]\n self.file_saver.save_bboxes(filename, xs, ys, widths, heights, color_indices, w, h, c)\n\n @pyqtSlot(str)\n def set_tracker_name(self, tracker_name):\n self.tracker_name = tracker_name\n\n # Try 1\n for index in range(len(self.trackers))[::-1]:\n self.remove_rectangle_signal.emit(index)\n self.send_saved_bboxes()\n self.request_and_init_bboxes.emit()\n # Try 2\n # self.request_and_init_bboxes.emit()\n\n @pyqtSlot(str)\n def set_current_class(self, class_name):\n self.current_class = class_name\n self.current_color_index = self.class_colors[self.current_class]\n self.change_color_index.emit(self.current_color_index)\n\n def update_mode(self, mode):\n self.mode = mode\n\n @pyqtSlot()\n def update_mode_to_track(self):\n self.update_mode(utils.MODE_TRACK)\n self.set_tracker_name(self.tracker_name)\n\n @pyqtSlot()\n def update_mode_to_rtrack(self):\n self.update_mode(utils.MODE_RTRACK)\n\n @pyqtSlot()\n def update_mode_to_copybbox(self):\n self.update_mode(utils.MODE_COPYBBOX)\n\n @pyqtSlot()\n def update_mode_to_empty(self):\n self.update_mode(utils.MODE_EMPTY)\n\n @pyqtSlot()\n def update_mode_to_nothing(self):\n self.update_mode(utils.MODE_NOTHING)\n\n def set_classes(self, items):\n self.class_colors = {color: index for index, color in enumerate(items)}\n\n @pyqtSlot()\n def request_next(self):\n if 
self.current_index_frame < len(self.filenames) - 1:\n            self.prev_index_frame = self.current_index_frame\n            self.current_index_frame += 1\n            self.update_filename.emit(self.get_current_frame())\n            self.request_bboxes.emit()\n\n    @pyqtSlot()\n    def request_prev(self):\n        if self.current_index_frame > 0:\n            self.prev_index_frame = self.current_index_frame\n            self.current_index_frame -= 1\n            self.update_filename.emit(self.get_current_frame())\n            self.request_bboxes.emit()\n\n    def get_current_frame(self):\n        if self.current_index_frame < 0 or self.current_index_frame >= len(self.filenames):\n            return None\n        else:\n            path = os.path.join(self.image_directory, self.filenames[self.current_index_frame])\n            return path\n\n    def get_current_filename(self):\n        return self.filenames[self.current_index_frame]\n\n    def get_prev_filename(self):\n        return self.filenames[self.prev_index_frame]\n\n    def get_current_filepath(self):\n        return os.path.join(self.image_directory, self.get_current_filename())\n\n    def get_prev_filepath(self):\n        return os.path.join(self.image_directory, self.get_prev_filename())\n\n    def select_folder(self):\n        folder = self.folder_reader.get_folder()\n        self.image_directory = folder\n        self.file_saver.set_folder(folder)\n        self.filenames = utils.read_files(self.image_directory, self.extension)\n        self.filenames = utils.sort_files(self.filenames)\n        self.prev_index_frame = 0\n        self.current_index_frame = 0\n\n        for index in range(len(self.trackers))[::-1]:\n            self.remove_rectangle_signal.emit(index)\n\n        self.update_filename.emit(self.get_current_frame())\n        self.send_saved_bboxes()\n        self.request_and_init_bboxes.emit()\n\n        self.update_image_folder.emit(folder)\n        print('NEWFOLDER', folder)\n\n\n    @pyqtSlot()\n    def run_tracking(self):\n        self.run_timer.start(50)\n\n    @pyqtSlot()\n    def stop_tracking(self):\n        self.run_timer.stop()","sub_path":"src/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"222152281","text":"#!/usr/bin/env python\nfrom sense_hat import SenseHat\nimport time\nimport random\n\nsense = SenseHat()\n\nsense.clear()\n\ndef random_pixel():\n    return [random.randint(0,255) for i in range(3)]\n\ndef random_pixelarray():\n    x = random.randint(0,7)\n    y = random.randint(0,7)\n    pixel = [random.randint(0,255) for i in range(3)]\n    sense.set_pixel(x,y,pixel)\n\n\ncount = 0\nwhile True:\n    random_pixelarray()\n    time.sleep(0.01)\n    count +=1\n    if count % 100 == 0:\n        sense.flip_v()\n        count = 0\n\n\n\n","sub_path":"raspberrypi/pixels.py","file_name":"pixels.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"600454189","text":"# -*- encoding: utf-8 -*-\nfrom gestion_roles.views import *\nfrom django.conf.urls import patterns, url\n#url(r'^$', index, name='roles.index'),\nurlpatterns = patterns('',\n    url(r'^panel$', panelRender, name='pRoles'),#Show: an admin panel for role management with its options\n    url(r'^user/([\\d]+)$', userRender, name='vUsuario'),#Detail: information for the given user\n    url(r'^user/new$', userNewRender, name='nUsuario'),#Show: page to CREATE a user\n    url(r'^user$', userAllRender, name='lUsuario'),#Show: page to list users\n    url(r'^user/edit/([\\d]+)$', userEditRender, name='eUsuario'),#Show: page to EDIT a user\n\n    url(r'^user/all$', userRead, name='userRead'),#List: users\n    url(r'^user/groups$', userGroupsRead, name='userGroupsRead'),#List: groups the user belongs to\n    url(r'^user/delete$', userDelete, name='userDelete'),#Delete: user\n\n\n    url(r'^group/users$', groupUsersRead, name='groupUsersRead'),#List: users of a group\n    url(r'^group/([\\d]+)$', groupRender, name='vGrupo'),#Detail: group information\n    url(r'^group/all$', groupRead, name='groupRead'),#List: groups\n    url(r'^group/new$', groupNewRender, name='nGrupo'),#Show page to CREATE a group\n    url(r'^group$', groupAllRender, name='lGrupo'),#Show: page to list groups\n    url(r'^group/edit/([\\d]+)$', groupEditRender, name='eGrupo'),#Show: page to EDIT a group\n    url(r'^group/delete$', groupDelete, name='groupDelete'),#Delete: group\n\n    url(r'^permission/([\\w]+)/([\\d]+)$', permissionRender, name='aPermiso'),#Show permission administration page for a given group or user\n    url(r'^permission/create$', permissionCreate, name='nPermiso'),#Create permission\n    url(r'^permission/delete$', permissionDelete, name='dPermiso'),#Delete permission\n    url(r'^permission/update$', permissionUpdate, name='uPermiso'),#Update permission\n    url(r'^permission/all$', permissionRead, name='permissionRead'),#List permissions\n)","sub_path":"SGSISoft/gestion_roles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454353727","text":"#!/usr/bin/env python\n\nimport sys\n\nlocation_mapping = ('32', '31', '30', '29', '28', '27', '26', '25', '24', '23', '22', '21', '20', '19', '18', '17', '16', '15', '14', '13', '12', '11', '10', '09', '08', '07', '06', '05', '04', '03', '02', '01', '00',)\n\n#reverse the location index of the training data\n# input: location,ap,mac,ssid,rssi...\n# output: location,ap,mac,ssid,rssi...\n\ndef reverse():\n    for line in sys.stdin.readlines():\n        line = line.strip('\\n')\n        words= line.split(',')\n        words[0] = location_mapping[int(words[0])]\n        sys.stdout.write(','.join(words))\n        sys.stdout.write('\\n')\n\ndef sort():\n    content = sys.stdin.readlines()\n    sortedcontent = sorted(content)\n    for item in sortedcontent:\n        sys.stdout.write(item)\n\ndef print_sequence():\n    index = 32\n    while index >= 0:\n        sys.stdout.write(\"'%02d', \" % index)\n        index -= 1\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        sys.stderr.write(\"USAGE: ./run [option: sort|reverse]\\n\")\n        sys.exit(1)\n    if sys.argv[1] == 'sort':\n        sort()\n    elif sys.argv[1] == 'reverse':\n        reverse()\n    else:\n        sys.stderr.write(\"PARAMETER ERROR\\n\")\n        sys.exit(1)\n\n","sub_path":"machine_learning/mx3/reverse_location_sequence.py","file_name":"reverse_location_sequence.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"624065398","text":"import asyncio\nimport concurrent\nfrom multiprocessing import get_context\nfrom typing import Dict, List\n\nfrom athena.logger import logger\nfrom athena.models import DBProgrammingFeedback\nfrom athena.schemas import ProgrammingFeedback\n\nfrom module_programming_themisml.extract_methods.method_node import MethodNode\nfrom .code_similarity_computer import CodeSimilarityComputer\n\nSIMILARITY_SCORE_THRESHOLD = 0.75\nASYNC_PROCESSING = True  # faster, but worse for debugging\n\n\ndef get_feedback_suggestions_for_method(\n    feedbacks: List[DBProgrammingFeedback],\n    filepath: str,\n    method: MethodNode,\n    include_code: bool = False\n) -> List[ProgrammingFeedback]:\n    \"\"\"\n    Get feedback suggestions from 
comparisons between a function block of a given submission\n and multiple feedback rows\n \"\"\"\n considered_feedbacks = []\n sim_computer = CodeSimilarityComputer()\n for feedback in feedbacks:\n if feedback.file_path == filepath and feedback.meta.get(\"method_name\") == method.name:\n considered_feedbacks.append(feedback)\n sim_computer.add_comparison(method.source_code, feedback.get_referenced_code())\n\n sim_computer.compute_similarity_scores()\n\n suggested = []\n for feedback in considered_feedbacks:\n feedback_code = feedback.get_referenced_code()\n similarity = sim_computer.get_similarity_score(method.source_code, feedback_code)\n if similarity.f1 >= SIMILARITY_SCORE_THRESHOLD:\n logger.info(\"Found similar code with similarity score %d: %s\", similarity, feedback)\n original_code = feedback_code\n feedback_to_give = feedback.to_schema()\n if include_code:\n feedback_to_give.meta[\"code\"] = method.source_code\n feedback_to_give.line_start = method.line_start\n feedback_to_give.line_end = method.line_end\n feedback_to_give.meta = {\n **feedback_to_give.meta,\n \"precision_score\": similarity.precision,\n \"recall_score\": similarity.recall,\n \"similarity_score\": similarity.f1,\n \"similarity_score_f3\": similarity.f3,\n }\n if include_code:\n feedback_to_give.meta[\"originally_on_code\"] = original_code\n suggested.append(feedback_to_give)\n return sorted(suggested, key=lambda f: f.meta[\"similarity_score\"], reverse=True)\n\n\nasync def get_feedback_suggestions(\n function_blocks: Dict[str, List[MethodNode]],\n feedbacks: List[DBProgrammingFeedback],\n include_code: bool = False\n) -> List[ProgrammingFeedback]:\n \"\"\"\n Get feedback suggestions from comparisons between function blocks of a given submission\n and multiple feedback rows.\n This is quicker than calling get_feedback_suggestions_for_method for each method\n because it uses multiple processes to do the comparisons in parallel.\n \"\"\"\n if ASYNC_PROCESSING:\n loop = asyncio.get_event_loop()\n # Doing it like this for compatibility with FastAPI / Uvicorn, see https://github.com/tiangolo/fastapi/issues/1487#issuecomment-657290725\n with concurrent.futures.ProcessPoolExecutor(mp_context=get_context(\"spawn\")) as pool: # type: ignore\n results = await asyncio.gather(*[\n loop.run_in_executor(pool, get_feedback_suggestions_for_method,\n feedbacks, filepath, method, include_code)\n for filepath, methods in function_blocks.items()\n for method in methods\n ])\n else:\n results = []\n for filepath, methods in function_blocks.items():\n for method in methods:\n results.append(get_feedback_suggestions_for_method(feedbacks, filepath, method, include_code))\n return [result for result_list in results for result in result_list]\n","sub_path":"module_programming_themisml/module_programming_themisml/feedback_suggestions/feedback_suggestions.py","file_name":"feedback_suggestions.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319880284","text":"import urllib.request\nfrom bs4 import *\n\nurl = input('Enter - ')\ncount=int(input('Enter Count:'))\npos=int(input('Enter Position:'))\n\nhtml = urllib.request.urlopen(url).read()\nsoup = BeautifulSoup(html,\"html.parser\")\nprint (\"Retrieving: \",url)\nc=0\ns=[]\ntags = soup('a')\nfor tag in tags:\n s.append(str(tag.get('href', None)))\nfor link in s:\n\tprint (\"Retrieving: \",s[pos-1])\n\tnew = urllib.request.urlopen(s[pos-1]).read()\n\tsoup = 
BeautifulSoup(new,\"html.parser\")\n\ttags = soup('a')\n\ts=[] \n\tfor tag in tags:\n\t\ts.append(str(tag.get('href', None)))\n\tc=c+1\n\tif c==count:\n\t\tbreak","sub_path":"beta.py","file_name":"beta.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325103867","text":"from asyncio import AbstractEventLoop\nfrom typing import Any, Dict, Optional, TYPE_CHECKING\n\nfrom aiohttp import ClientSession\nfrom ujson import loads\n\nfrom autobrowser.abcs import BehaviorManager\nfrom autobrowser.automation import AutomationConfig\nfrom autobrowser.util import AutoLogger, Helper, create_autologger\nfrom .runners import WRBehaviorRunner\n\nif TYPE_CHECKING:\n from autobrowser.abcs import Behavior, Tab\n\n__all__ = [\"RemoteBehaviorManager\"]\n\n\nclass RemoteBehaviorManager(BehaviorManager):\n \"\"\"Manages matching URL to their corresponding behaviors by requesting\n the behavior from a remote endpoint\n \"\"\"\n\n __slots__ = [\"__weakref__\", \"conf\", \"logger\", \"loop\", \"session\"]\n\n def __init__(\n self,\n conf: AutomationConfig,\n session: ClientSession,\n loop: Optional[AbstractEventLoop] = None,\n ) -> None:\n \"\"\"Initialize the new instance of RemoteBehaviorManager\n\n :param conf: The automation's config\n :param session: The HTTP session to use for making the behavior requests\n :param loop: The event loop for the automation\n \"\"\"\n self.conf: AutomationConfig = conf\n self.session: ClientSession = session\n self.loop: AbstractEventLoop = Helper.ensure_loop(loop)\n self.logger: AutoLogger = create_autologger(\n \"remoteBehaviorManager\", \"RemoteBehaviorManager\"\n )\n\n async def behavior_for_url(self, url: str, tab: \"Tab\", **kwargs: Any) -> \"Behavior\":\n self.logger.info(\"behavior_for_url\", f\"fetching behavior - {url}\")\n async with self.session.get(self.conf.retrieve_behavior_url(url)) as res:\n self.logger.info(\n \"behavior_for_url\",\n f\"fetched behavior - {{'url': '{url}', 'status': {res.status}}}\",\n )\n res.raise_for_status()\n behavior_js = await res.text()\n behavior = WRBehaviorRunner(\n behavior_js=behavior_js,\n tab=tab,\n next_action_expression=self.conf.behavior_action_expression,\n loop=self.loop,\n **kwargs,\n )\n return behavior\n\n async def behavior_info_for_url(self, url: str) -> Dict[str, Any]:\n self.logger.info(\"behavior_info_for_url\", f\"fetching behavior info for {url}\")\n async with self.session.get(self.conf.behavior_info_url(url)) as res:\n self.logger.info(\n \"behavior_info_for_url\",\n f\"fetched behavior info - {{'url': '{url}', 'status': {res.status}}}\",\n )\n res.raise_for_status()\n info: Dict[str, Any] = await res.json(loads=loads)\n return info\n\n def __str__(self) -> str:\n info = f\"behavior={self.conf.fetch_behavior_endpoint}, info={self.conf.fetch_behavior_info_endpoint}\"\n return f\"RemoteBehaviorManager({info})\"\n\n def __repr__(self) -> str:\n return self.__str__()\n","sub_path":"autobrowser/behaviors/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"338921128","text":"from unittest import TestCase\n\nfrom aiortc import RTCSessionDescription\n\n\nclass RTCSessionDescriptionTest(TestCase):\n def test_bad_type(self):\n with self.assertRaises(ValueError) as cm:\n RTCSessionDescription(sdp='v=0\\r\\n', type='bogus')\n self.assertEqual(str(cm.exception), 'Unexpected SDP type \"bogus\"')\n\n 
def test_good_type(self):\n desc = RTCSessionDescription(sdp='v=0\\r\\n', type='answer')\n self.assertEqual(desc.sdp, 'v=0\\r\\n')\n self.assertEqual(desc.type, 'answer')\n","sub_path":"tests/test_rtcsessiondescription.py","file_name":"test_rtcsessiondescription.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"121616395","text":"from django.db import models\n\nfrom modelcluster.fields import ParentalKey\n\nfrom wagtail.wagtailsnippets.models import register_snippet\nfrom wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel\nfrom wagtail.wagtailcore.models import Orderable, Page\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsearch import index\n\n# model for library staff\n@register_snippet\nclass StaffMember(models.Model):\n name = models.CharField(max_length=150, blank=False)\n email = models.EmailField(default=\"username@cca.edu\")\n phone = models.CharField(\n max_length=12,\n blank=True,\n default=\"415.703.5555\",\n help_text='In form \"555.555.5555\"',\n )\n position = models.CharField(max_length=150)\n main_image = models.ForeignKey(\n 'wagtailimages.Image',\n help_text=\"Will be sized 150-by-150px on the staff list page.\",\n null=True,\n on_delete=models.PROTECT,\n related_name='+',\n )\n bio = RichTextField(help_text='A single 4-5 sentence paragraph.')\n slug = models.CharField(max_length=150)\n\n panels = [\n FieldPanel('name', classname=\"title\"),\n MultiFieldPanel([\n FieldPanel('email', classname=\"col6\"),\n FieldPanel('phone', classname=\"col6\"),\n FieldPanel('position'),\n ]),\n ImageChooserPanel('main_image'),\n FieldPanel('bio'),\n ]\n\n # on save generate slug from email address\n def save(self):\n self.slug = self.email.replace('@cca.edu', '')\n return super(StaffMember, self).save()\n\n def __str__(self):\n return self.name\n\n# connection between staff & the staff page\nclass StaffPageStaffMembers(Orderable):\n page = ParentalKey('staff.StaffListPage', related_name='staff_members')\n staff_member = models.ForeignKey('StaffMember', related_name='+')\n\n panels = [\n SnippetChooserPanel('staff_member'),\n ]\n\n# actual staff list page\nclass StaffListPage(Page, index.Indexed):\n parent_page_types = ['categories.RowComponent']\n subpage_types = []\n main_image = models.ForeignKey(\n 'wagtailimages.Image',\n help_text='Only used in search results right now',\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name='+',\n )\n\n content_panels = Page.content_panels + [\n ImageChooserPanel('main_image'),\n InlinePanel('staff_members', label='Staff Member'),\n ]\n\n # shouldn't have to do this hacky workaround but index.RelatedFields chokes\n # on the related StaffMember fields\n def get_related_staff_for_search(self):\n staff_fields = []\n\n for staff in self.staff_members.all():\n staff_fields.append(staff.staff_member.name)\n staff_fields.append(staff.staff_member.email)\n staff_fields.append(staff.staff_member.phone)\n staff_fields.append(staff.staff_member.position)\n staff_fields.append(staff.staff_member.bio)\n\n return '\\n'.join(staff_fields)\n\n search_fields = Page.search_fields + [\n index.SearchField('get_related_staff_for_search')\n ]\n\n # for consistency with other child pages in categories app\n def category(self):\n return 'about-us'\n\n # allow only 
one instance of the staff list page to be created\n @classmethod\n def can_create_at(cls, parent):\n return super(StaffListPage, cls).can_create_at(parent) and not cls.objects.exists()\n","sub_path":"libraries/staff/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"294284860","text":"s = raw_input(\"Enter the string \\n\")\nlogestSubstring = \"\"\nfor i in range(len(s)):\n\tASCIINumber = ord(s[i])\n\tAuxiliarString = ''\n\tAuxiliarChar = ''\n\tif ASCIINumber<=122:\n\t\tfor j in range (len(s)-i):\n\t\t\tAuxiliarChar = ord(s[i+j])\n\t\t\tif AuxiliarChar>=ASCIINumber:\n\t\t\t\tAuxiliarString += chr(AuxiliarChar)\n\t\t\t\tASCIINumber = AuxiliarChar\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\t\t\n\tif len(AuxiliarString)>len(logestSubstring):\n\t\t\t\tlogestSubstring = AuxiliarString\n\nprint('Longest substring in alphabetical order is: ' + logestSubstring)","sub_path":"AlphabeticalSubstrings.py","file_name":"AlphabeticalSubstrings.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"180865982","text":"from framework.rom.rom_object import ROMObject\nfrom framework.rom.rom_property import StringProperty\nfrom framework.rom import meta\nfrom framework.utils import log\n\n\n@meta.rom(singleton=True, description='Application level configuration, root node of configuration tree')\nclass SysEntry(ROMObject):\n Version = StringProperty(default='3.0.0.999999', display='Version', category='state')\n\n VersionType = StringProperty(default='', display='Version', category='state')\n\n TestCaseName = StringProperty(default='Untitled', display='Test Case Name', category='config')\n\n TestCaseDirectory = StringProperty(default='', display='Test Case Name', category='config', private=True)\n\n def _setup(self):\n try:\n ver = self.get_version()\n if ver:\n self.Version = ver[0]\n log.Logger.CL.info('Version : {}'.format(self.Version))\n self.VersionType = ver[1]\n except:\n pass\n\n @classmethod\n def get_version(cls):\n try:\n from framework import version\n ver = version.Version\n if ver.endswith('.*'):\n ver = ver[:-1]\n ver = ver + version.Revision\n return ver,version.VersionType\n except:\n return None\n\n","sub_path":"CL/framework/sysentry.py","file_name":"sysentry.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"355523395","text":"import pytest\nimport rapidjson\n\n# from http://json.org/JSON_checker/test/pass2.json\nJSON = r'''\n[[[[[[[[[[[[[[[[[[[\"Not too deep\"]]]]]]]]]]]]]]]]]]]\n'''\n\n@pytest.mark.unit\ndef test_parse():\n # test in/out equivalence and parsing\n res = rapidjson.loads(JSON)\n out = rapidjson.dumps(res)\n assert res == rapidjson.loads(out)\n","sub_path":"tests/test_pass2.py","file_name":"test_pass2.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"509596705","text":"'''\nReplace all the special characters(!, @, #, $, %, ^, &, *) in a given string with a space.\nexample : ab!@#cd is the input, the output is ab cd\nOutput has three spaces, which are to be replaced with these special characters\n'''\ndef main():\n '''\n Read string from the input, store it in variable str_input.\n '''\n str_1 = input()\n spl = [\"!\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"*\"]\n 
my_list = list(str_1)\n str_len = len(str_1)\n for x_i in range(str_len):\n if my_list[x_i] in spl:\n my_list[x_i] = \" \"\n ans = \"\".join(str(q) for q in my_list)\n print(ans)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cspp1-assignments/m6/p2/special_char.py","file_name":"special_char.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"250430248","text":"print(\"==============================================================================================\")\r\n\r\n#import argparse\r\n#\r\n#parser = argparse.ArgumentParser()\r\n#parser.add_argument(\"run\", help=\"enter the specific run you need to process\",type=str)\r\n#args = parser.parse_args()\r\n#\r\n#run = str(args.run)\r\n\r\nprint(\"starting........................................................................................\")\r\n\r\nimport glob\r\n\r\nprint(\"imported glob........................................................................................\")\r\n\r\nrun = '000265383'\r\n\r\n#files_in_order = glob.glob(\"/scratch/vljchr004/data/msc-thesis-data/unprocessed/\" + run + '/**/*.txt', recursive=True)\r\n\r\nfiles_in_order = glob.glob(\"C:/Users/gerhard/Documents/msc-thesis-data/unprocessed/\" + run + '/**/*.txt', recursive=True)\r\n\r\na = list(range(1,len(files_in_order)-1))\r\n\r\nfiles_in_order = [files_in_order[i] for i in a]\r\n\r\nprint(\"read files list........................................................................................\")\r\n\r\nfrom ast import literal_eval\r\n\r\ndef file_reader1(i):\r\n di = open(i)\r\n di = di.read()\r\n if di == \"}\":\r\n pass\r\n else:\r\n di = di + \"}\"\r\n di = literal_eval(di)\r\n ki = list(di.keys())\r\n P = [di.get(k).get('P') for k in ki]\r\n return(P)\r\n \r\ndef file_reader2(i,l):\r\n di = open(i)\r\n print(i)\r\n di = di.read()\r\n if di == \"}\":\r\n pass\r\n else:\r\n di = di + \"}\"\r\n di = literal_eval(di)\r\n ki = list(di.keys())\r\n layer = [di.get(k).get(l) for k in ki]\r\n return(layer)\r\n\r\n\r\nimport numpy as np\r\n\r\nprint(\"pdg........................................................................................\")\r\n \r\nP0 = [file_reader1(i) for i in files_in_order]\r\n\r\n\r\nprint(\"layer 0........................................................................................\")\r\n\r\nlayer0 = [file_reader2(i,\"layer 0\") for i in files_in_order]\r\n\r\nlayer0 = np.array([item for sublist in layer0 for item in sublist if sublist is not None])\r\n\r\nP0 = np.array([item for sublist in P0 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer0])\r\n\r\nlayer0 = np.delete(layer0, empties)\r\n\r\nlayer0 = np.stack(layer0)\r\n\r\nP0 = np.delete(P0, empties)\r\n\r\nprint(\"layer 1........................................................................................\")\r\n\r\nlayer1 = [file_reader2(i,\"layer 1\") for i in files_in_order]\r\n\r\nP1 = [file_reader1(i) for i in files_in_order]\r\n\r\nlayer1 = np.array([item for sublist in layer1 for item in sublist])\r\n\r\nP1 = np.array([item for sublist in P1 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer1])\r\n\r\nlayer1 = np.delete(layer1, empties)\r\n\r\nlayer1 = np.stack(layer1)\r\n\r\nP1 = np.delete(P1, empties)\r\n\r\n\r\nprint(\"layer 2........................................................................................\")\r\n\r\nlayer2 = [file_reader2(i,\"layer 2\") for i in 
files_in_order]\r\n\r\nP2 = [file_reader1(i) for i in files_in_order]\r\n\r\nlayer2 = np.array([item for sublist in layer2 for item in sublist])\r\n\r\nP2 = np.array([item for sublist in P2 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer2])\r\n\r\nlayer2 = np.delete(layer2, empties)\r\n\r\nlayer2 = np.stack(layer2)\r\n\r\nP2 = np.delete(P2, empties)\r\n\r\n\r\nprint(\"layer 3........................................................................................\")\r\n\r\nlayer3 = [file_reader2(i,\"layer 3\") for i in files_in_order]\r\n\r\nP3 = [file_reader1(i) for i in files_in_order]\r\n\r\nlayer3 = np.array([item for sublist in layer3 for item in sublist])\r\n\r\nP3 = np.array([item for sublist in P3 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer3])\r\n\r\nlayer3 = np.delete(layer3, empties)\r\n\r\nlayer3 = np.stack(layer3)\r\n\r\nP3 = np.delete(P3, empties)\r\n\r\n\r\nprint(\"layer 4........................................................................................\")\r\n\r\nlayer4 = [file_reader2(i,\"layer 4\") for i in files_in_order]\r\n\r\nP4 = [file_reader1(i) for i in files_in_order]\r\n\r\nlayer4 = np.array([item for sublist in layer4 for item in sublist])\r\n\r\nP4 = np.array([item for sublist in P4 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer4])\r\n\r\nlayer4 = np.delete(layer4, empties)\r\n\r\nlayer4 = np.stack(layer4)\r\n\r\nP4 = np.delete(P4, empties)\r\n\r\n\r\nprint(\"layer 5........................................................................................\")\r\n\r\nlayer5 = [file_reader2(i,\"layer 5\") for i in files_in_order]\r\n\r\nP5 = [file_reader1(i) for i in files_in_order]\r\n\r\nlayer5 = np.array([item for sublist in layer5 for item in sublist])\r\n\r\nP5 = np.array([item for sublist in P5 for item in sublist])\r\n\r\nempties = np.where([np.array(i).shape!=(17,24) for i in layer5])\r\n\r\nlayer5 = np.delete(layer5, empties)\r\n\r\nlayer5 = np.stack(layer5)\r\n\r\nP5 = np.delete(P5, empties)\r\n\r\nprint(\"mapped out files to useful elements....................................................................\")\r\n\r\nprint(\"concatenate pdgs and layers....................................................................\")\r\n\r\nP = np.concatenate([P0,P1,P2,P3,P4,P5]).ravel()\r\n\r\nx = np.vstack([layer0,layer1,layer2,layer3,layer4,layer5])\r\n\r\nnz = np.array([np.count_nonzero(i) for i in x])\r\n\r\nzeros = np.where(nz==0)\r\n\r\nP = np.delete(P,zeros)\r\n \r\n#np.savetxt('/scratch/vljchr004/data/msc-thesis-data/cnn/P_' + run + '.csv',P,delimiter=\", \")\r\n\r\nnp.savetxt('C:/Users/gerhard/Documents/msc-thesis-data/cnn/P_' + run + '.csv',P,delimiter=\", \")\r\n\r\nprint(\"done.........................................................................................\")\r\n\r\nprint(\"==============================================================================================\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Code/Particle_Identification/msc-hpc/OLD/OLD/round3/preproc_add_info.py","file_name":"preproc_add_info.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614375676","text":"\n\n#calss header\nclass _FIDELITY():\n\tdef __init__(self,): \n\t\tself.name = \"FIDELITY\"\n\t\tself.definitions = [u'honest or lasting support, or loyalty, especially to a sexual partner: ', u'the degree to which 
the detail and quality of an original, such as a picture, sound, or story, is copied exactly: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_fidelity.py","file_name":"_fidelity.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447561462","text":"#!/usr/bin/env python3\n\n# The following code is developed by Sisi Li\n# Please contact sisli@umich.edu if you have any questions\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport math\nfrom scipy.interpolate import interp1d \n\n#Importing the dataset \n#paths = pd.read_csv('vehicle_Encountering_detail.csv')\n#paths = np.array(paths)\ndata = np.load('np_array_path.npy')\n# data process\n\ntraining_data = []\nfor id_paths in range (1, 40):\n id_paths_detail = data[:,[4,5,7,8]][data[:,1] == id_paths]\n id_paths_detail = id_paths_detail[0:int(len(id_paths_detail)/2)]\n training_data.append(list(id_paths_detail))\n \n#add speed\ntemp_total= []\nfor i in range(len(training_data)):\n temp_paths = training_data[i] # list \n temp = [] \n for m in range(len(temp_paths) - 1):\n speed_A = math.sqrt((temp_paths[m + 1][0] - temp_paths[m][0])**2 + \n (temp_paths[m + 1][1] - temp_paths[m][1])**2)\n speed_B = math.sqrt((temp_paths[m + 1][2] - temp_paths[m][2])**2 + \n (temp_paths[m + 1][3] - temp_paths[m][3])**2)\n\n speed_relative = math.fabs(speed_A - speed_B)\n temp_= np.append(temp_paths[m],speed_relative)\n temp.append(list(temp_))\n \n temp_total.append(list(temp)) \ntraining_data = temp_total\n\n # np.append(np.array([1,2,3]),2)\n # interpolation\nfor id_path in range (len(training_data)):\n if len(training_data[id_path]) < 200:\n inter = np.array(training_data[id_path])\n inter_col0 = inter[:,0]\n inter_col1 = inter[:,1]\n inter_col2 = inter[:,2]\n inter_col3 = inter[:,3]\n inter_col4 = inter[:,4]\n x_0 = np.linspace(inter_col0[0],inter_col0[-1],num = len(inter_col0),endpoint = True)\n x_1 = np.linspace(inter_col1[0],inter_col1[-1],num = len(inter_col0),endpoint = True)\n x_2 = np.linspace(inter_col2[0],inter_col2[-1],num = len(inter_col0),endpoint = True)\n x_3 = np.linspace(inter_col3[0],inter_col3[-1],num = len(inter_col0),endpoint = True)\n x_4 = np.linspace(inter_col4[0],inter_col4[-1],num = len(inter_col0),endpoint = True)\n y_0 = inter_col0\n y_1 = inter_col1\n y_2 = inter_col2\n y_3 = inter_col3\n y_4 = inter_col4\n f_0 = interp1d(x_0,y_0)\n f_1 = interp1d(x_1,y_1)\n f_2 = interp1d(x_2,y_2)\n f_3 = interp1d(x_3,y_3)\n f_4 = interp1d(x_4,y_4)\n \n xnew_0 = np.linspace(inter_col0[0],inter_col0[-1],num = 200, endpoint = True)\n xnew_1 = np.linspace(inter_col1[0],inter_col1[-1],num = 200, endpoint = True)\n xnew_2 = np.linspace(inter_col2[0],inter_col2[-1],num = 200, endpoint = True)\n xnew_3 = np.linspace(inter_col3[0],inter_col3[-1],num = 200, endpoint = True)\n xnew_4 = np.linspace(inter_col4[0],inter_col4[-1],num = 200, endpoint = True)\n ynew_0 = f_0(xnew_0)\n ynew_1 = f_1(xnew_1)\n ynew_2 = f_2(xnew_2)\n ynew_3 = f_3(xnew_3)\n ynew_4 = f_4(xnew_4)\n new_inter = np.zeros((200,5))\n new_inter[:,0] = ynew_0\n new_inter[:,1] = 
ynew_1\n new_inter[:,2] = ynew_2\n new_inter[:,3] = ynew_3\n new_inter[:,4] = ynew_4\n training_data[id_path] = list(new_inter)\n \n \n#Normal\n#training_data[i]: carA latitude, carA longitude, carB latitude, carB longitude, velocity difference\nNormal_trainning_set = []\nv = []\nfor i in range(len(training_data)):\n sc = MinMaxScaler(feature_range = (0, 1))\n length = len(training_data[i])\n carLatitudes = np.zeros([1,2 * length])\n carLongitudes = np.zeros([1, 2 * length])\n #velocities = np.zeros([1, length])\n temp = np.array(training_data[i])\n carLatitudes[0,0:length] = temp[:,0]\n carLatitudes[0,length: 2*length] = temp[:,2]\n carLongitudes[0,0:length] = temp[:,1]\n carLongitudes[0,length: 2*length] = temp[:,3]\n# for k in range(length):\n# carLatitudes[0, k] = training_data[i][k][0]\n# carLatitudes[0, k + length] = training_data[i][k][2]\n# carLongitudes[0, k] = training_data[i][k][1]\n# carLongitudes[0, k + length] = training_data[i][k][3]\n# velocities[0, k] = training_data[i][k][4] \n carLatitudes = sc.fit_transform(np.transpose(carLatitudes))\n carLongitudes = sc.fit_transform(np.transpose(carLongitudes))\n temp[:,0] = carLatitudes[0:length,0]\n temp[:,2] = carLatitudes[length: 2*length,0]\n temp[:,1] = carLongitudes[0:length,0]\n temp[:,3] = carLongitudes[length: 2*length,0]\n #velocities = sc.fit_transform(velocities) \n# Normal = np.zeros((length, 5))\n# for k in range(length):\n# Normal[k, 0] = carLatitudes[0, k]\n# Normal[k, 1] = carLongitudes[0, k]\n# Normal[k, 2] = carLatitudes[0, k + length]\n# Normal[k, 3] = carLongitudes[0, k + length]\n# Normal[k, 4] = velocities[0, k] \n \n Normal_trainning_set.append(list(temp))\n v.append(list(temp[:,4]))\n\n\nmax_v = max(max(v))\nmin_v = min(min(v))\n\nfor i in range(len(training_data)):\n for j in range(len(training_data[i])):\n Normal_trainning_set[i][j][4] = Normal_trainning_set[i][j][4]/max_v\n \n \n\n\n\n\n#Sample\nSample_trainning_set = []\nfor i in range(len(Normal_trainning_set)):\n array_size = len(Normal_trainning_set[i])\n step = int(array_size/200)\n if step > 0:\n Normal = np.array(Normal_trainning_set[i])\n sample = Normal[0: 200*step: step]\n Sample_trainning_set.append(list(sample))\n \n\n","sub_path":"normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"429559118","text":"import random\nimport numpy as np\n\nfrom common.segment_tree import SumSegmentTree, MinSegmentTree\n\n\n# Replay Buffer\nclass ReplayBuffer(object):\n def __init__(self, size):\n self._storage = []\n self._maxsize = size\n self._next_idx = 0\n\n def add(self, obses_t, actions, rewards, obses_tp1, dones):\n experience = (obses_t, actions, rewards, obses_tp1, dones)\n if self._next_idx >= len(self._storage):\n self._storage.append(experience)\n else:\n self._storage[self._next_idx] = experience\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n def _encode_sample(self, idxes):\n obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []\n for i in idxes:\n experience = self._storage[i]\n obs_t, action, reward, obs_tp1, done = experience\n obses_t.append(obs_t)\n actions.append(action)\n rewards.append(reward)\n obses_tp1.append(obs_tp1)\n dones.append(done)\n return np.array(obses_t, copy=False), np.array(actions, copy=False), \\\n np.array(rewards, copy=False), np.array(obses_tp1, copy=False), \\\n np.array(dones, copy=False)\n\n def sample(self, batch_size):\n idxes = [random.randint(0, 
len(self._storage) - 1) for _ in range(batch_size)]\n return self._encode_sample(idxes)\n\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n def __init__(self, size, alpha):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def add(self, *args, **kwargs):\n \"\"\"See ReplayBuffer.store_effect\"\"\"\n idx = self._next_idx\n super().add(*args, **kwargs)\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] = self._max_priority ** self._alpha\n\n def _sample_proportional(self, batch_size):\n res = []\n p_total = self._it_sum.sum(0, len(self._storage) - 1)\n every_range_len = p_total / batch_size\n for i in range(batch_size):\n mass = random.random() * every_range_len + i * every_range_len\n idx = self._it_sum.find_prefixsum_idx(mass)\n res.append(idx)\n return res\n\n def sample(self, batch_size, beta):\n assert beta > 0\n\n idxes = self._sample_proportional(batch_size)\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage)) ** (-beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self._storage)) ** (-beta)\n weights.append(weight / max_weight)\n weights = np.array(weights, dtype=np.float32)\n encoded_sample = self._encode_sample(idxes)\n return tuple(list(encoded_sample) + [weights, idxes])\n\n def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)\n","sub_path":"distdeepq/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"51246282","text":"class Solution:\n def checkPalindromeFormation(self, a: str, b: str) -> bool:\n \"\"\"String.\n\n Running time: O(n) where n == len(a).\n \"\"\"\n i, j = 0, len(a) - 1\n while i < j and a[i] == b[j]:\n i += 1\n j -= 1\n s1, s2 = a[i:j+1], b[i:j+1]\n i, j = 0, len(a) - 1\n while i < j and b[i] == a[j]:\n i += 1\n j -= 1\n s3, s4 = a[i:j+1], b[i:j+1]\n return any(s == s[::-1] for s in [s1, s2, s3, s4])\n","sub_path":"solutions/1616_split_two_strings_to_make_palindrome.py","file_name":"1616_split_two_strings_to_make_palindrome.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314039717","text":"# -*- coding: utf-8 -*-\n\nimport csv\nimport os\nimport sys\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom io import StringIO\nfrom inspect import currentframe, getframeinfo\nfrom sqlalchemy import create_engine\nimport stock_comm as comm\nimport inspect\nimport traceback\nDEBUG=1\nLOG=1\ndef lno():\n cf = currentframe()\n filename = getframeinfo(cf).filename\n return '%s-L(%d)'%(os.path.basename(filename),inspect.currentframe().f_back.f_lineno)\ndef check_dst_folder(dstpath):\n if not os.path.isdir(dstpath):\n os.makedirs(dstpath) \ndef 
check(r):\n try:\n #print(lno(),r[0],type(r[0]))\n #print(lno(),r)\n if len(r['stock_id'])!=4:\n return 0\n if r['stock_id'].startswith('00'):\n return 0\n return 1\n except:\n #print(lno(),r[0],type(r[0]))\n return 0\n \n \n \n \ndef parse_stock_director_xq(startdate,enddate):\n nowdate=startdate\n while nowdate<=enddate :\n \n _csv='data/director/xq/director{}.csv'.format(nowdate.strftime('%Y%m'))\n if os.path.exists(_csv):\n dfs = pd.read_csv(_csv,encoding = 'big5hkscs',skiprows=5,header=None)\n try:\n #dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','董監持股佔股本比例']\n dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','d1','d2','董監持股','董監持股佔股本比例','符合條件數']\n except:\n print(lno(),dfs.iloc[0])\n dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','董監持股','董監持股佔股本比例','符合條件數']\n #raise\n #print(lno(),dfs.iloc[0])\n #raise \n d=dfs[['stock_id','董監持股','董監持股佔股本比例']].copy()\n d['date']=datetime(nowdate.year,nowdate.month,15)\n for i in range(0,len(dfs)):\n print(lno(),d.iloc[i])\n #raise\n stock_id=d.iloc[i]['stock_id'].replace('.TW','')\n comm.stock_read_sql_add_df(stock_id,'director',d[i:i+1])\n else:\n print(lno(),_csv) \n nowdate = nowdate + relativedelta(months=1) \ndef parse_xq_rr(year,season):\n\n date1=datetime(year,season*3,1)+relativedelta(months=1)\n date1=date1-relativedelta(days=1)\n while True:\n d1=comm.exchange_data('tse').get_df_date_parse(date1)\n if len(d1)==0:\n date1=date1-relativedelta(days=1)\n continue\n else:\n d2=comm.exchange_data('otc').get_df_date_parse(date1)\n d3=pd.concat([d1,d2])\n \n break\n d3['check_stock_id']=d3.apply(check,axis=1)\n d3=d3[d3['check_stock_id']==1].reset_index(drop=True)\n\n d_ref=d3[['stock_id','date','stock_name']].copy() \n \n print(lno(),d_ref.columns) \n _csv='xq_data/rr{}.{}Q.csv'.format(year,season)\n if os.path.exists(_csv):\n print(lno(),_csv)\n dfs = pd.read_csv(_csv,encoding = 'big5hkscs',skiprows=5,header=None)\n try:\n #dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','董監持股佔股本比例']\n #序號,\t代碼,\t商品,\t成交,\t漲幅%,\t總量,\t研發費用(百萬),\t符合條件數\n dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','研發費用(百萬)','符合條件數']\n except:\n print(lno(),dfs.iloc[0])\n dfs.columns=['序號','stock_id','stock_name','成交','漲幅%','總量','收盤價','區間漲幅','研發費用(百萬)','符合條件數']\n #raise\n def rmTW(r):\n return r['stock_id'].replace('.TW','')\n dfs['stock_id']=dfs.apply(rmTW,axis=1) \n d=dfs[['stock_id','研發費用(百萬)']].copy()\n df_o=pd.merge(d_ref,d,how='left').reset_index(drop=True)\n df_o['YQ']='{}.{}'.format(year,season)\n print(lno(),df_o)\n df_o.to_html('test.html',escape=False,index=False,sparsify=True,border=2,index_names=False)\n for i in range(0,len(df_o)):\n print(lno(),df_o.iloc[i])\n #raise\n stock_id=df_o.iloc[i]['stock_id']\n comm.stock_read_sql_add_df(stock_id,'RD_fee',df_o[i:i+1])\n #print(lno(),d.iloc[0])\n return df_o\n print(lno(),_csv)\n return pd.DataFrame()\n \n \ndef gen_director_good_list(date,debug=0):\n nowdate=date\n cnt=0\n while cnt<=3:\n _csv='data/director/xq/director{}.csv'.format(nowdate.strftime('%Y%m'))\n if os.path.exists(_csv):\n break\n nowdate=nowdate - relativedelta(months=1)\n cnt=cnt+1\n d=get_xq_month_df(nowdate)\n if len(d):\n prev_month = nowdate - relativedelta(months=1)\n d_prev=get_xq_month_df(prev_month)\n if len(d_prev):\n d_prev.columns=['stock_id','stock_name','前1月董監持股','前1月董監持股佔股本比例']\n df_out=pd.merge(d,d_prev)\n def calc_director_add(r):\n try:\n add= float(r['董監持股'])-float(r['前1月董監持股'])\n except:\n print(lno(),r) \n raise \n return add \n 
df_out['董監持股增減']=df_out.apply(calc_director_add,axis=1)\r\n            df_good=df_out[df_out['董監持股增減']>100].copy().reset_index(drop=True)\r\n            def removetw(r):\r\n                return r['stock_id'].replace('.TW','') \r\n            df_good['stock_id']=df_good.apply(removetw,axis=1)\r\n            \r\n            return df_good\r\n        else:\r\n            nowdate=nowdate - relativedelta(months=1)\r\n            cnt+=1\r\n    return pd.DataFrame() \r\n    \r\n    \r\n    \r\n    \r\n    \r\nif __name__ == '__main__':\r\n\r\n    if len(sys.argv)==1:\r\n        startdate=datetime.today().date()\r\n        \r\n        startdate=datetime(2019,8,1)\r\n        enddate=datetime(2020,3,1)\r\n        parse_stock_director_xq(startdate,enddate)\r\n    elif sys.argv[1]=='rr' :\r\n        year =int(sys.argv[2])\r\n        season =int(sys.argv[3])\r\n        parse_xq_rr(year,season)\r\n        \r\n        \r\n        \r\n    elif sys.argv[1]=='gen' :\r\n        if len(sys.argv)==3 :\r\n            #arg 2: start date \r\n            datatime=datetime.strptime(sys.argv[2],'%Y%m%d')\r\n            gen_revenue_final_file(datatime)\r\n        else :\r\n            print (lno(),'func -g date')\r\n    elif sys.argv[1]=='get' :\r\n        if len(sys.argv)==4 :\r\n            #arg 2: stock id, arg 3: start date \r\n            stock_id=sys.argv[2]\r\n            datatime=datetime.strptime(sys.argv[3],'%Y%m%d')\r\n            #get_revenue_by_stockid_bydate(stock_id,datatime)\r\n            #get_revenue_by_stockid(stock_id,datatime)\r\n            df=sql_data.get_by_date(datatime)\r\n            print(lno(),df)\r\n    elif sys.argv[1]=='sql' :\r\n        \r\n        startdate=datetime.strptime(sys.argv[2],'%Y%m%d')\r\n        try:\r\n            enddate=datetime.strptime(sys.argv[3],'%Y%m%d')\r\n        except:\r\n            enddate=startdate\r\n        now_date = startdate \r\n        while now_date<=enddate :\r\n            #down_tse_monthly_report(int(now_date.year),int(now_date.month))\r\n            #down_otc_monthly_report(int(now_date.year),int(now_date.month))\r\n            sql_data.download(now_date)\r\n            #gen_revenue_final_file(now_date)\r\n            now_date = now_date + relativedelta(months=1)\r\n        \r\n        \r\n    else:\r\n        print (lno(),\"unsupported \")\r\n        sys.exit()\r\n    \r\n    ","sub_path":"xq.py","file_name":"xq.py","file_ext":"py","file_size_in_byte":7638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471595195","text":"# --- Collatz's Conjecture --- #\nx = int(input(\"Pick a number!: \"))\ncounter = 0\n\nwhile (x > 0):\n    if x == 1:\n        break\n    # --- Checks for EVEN number --- #\n    elif (x % 2) == 0:\n        x = int(x / 2)\n        print(x)\n        counter += 1\n    # --- Otherwise ODD --- #\n    else:\n        x = int((3 * x) + 1)\n        print(x)\n        counter += 1\n        continue\n# --- Steps taken to get to 1 --- #\nprint(\"steps = {}\".format(counter))","sub_path":"collatz_conjecture.py","file_name":"collatz_conjecture.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574653880","text":"import json\nimport os\n\nfilepath = \"../games/\"\nfilepath = \"J:/Isaac/School/2017/Classes/CMPM290A/mobygames/dataset/\"\ndirectory = 'dos_games'\ndirectory = 'all'\n\nwith open('./' + directory + '_game_stream.tsv', 'w+', encoding='utf8') as tsv:\n    for file in os.listdir(filepath + directory):\n        print(file)\n        if \".swp\" in file:\n            continue\n        else:\n            with open(filepath + directory + '/' + file, 'rt', encoding='utf8') as json_data:\n                ugly_data = json.load(json_data)\n                for game in ugly_data:\n                    platforms = game['platforms']\n                    game_id = 'G' + str(10000000 + game['game_id'])\n                    title = game['title'].replace('\\t', '').replace('\\r\\n', '')\n                    for platform in platforms:\n                        credits = platform['credits']\n                        release_date = platform['releases'][0]['release_date']\n                        platform_name = str(platform['platform_name'])\n                        platform_id = str(platform['platform_id'])\n                        for credits_entry in credits:\n                            role = credits_entry['role']\n                            role_type = str(credits_entry['role_type']).replace('\\t', '').replace('\\r\\n', '')\n                            role_type_id = 
str(credits_entry['role_type_id'])\n                            if role_type_id is None:\n                                continue\n                            role_type_id = 'R' + str(role_type_id)\n                            for credit in credits_entry['credits']:\n                                dev_id = credit['developer_id']\n                                if dev_id is None:\n                                    continue\n                                dev_id = 'D' + str(100000000 + dev_id)\n                                dev_name = credit['name'].replace('\\r\\n', '')\n                                tsv_line = dev_name\n                                print(tsv_line)\n                                tsv.write(tsv_line + '\\n')\n","sub_path":"python/extract_names.py","file_name":"extract_names.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574793777","text":"from rest_framework import serializers\nfrom hospital.models import HospitalsData\n\nclass HospitalsDataSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = HospitalsData\n        fields =('h_name','address','long','lati','free')\n        extra_kwargs = {\n            'h_name' : {'required': False},\n            'address': {'required': False},\n            'long' : {'required': False},\n            'lati' : {'required': False},\n            'free': {'write_only': True},\n        }\n","sub_path":"hospital/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422098271","text":"#https://www.urionlinejudge.com.br/judge/pt/problems/view/1045\n\nimport math\n\nA, B, C = input().split(' ')\nA = float(A)\nB = float(B)\nC = float(C)\n\nif B > A and B > C:\n    aux_maior = B\n    B = A\n    A = aux_maior\n\nelif C > A and C > B:\n    aux_maior = C\n    C = A\n    A = aux_maior\n\nif A >= (B + C):\n    print(\"NAO FORMA TRIANGULO\")\n\nelif math.pow(A, 2) == math.pow(B, 2) + math.pow(C, 2):\n    print(\"TRIANGULO RETANGULO\")\nelif math.pow(A, 2) > math.pow(B, 2) + math.pow(C, 2):\n    print(\"TRIANGULO OBTUSANGULO\")\nelif math.pow(A, 2) < math.pow(B, 2) + math.pow(C, 2):\n    print(\"TRIANGULO ACUTANGULO\")\n\nif A == B and B == C:\n    print(\"TRIANGULO EQUILATERO\")\n\nelif (A == B and B != C) or (B == C and C != A) or (A == C and C != B):\n    print(\"TRIANGULO ISOSCELES\")\n","sub_path":"solution_1045.py","file_name":"solution_1045.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"618574657","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport lightgbm as lgb\nimport gc\nfrom sklearn.metrics import f1_score\nimport joblib\n\ndef save_model(model, model_path):\n    joblib.dump(model, model_path)\n\ndef load_model(model_path):\n    return joblib.load(model_path)\n\nclass LgbConfig(object):\n    def __init__(self):\n        self.params = {\n            'boosting_type': 'gbdt', # dart goss rf\n            'objective': 'binary', # binary classification\n            'metric':'binary_logloss', # auc can also be used\n            'num_leaves': 50,\n            'learning_rate': 0.01,\n            'bagging_fraction': 0.9,\n            'bagging_freq': 1,\n            'bagging_seed': 55,\n            'seed': 77,\n            'max_bin': 255,\n            \n            'nthread': -1,\n            'max_depth': -1,\n            'verbose': 0\n        }\n        \n        self.nfold = 5\n        self.seed = 666\n        self.num_boost_round = 5000\n        self.early_stopping_rounds = 100\n        self.verbose_eval = 100\n        \n        self.flag = 1 # 1: smaller metric is better; -1 means larger is better\n        self.min_merror = float('Inf')\n        \n        self.thres = 0.5\n\n\nclass SwordLgbClassifier(object):\n    \"\"\"\n    best lgb sword\n    \"\"\"\n    def __init__(self, config):\n        self.config = config\n        self.cv_score = 0\n        self.best_rounds = 100\n        self.model = None\n\n    def use_metric_auc(self, config):\n        self.config.params['metric'] = 'auc'\n        self.config.flag = -1\n        self.config.min_merror *= -1\n        self.model = None\n\n    def get_best_thres(self, data, 
label, score_func = f1_score):\n \"\"\"\n score_func must have two params in order\n 1: true_label\n 2: pred_label\n \"\"\"\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))\n\n def get_lgb_dataset(self, data, label, feature_name = \"auto\", categorical_feature = \"auto\", weight = None):\n return lgb.Dataset(data, label = label, feature_name = [], categorical_feature = [], weight = weight)\n\n def load_binary_dataset(self, filepath):\n return lgb.Dataset(filepath)\n\n def save_binary_dataset(self, data, filepath):\n \"\"\"\n eg: filepath = train.bin\n \"\"\"\n data.save_binary(filepath)\n\n def get_best_rounds_by_cv(self, lgbdata):\n cv_results = lgb.cv(\n params = self.config.params,\n train_set = lgbdata,\n seed = self.config.seed,\n nfold = self.config.nfold,\n num_boost_round = self.config.num_boost_round,\n early_stopping_rounds = self.config.early_stopping_rounds,\n verbose_eval = self.config.verbose_eval\n )\n if self.config.flag == -1:\n self.best_rounds = pd.Series(cv_results[self.config.params['metric'] + '-mean']).idxmax()\n self.cv_score = pd.Series(cv_results[self.params['metric'] + '-mean']).max()\n else:\n self.best_rounds = pd.Series(cv_results[self.config.params['metric'] + '-mean']).idxmin()\n self.cv_score = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n print (\"cv: best rounds:%d cv score:0.4f\" % (self.best_rounds, self.cv_score))\n\n def get_lgb_model(self, lgbdata):\n self.model = lgb.train(self.config.params, lgbdata, num_boost_round = self.best_rounds)\n return True\n \n def find_best_params(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n self.adj_leaves_depth(lgbdata, seed, nfold, early_stopping_rounds)\n self.adj_bin_leafdata(lgbdata, seed, nfold, early_stopping_rounds)\n self.adj_fraction(lgbdata, seed, nfold, early_stopping_rounds)\n self.adj_lambda(lgbdata, seed, nfold, early_stopping_rounds)\n self.adj_eta(lgbdata, seed, nfold, early_stopping_rounds)\n return True\n \n def adj_leaves_depth(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n best_params = {}\n for num_leaves in range(20,200,10):\n for max_depth in range(3,8,1):\n self.config.params['num_leaves'] = num_leaves\n self.config.params['max_depth'] = max_depth \n cv_results = lgb.cv(\n self.config.params,\n lgbdata,\n seed = seed,\n nfold = nfold,\n early_stopping_rounds = early_stopping_rounds,\n verbose_eval = 0\n )\n if self.config.flag == -1:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).max()\n else:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n\n if mean_merror * self.config.flag < self.config.min_merror * self.config.flag:\n self.config.min_merror = mean_merror\n best_params['num_leaves'] = num_leaves\n best_params['max_depth'] = max_depth\n self.config.params.update(best_params)\n \n def adj_bin_leafdata(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n best_params = {}\n for max_bin in range(100,255,10):\n for min_data_in_leaf in range(10,200,10):\n self.config.params['max_bin'] = max_bin\n self.config.params['min_data_in_leaf'] = min_data_in_leaf\n cv_results = lgb.cv(\n self.config.params,\n lgbdata,\n seed = seed,\n nfold = nfold,\n 
early_stopping_rounds = early_stopping_rounds,\n verbose_eval = 0\n )\n if self.config.flag == -1:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).max()\n else:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n\n if mean_merror * self.config.flag < self.config.min_merror * self.config.flag:\n self.config.min_merror = mean_merror\n best_params['max_bin']= max_bin\n best_params['min_data_in_leaf'] = min_data_in_leaf\n self.config.params.update(best_params)\n \n def adj_fraction(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n best_params = {}\n for feature_fraction in [0.6,0.7,0.8,0.9]:\n for bagging_fraction in [0.6,0.7,0.8,0.9]:\n self.config.params['feature_fraction'] = feature_fraction\n self.config.params['bagging_fraction'] = bagging_fraction\n cv_results = lgb.cv(\n self.config.params,\n lgbdata,\n seed = seed,\n nfold = nfold,\n early_stopping_rounds = early_stopping_rounds,\n verbose_eval = 0\n )\n if self.config.flag == -1:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).max()\n else:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n\n if mean_merror * self.config.flag < self.config.min_merror * self.config.flag:\n self.config.min_merror = mean_merror\n best_params['feature_fraction'] = feature_fraction\n best_params['bagging_fraction'] = bagging_fraction\n self.config.params.update(best_params)\n \n def adj_lambda(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n best_params = {}\n for lambda_l1 in [0.0,0.2,0.4,0.6,0.8,1.0]:\n for lambda_l2 in [0.0,0.2,0.4,0.6,0.8,1.0]:\n for min_split_gain in [0.0,1.0]:\n self.config.params['lambda_l1'] = lambda_l1\n self.config.params['lambda_l2'] = lambda_l2\n self.config.params['min_split_gain'] = min_split_gain\n cv_results = lgb.cv(\n self.config.params,\n lgbdata,\n seed = seed,\n nfold = nfold,\n early_stopping_rounds = early_stopping_rounds,\n verbose_eval = 0\n )\n if self.config.flag == -1:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).max()\n else:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n\n if mean_merror * self.config.flag < self.config.min_merror * self.config.flag:\n self.config.min_merror = mean_merror\n best_params['lambda_l1'] = lambda_l1\n best_params['lambda_l2'] = lambda_l2\n best_params['min_split_gain'] = min_split_gain\n self.config.params.update(best_params)\n \n def adj_eta(self, lgbdata, seed = 66, nfold = 5, early_stopping_rounds = 100):\n best_params = {}\n for eta in [0.01, 0.015, 0.025, 0.05, 0.1]:\n self.config.params['learning_rate'] = eta\n cv_results = lgb.cv(\n self.config.params,\n lgbdata,\n seed = seed,\n nfold = nfold,\n num_boost_round = self.config.num_boost_round,\n early_stopping_rounds = early_stopping_rounds,\n verbose_eval = 0\n )\n if self.config.flag == -1:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).max()\n else:\n mean_merror = pd.Series(cv_results[self.config.params['metric'] + '-mean']).min()\n\n if mean_merror * self.config.flag < self.config.min_merror * self.config.flag:\n self.config.min_merror = mean_merror\n best_params['learning_rate'] = eta\n self.config.params.update(best_params)\n\n\n","sub_path":"lightgbm/lgb_classify.py","file_name":"lgb_classify.py","file_ext":"py","file_size_in_byte":11134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"556610025","text":"from flask import abort\nfrom flask_restplus import fields\n\nfrom flask_utilities.fields import Email\n\n\nclass CustomValidator(object):\n @classmethod\n def validate_payload(cls, payload, api_model):\n for key in api_model:\n if api_model[key].required and key not in payload:\n abort(400, 'Required field \\'%s\\' missing' % key)\n # check payload\n for key in payload:\n field = api_model[key]\n if isinstance(field, fields.List):\n field = field.container\n data = payload[key]\n else:\n data = [payload[key]]\n if isinstance(field, Email) and hasattr(field, 'validate'):\n for i in data:\n if not field.validate(i):\n abort(400, 'Validation of \\'%s\\' field failed' % key)\n","sub_path":"flask_utilities/validators/custom_validator.py","file_name":"custom_validator.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92161603","text":"'''Invert CNN feature to reconstruct image: Reconstruct image from CNN features using gradient descent with momentum.\n\nAuthor: Ken SHIRAKAWA \n'''\nimport os\nfrom datetime import datetime\nfrom scipy.io import savemat\nimport numpy as np\nimport PIL.Image\nimport torch.optim as optim\nimport torch\nfrom loss import *\nfrom utils import img_deprocess, img_preprocess, normalise_img, \\\n vid_deprocess, normalise_vid, vid_preprocess, clip_extreme_value, get_cnn_features, create_feature_masks,\\\n save_video, save_gif, gaussian_blur, clip_small_norm_value\n\n\n\ndef reconstruct_stim(features, net, net_gen, \n img_mean=np.array((0, 0, 0)).astype(np.float32),\n img_std=np.array((1, 1, 1)).astype(np.float32),\n norm=255,\n bgr=False,\n layer_weight = None, \n channel= None, feature_masks = None, mask = None,\n feat_size_gen = (4096,),\n image_input_size=(1, 3, 224, 224), # for pytorch input\n img_size_gen = (1, 3, 256, 256),\n initial_gen_feat = None,\n feat_upper_bound = 100., feat_lower_bound=0.,\n loss_type='l2', iter_n = 200, loss_weight = None, \n lr_start = 2., lr_end=1e-10,\n momentum_start=0.9, momentum_end=0.9,\n decay_start=0.01, decay_end=0.01,\n disp_every=1,\n grad_normalize = True, # this is differ from original caffe model\n save_intermediate=False, save_intermediate_every=1,save_intermediate_path = None,\n return_gen_feat = False,\n opt_name='SGD'):\n if loss_type == \"l2\":\n loss_fun = torch.nn.MSELoss(reduction='sum')\n elif loss_type == \"L2_with_reg\":\n loss_fun = MSE_with_regulariztion(L_lambda=lamda, alpha=p, TV_lambda=TVlambda)\n elif loss_type == \"CorrLoss\":\n loss_fun = CorrLoss()\n elif loss_type == \"FeatCorrLoss\":\n loss_fun = FeatCorrLoss()\n elif loss_type == \"MSE_Corr_FeatCorr\":\n loss_fun = MergeLoss([torch.nn.MSELoss(reduction='sum'),CorrLoss(), FeatCorrLoss() ], weight_list = loss_weight)\n elif loss_type == \"MSE_FeatCorr\":\n loss_fun = MergeLoss([torch.nn.MSELoss(reduction='sum'), FeatCorrLoss() ], weight_list = loss_weight)\n elif loss_type == \"MSE_Corr\":\n loss_fun = MergeLoss([torch.nn.MSELoss(reduction='sum'), CorrLoss() ], weight_list = loss_weight)\n else:\n assert loss_type + ' is not correct'\n \n # make save dir\n if save_intermediate:\n if save_intermediate_path is None:\n save_intermediate_path = os.path.join('..', 'recon_img_by_dgn_icnn' + datetime.now().strftime('%Y%m%dT%H%M%S'))\n if not os.path.exists(save_intermediate_path):\n os.makedirs(save_intermediate_path)\n \n #initial feature\n\n if initial_gen_feat is None:\n initial_gen_feat = np.random.normal(0,1, 
feat_size_gen).astype(np.float32)\n\n if save_intermediate:\n save_name = 'initial_gen_feat.mat'\n savemat(os.path.join(save_intermediate_path, save_name), {'initial_gen_feat': initial_gen_feat})\n \n #image_size \n img_size = image_input_size \n\n #top left offset for cropping the output image to get match image size\n top_left = ((img_size_gen[2] - img_size[2])//2,\n (img_size_gen[3] -img_size[3])//2)\n \n # layer_list\n layer_dict = features\n layer_list = list(features.keys())\n\n # number of layers\n num_of_layer = len(layer_list)\n\n # layer weight\n if layer_weight is None:\n weights = np.ones(num_of_layer)\n weights = np.float32(weights)\n weights = weights / weights.sum()\n layer_weight = {}\n for j, layer in enumerate(layer_list):\n layer_weight[layer] = weights[j]\n \n # feature mask\n if feature_masks is None:\n feature_masks = create_feature_masks(layer_dict, masks=mask, channels=channel)\n \n \n #iteration for optimization\n feat_gen = initial_gen_feat.copy()\n\n\n loss_list = np.zeros(iter_n, dtype='float32')\n \n for t in range(iter_n):\n feat_gen = torch.tensor(feat_gen).requires_grad_()\n feat_gen.retain_grad()\n if feat_gen.grad is None:\n feat_gen.grad = torch.zeros_like(feat_gen).detach()\n\n #parameter \n # parameters\n lr = lr_start + t * (lr_end - lr_start) / iter_n\n momentum = momentum_start + t * (momentum_end - momentum_start) / iter_n\n decay = decay_start + t * (decay_end - decay_start) / iter_n\n\n\n if opt_name == 'Adam':\n #op = optim.Adam([input], lr = lr)\n op = optim.Adam([feat_gen], lr = lr)\n elif opt_name == 'SGD':\n op = optim.SGD([feat_gen], lr=lr, momentum=momentum)\n #op = optim.SGD([input], lr=lr)\n elif opt_name == 'Adadelta':\n op = optim.Adadelta([feat_gen],lr = lr)\n elif opt_name == 'Adagrad':\n op = optim.Adagrad([feat_gen], lr = lr)\n elif opt_name == 'AdamW':\n op = optim.AdamW([feat_gen], lr = lr)\n elif opt_name == 'SparseAdam':\n op = optim.SparseAdam([feat_gen], lr = lr)\n elif opt_name == 'Adamax':\n op = optim.Adamax([feat_gen], lr = lr)\n elif opt_name == 'ASGD':\n op = optim.ASGD([feat_gen], lr = lr)\n\n elif opt_name == 'RMSprop':\n op = optim.RMSprop([feat_gen], lr = lr)\n elif opt_name == 'Rprop':\n op = optim.Rprop([feat_gen], lr = lr)\n\n # forward for generator\n\n img0 = net_gen(feat_gen).requires_grad_()\n img0.retain_grad()\n\n final_gen_feat = feat_gen.clone() #keep featgen for return_gen_out\n \n \n if t==0: #and save_intermediate:\n if len(img_size) == 4:\n #image\n save_img = img_deprocess(img0[0].detach().numpy(), img_mean, img_std, norm)\n save_name = 'initial_image.jpg'\n if bgr:\n PIL.Image.fromarray(np.uint8(save_img[...,[2,1,0]])).save(os.path.join(save_intermediate_path, save_name))\n else:\n PIL.Image.fromarray(np.uint8(save_img)).save(os.path.join(save_intermediate_path, save_name))\n elif len(img_size) == 5:\n # video\n save_vid = vid_deprocess(img0[0].detach().numpy(), img_mean, img_std, norm)\n # if you install cv2 and ffmpeg, you can use save_video function which save preferred video as video format\n save_name = 'initial_video.avi'\n save_video(save_vid, save_name, save_intermediate_path, bgr)\n\n save_name = 'initial_video.gif'\n save_gif(save_vid, save_name, save_intermediate_path, bgr,\n fr_rate=150)\n\n else:\n print('Input size is not appropriate for save')\n assert len(input_size) not in [3,4]\n \n #image clip\n img_mask = np.zeros_like(img0.detach().numpy())\n img_mask[:,:, top_left[0]: top_left[0] + img_size[2], \n top_left[1]: top_left[1] + img_size[3]\n ] = 1\n input = torch.masked_select(img0, 
torch.FloatTensor(img_mask).bool()).view(img_size).requires_grad_()\n input.retain_grad()\n if input.grad is None:\n input.grad = torch.zeros_like(input).detach()\n \n # forward\n fw = get_cnn_features(net, input, features.keys())\n \n # backward for net\n err = 0.\n loss = 0.\n # set the grad of network to 0\n net.zero_grad()\n net_gen.zero_grad()\n op.zero_grad()\n for j in range(num_of_layer):\n\n # op.zero_grad()\n\n target_layer_id = num_of_layer -1 -j\n target_layer = layer_list[target_layer_id]\n # extract activation or mask at input true video, and mask\n act_j = fw[target_layer_id].clone()\n feat_j = features[target_layer].clone()\n mask_j = feature_masks[target_layer]\n\n layer_weight_j = layer_weight[target_layer]\n\n masked_act_j = torch.masked_select(act_j, torch.FloatTensor(mask_j).bool())\n masked_feat_j = torch.masked_select(feat_j, torch.FloatTensor(mask_j).bool())\n #if loss_type == 'FeatCorrLoss':\n masked_act_j = masked_act_j.view(act_j.shape)\n masked_feat_j = masked_feat_j.view(feat_j.shape)\n # calculate loss using pytorch loss function\n loss_j = loss_fun(masked_act_j, masked_feat_j) * layer_weight_j\n\n # backward the gradient to the video\n loss_j.backward(retain_graph=True)\n\n loss += loss_j.detach().numpy()\n if grad_normalize:\n\n grad_mean = torch.abs(feat_gen.grad).mean()\n\n if grad_mean > 0:\n feat_gen.grad /= grad_mean\n \n op.step()\n feat_gen = feat_gen.detach().numpy()\n err = err + loss\n loss_list[t] = loss\n \n #L2 decay\n feat_gen = (1 - decay) * feat_gen\n \n #clip feat\n if feat_lower_bound is not None:\n feat_gen = np.maximum(feat_gen, feat_lower_bound)\n if feat_upper_bound is not None:\n feat_gen = np.minimum(feat_gen, feat_upper_bound)\n \n \n # disp info\n if (t+1) % disp_every == 0:\n print('iter=%d; err=%g;' % (t+1, err))\n \n feat_gen = feat_gen.astype(np.float32)\n # save image\n if save_intermediate and ((t+1) % save_intermediate_every == 0):\n input_ = input.detach().numpy()[0]\n if bgr:\n input_ = input_[[2,1,0]]\n if len(input_) == 3:\n save_name = '%05d.jpg' % (t+1)\n PIL.Image.fromarray(normalise_img(img_deprocess(input_, img_mean, img_std, norm))).save(\n os.path.join(save_intermediate_path, save_name))\n else:\n save_stim = input_\n # if you install cv2 and ffmpeg, you can use save_video function which save preferred video as video format\n save_name = '%05d.avi' % (t + 1)\n save_video(normalise_vid(vid_deprocess(save_stim, img_mean, img_std, norm)), save_name,\n save_intermediate_path, bgr, fr_rate=30)\n save_name = '%05d.gif' % (t + 1)\n save_gif(normalise_vid(vid_deprocess(save_stim, img_mean, img_std, norm)), save_name,\n save_intermediate_path,\n bgr, fr_rate=150)\n \n # return img\n input = input.detach().numpy()[0]\n \n \n if len(input) == 3:\n return img_deprocess(input, img_mean, img_std, norm), loss_list\n else:\n return vid_deprocess(input, img_mean, img_std, norm), loss_list\n\n\n\n","sub_path":"icnn_torch/icnn_DGN.py","file_name":"icnn_DGN.py","file_ext":"py","file_size_in_byte":11054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"324499880","text":"import os\nimport time\nimport json\nimport operator\nfrom threading import Thread\n\nimport numpy as np\nimport pyaudio\nimport datetime\nimport math\n\ntry:\n import tflite_runtime.interpreter as tflite\nexcept:\n from tensorflow import lite as tflite\n\nfrom config import config as cfg\nfrom utils import audio\nfrom utils import image\nfrom utils import log\nimport 
warnings\nwarnings.filterwarnings(\"ignore\")\n\nDET = {}\nFRAMES = np.array([], dtype='float32')\nINTERPRETER = None\nINPUT_LAYER_INDEX = -1\nOUTPUT_LAYER_INDEX = -1\n\ndef openStream(): \n\n try:\n\n # Setup pyaudio\n paudio = pyaudio.PyAudio()\n info = paudio.get_host_api_info_by_index(0)\n numdevices = info.get('deviceCount')\n for i in range(0, numdevices):\n if (paudio.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:\n print(\"Input Device id \", i, \" - \", paudio.get_device_info_by_host_api_device_index(0, i).get('name'))\n # Stream Settings\n stream = paudio.open(format=pyaudio.paFloat32,\n input_device_index=1,\n channels=1,\n rate=cfg['SAMPLE_RATE'],\n input=True,\n frames_per_buffer=cfg['SAMPLE_RATE'] // 2)\n\n return stream\n\n except:\n return None\n\ndef record():\n\n global FRAMES\n\n # Open stream\n stream = openStream()\n \n while not cfg['KILL_ALL']:\n\n try:\n\n # Read from stream\n data = stream.read(cfg['SAMPLE_RATE'] // 2)\n data = np.fromstring(data, 'float32');\n FRAMES = np.concatenate((FRAMES, data))\n\n # Truncate frame count\n FRAMES = FRAMES[-int(cfg['SAMPLE_RATE'] * cfg['SPEC_LENGTH']):]\n\n except KeyboardInterrupt:\n cfg['KILL_ALL'] = True\n break\n except:\n FRAMES = np.array([], dtype='float32')\n stream = openStream()\n continue\n\ndef loadModel(model_file, config_file):\n\n global INPUT_LAYER_INDEX\n global OUTPUT_LAYER_INDEX\n global MDATA_INPUT_INDEX\n global CLASSES\n\n # Load TFLite model and allocate tensors.\n #interpreter = tflite.Interpreter(model_path='model.tflite')\n interpreter = tflite.Interpreter(model_path='model.tflite')\n interpreter.allocate_tensors()\n\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # Get input tensor index\n INPUT_LAYER_INDEX = input_details[0]['index']\n MDATA_INPUT_INDEX = input_details[1]['index']\n OUTPUT_LAYER_INDEX = output_details[0]['index']\n\n # Load labels\n CLASSES = []\n with open('labels.txt', 'r') as lfile:\n for line in lfile.readlines():\n CLASSES.append(line.replace('\\n', ''))\n\n return interpreter \n\ndef getSpeciesList():\n\n # Add selected species to white list\n cfg['WHITE_LIST'] = [# Species that have a sound file\n 'Sturnus vulgaris_European Starling',\n 'Delichon urbicum_Common House-Martin',\n 'Linaria cannabina_Eurasian Linnet',\n 'Ficedula hypoleuca_European Pied Flycatcher',\n 'Regulus regulus_Goldcrest',\n 'Emberiza citrinella_Yellowhammer',\n 'Cyanistes caeruleus_Eurasian Blue Tit',\n 'Phylloscopus collybita_Common Chiffchaff',\n 'Carduelis carduelis_European Goldfinch',\n # Additional species\n 'Parus major_Great Tit',\n 'Passer domesticus_House Sparrow',\n 'Erithacus rubecula_European Robin',\n 'Phoenicurus ochruros_Black Redstart',\n 'Fringilla coelebs_Common Chaffinch',\n 'Turdus merula_Eurasian Blackbird'\n ]\n\ndef convertMetadata(m):\n\n # Convert week to cosine\n if m[2] >= 1 and m[2] <= 48:\n m[2] = math.cos(math.radians(m[2] * 7.5)) + 1 \n else:\n m[2] = -1\n\n # Add binary mask\n mask = np.ones((3,))\n if m[0] == -1 or m[1] == -1:\n mask = np.zeros((3,))\n if m[2] == -1:\n mask[2] = 0.0\n\n return np.concatenate([m, mask])\n\ndef getInput(sig):\n\n if cfg['INPUT_TYPE'] == 'raw': \n\n # Prepare as input\n sample = audio.prepare(sig)\n\n else: \n spec = audio.getSpec(sig,\n rate=cfg['SAMPLE_RATE'],\n fmin=cfg['SPEC_FMIN'],\n fmax=cfg['SPEC_FMAX'],\n win_len=cfg['WIN_LEN'],\n spec_type=cfg['SPEC_TYPE'],\n magnitude_scale=cfg['MAGNITUDE_SCALE'],\n 
bandpass=True,\n shape=(64, 384))\n\n # DEBUG: Save spec?\n if cfg['DEBUG_MODE']:\n image.saveSpec(spec, os.path.join(cfg['LOG_DIR'], 'spec.jpg'))\n\n # Prepare as input\n sample = image.prepare(spec)\n\n return sample\n\ndef splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):\n\n # Split signal with overlap\n sig_splits = []\n for i in range(0, len(sig), int((seconds - overlap) * rate)):\n split = sig[i:i + int(seconds * rate)]\n\n # End of signal?\n if len(split) < int(minlen * rate):\n break\n \n # Signal chunk too short? Fill with zeros.\n if len(split) < int(rate * seconds):\n temp = np.zeros((int(rate * seconds)))\n temp[:len(split)] = split\n split = temp\n \n sig_splits.append(split)\n\n return sig_splits\n\n\ndef flat_sigmoid(x, sensitivity=-1):\n return 1 / (1.0 + np.exp(sensitivity * x))\n\ndef predictionPooling(p, sensitivity=-1, mode='avg'):\n\n # Apply sigmoid function\n p = flat_sigmoid(p, sensitivity)\n\n # Mean exponential pooling for monophonic recordings\n if mode == 'mexp':\n p_pool = np.mean((p * 2.0) ** 2, axis=0)\n\n # Simple average pooling\n else: \n p_pool = np.mean(p, axis=0)\n \n p_pool[p_pool > 1.0] = 1.0\n\n return p_pool\n \n \ndef custom_sigmoid(x, sensitivity=1.0):\n return 1 / (1.0 + np.exp(-sensitivity * x))\n\ndef predict(sample, interpreter, sensitivity):\n\n # Make a prediction\n interpreter.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))\n interpreter.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))\n interpreter.invoke()\n prediction = interpreter.get_tensor(OUTPUT_LAYER_INDEX)[0]\n\n # Apply custom sigmoid\n p_sigmoid = custom_sigmoid(prediction, sensitivity)\n\n # Get label and scores for pooled predictions\n p_labels = dict(zip(CLASSES, p_sigmoid))\n\n # Sort by score\n p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)\n\n # Remove species that are on blacklist\n for i in range(min(10, len(p_sorted))):\n if p_sorted[i][0] in ['Human_Human', 'Non-bird_Non-bird', 'Noise_Noise']:\n p_sorted[i] = (p_sorted[i][0], 0.0)\n\n # Only return first the top ten results\n return p_sorted[:10]\n \n \ndef analyzeAudioData(chunks,interpreter):\n\n my_date = datetime.date.today() # current date\n year, week_num, day_of_week = my_date.isocalendar()\n detections = {}\n start = time.time()\n print('ANALYZING AUDIO...', end=' ', flush=True)\n\n # Convert and prepare metadata\n mdata = convertMetadata(np.array([52.379189, -4.899431, week_num]))\n mdata = np.expand_dims(mdata, 0)\n\n # Parse every chunk\n pred_start = 0.0\n for c in chunks:\n\n # Prepare as input signal\n sig = np.expand_dims(c, 0)\n\n # Make prediction\n p = predict([sig, mdata], interpreter, 1)\n\n # Save result and timestamp\n pred_end = pred_start + 3.0\n detections[str(pred_start) + ';' + str(pred_end)] = p\n pred_start = pred_end - 0.0\n\n print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')\n\n return detections\n\ndef analyzeStream(interpreter):\n global FRAMES\n # Time\n start = time.time()\n\n # Get signal from FRAMES\n sig = FRAMES.copy()\n print('Sample length: ' + str(len(sig)) + '/' + str(int(cfg['SAMPLE_RATE'] * cfg['SPEC_LENGTH'])))\n # Do we have enough frames?\n if len(sig) < cfg['SAMPLE_RATE'] * cfg['SPEC_LENGTH']:\n return None\n \n # Split audio into 3-second chunks\n chunks = splitSignal(sig, cfg['SAMPLE_RATE'], 0.0)\n print('DONE! 
READ', str(len(chunks)), 'CHUNKS.')\n\n my_date = datetime.date.today() # current date\n year, week_num, day_of_week = my_date.isocalendar()\n detections = {}\n start = time.time()\n print('ANALYZING AUDIO...', end=' ', flush=True)\n\n # Convert and prepare metadata\n # mdata = convertMetadata(np.array([52.379189, -4.899431, week_num])) # amsterdam\n mdata = convertMetadata(np.array([51.6978, 5.3037, week_num])) # den bosch\n mdata = np.expand_dims(mdata, 0)\n\n # Parse every chunk\n pred_start = 0.0\n for c in chunks:\n # Prepare as input signal\n sig = np.expand_dims(c, 0)\n\n # Make prediction\n p = predict([sig, mdata], interpreter, 1)\n\n # Save result and timestamp\n pred_end = pred_start + 3.0\n detections[str(pred_start) + ';' + str(pred_end)] = p\n pred_start = pred_end - 0.0\n\n \n print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')\n \n FRAMES = np.array([], dtype='float32')\n \n my_list = list()\n rcnt = 0\n for d in detections:\n for entry in detections[d]:\n if entry[1] >= 0.1:\n print(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]))\n my_list.append(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]))\n rcnt += 1\n #time.sleep(3)\n return my_list\n\ndef run():\n\n # Load model\n interpreter = loadModel(cfg['MODEL_PATH'], cfg['CONFIG_PATH'])\n\n # Load species list\n getSpeciesList()\n\n # Start recording\n log.p(('STARTING RECORDING WORKER'))\n recordWorker = Thread(target=record, args=())\n recordWorker.start()\n\n # Keep running...\n log.p(('STARTING ANALYSIS'))\n while not cfg['KILL_ALL']:\n\n try:\n #time.sleep(1)\n # Make prediction\n p = analyzeStream(interpreter)\n\n # Save results\n if not p == None:\n\n # Sleep if we are too fast\n if 'time_for_prediction' in p:\n if p['time_for_prediction'] < cfg['SPEC_LENGTH'] - cfg['SPEC_OVERLAP']:\n time.sleep((cfg['SPEC_LENGTH'] - cfg['SPEC_OVERLAP']) - (p['time_for_prediction']))\n\n else:\n time.sleep(1.0)\n\n except KeyboardInterrupt:\n cfg['KILL_ALL'] = True\n break\n #except:\n #cfg.KILL_ALL = True\n\n # Done\n log.p(('TERMINATED'))\n\n\nif __name__ == '__main__': \n \n run()\n ","sub_path":"recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":11118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639235659","text":"from discord.ext.commands.cooldowns import BucketType, Cooldown, CooldownMapping\nfrom discord.ext.commands import Command\n\ndef cooldoown(rate, per, type=BucketType.default, premium: bool = False):\n def decorator(func):\n if isinstance(func, Command):\n func._buckets = CooldownMapping(Cooldown(rate, per, type))\n else:\n if not premium:\n func.__commands_cooldown__ = Cooldown(rate, per, type)\n else:\n func.__commands_cooldown__ = Cooldown(0, per, type)\n return func\n return decorator","sub_path":"bot/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"352969945","text":"import sys\n\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QPushButton\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setWindowTitle(\"My App\")\n self.button = QPushButton(\"Press Me!\")\n self.button.clicked.connect(self.the_button_was_clicked)\n self.setCentralWidget(self.button)\n\n def the_button_was_clicked(self):\n self.button.setText(\"You already clicked me.\")\n self.button.setEnabled(False)\n\n self.setWindowTitle(\"My 
Oneshot App\")\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec())\n","sub_path":"src/pyside_demo/signals_and_slots_2.py","file_name":"signals_and_slots_2.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552959335","text":"import numpy as np\nimport tensorflow as tf\n\nfrom tensorflow import py_func\nfrom Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py\ndef lamda_variable(shape):\n initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16)\n return tf.get_variable(\"lamda\", shape,initializer=initializer, dtype=tf.float32)\n\ndef theta_variable(shape):\n initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16)\n return tf.get_variable(\"theta\", shape,initializer=initializer, dtype=tf.float32)\n\ndef generatingWeightMatrix(images, labels, epoch, division, batch):\n W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0]\n return W\n\ndef checkInformation(rep, epoch, s):\n X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0]\n return X\n\ndef weight_variable(shape):\n initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1)\n return tf.get_variable(\"weights\", shape,initializer=initializer, dtype=tf.float32)\n\ndef bias_variable(shape):\n initializer = tf.constant_initializer(0.0)\n return tf.get_variable(\"biases\", shape, initializer=initializer, dtype=tf.float32)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nclass MNISTcnn(object):\n def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False):\n self.x = tf.reshape(x, shape=[-1, 28, 28, 1])\n self.x_re=tf.reshape(x_re,shape=[-1,conf.ngray,784])\n self.x_d=x_d\n self.y = y\n self.keep_prob = tf.placeholder(tf.float32)\n self.e=tf.placeholder(tf.float32)\n self.batch=tf.placeholder(tf.float32)\n #####################glgcm#########################\n\n with tf.variable_scope(\"fc0\"):\n W_fc1 = weight_variable([784, 32])\n b_fc1 = bias_variable([32])\n x_flat = tf.reshape(x, [-1, 784])\n glgcm_h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)\n # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob)\n\n glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0)\n #####################################glgcm############################\n ######################################hex#############################\n # H = glgcm_h_fc1\n ######################################hex############################\n\n ######################################Sentiment######################\n # conv1\n with tf.variable_scope('hex'):\n with tf.variable_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32])\n if conf.re==1:\n tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1))\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n\n # conv2\n with tf.variable_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n # fc1\n with tf.variable_scope(\"fc1\"):\n shape = int(np.prod(h_pool2.get_shape()[1:]))\n W_fc1 = weight_variable([shape, 1024])\n b_fc1 = 
bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, shape])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # h_fc1 = checkInformation(h_fc1, self.e)\n\n h_fc1 = tf.nn.l2_normalize(h_fc1, 0)\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n\n yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1)\n #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1)\n\n pad=tf.zeros_like(glgcm_h_fc1, tf.float32)\n yconv_contact_pred=tf.concat([h_fc1_drop, pad],1)\n\n pad2 = tf.zeros_like(h_fc1, tf.float32)\n yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1)\n\n # fc2\n with tf.variable_scope(\"fc2\"):\n W_fc2 = weight_variable([1056, 7])\n b_fc2 = bias_variable([7])\n y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2\n y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2\n y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2\n ######################################Sentiment######################\n\n \"\"\"\n t_histo_rows = [\n tf.histogram_fixed_width(\n tf.gather(x, [row]),\n [0.0, 256.0], 100)\n for row in range(128)]\n\n H = tf.stack(t_histo_rows, axis=0)\n \"\"\"\n # H = y_conv_H\n\n sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss))\n if Hex_flag==False:\n if conf.re==1:\n tf.add_to_collection(\"losses\",sess_loss)\n self.loss = tf.add_n(tf.get_collection(\"losses\"))\n else:\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss))\n self.pred = tf.argmax(y_conv_pred, 1)\n\n # H = y_conv_H\n # H = tf.argmax(y_conv_H, 1)\n # y_H = tf.one_hot(H, depth=7)\n\n # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey')\n # H = checkInformation(H, self.e, 'ha')\n\n self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1))\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))\n if Hex_flag:\n # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10)\n\n # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch)\n\n y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0)\n y_conv_H = tf.nn.l2_normalize(y_conv_H, 0)\n\n y_conv_loss = y_conv_loss - tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss)\n\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss))\n\n # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32)))\n\n # tf.stop_gradient(W)\n # if conf.re==1:\n # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss)\n #\n # tf.add_to_collection(\"losses\",tf.reshape(sess_loss,[]))\n # self.loss = tf.add_n(tf.get_collection(\"losses\"))\n # else:\n # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss)","sub_path":"sentiment/cnn_mlp.py","file_name":"cnn_mlp.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"472048924","text":"\"\"\"\nThe algorithm will find prime factors of each of the numbers from\n1-20\n\nThen it will count the occurences of each factors occurence.\n\n30 = 2*3*5\n45 = 3*3*5\n\n2*3*3*5\n\n\"\"\"\nprimes = [2,3,5,7,11,13,17,19]\ndef GetPrimeFactorsation(num):\n 
factors = []\n for each in primes:\n while num%each==0 and num!=1:\n factors.append(each)\n num /= each\n return factors\n\n\nmaxfactorcount = [0]*21\n### So that it is indexable by actual numbers rather than - 1.\nmaxfactorcount[0] = 0\n\ndef GetDistinctValues(list):\n there = []\n distinct = []\n for each in list:\n if each not in there:\n distinct.append(each)\n there.append(each)\n return distinct\n\n\ndef SortOutCounter(list):\n list2 = GetDistinctValues(list)\n for each in list2:\n cout = list.count(each)\n if(maxfactorcount[each] < cout):\n maxfactorcount[each] = cout\n\n #Get distinct values\n #Get Count\n\n\nfor i in range(2,21):\n SortOutCounter(GetPrimeFactorsation(i))\n\nsum = 1\nfor x, y in enumerate(maxfactorcount):\n\n if(x != 0 and y!= 0):\n print(x, y)\n sum *= x**y\n\nprint(sum)\n\nprint(maxfactorcount)\n\n\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176346493","text":"# -*- coding: utf-8 -*-\n\"\"\"project settings\"\"\"\n\n# Copyright (C) 2015 ZetaOps Inc.\n#\n# This file is licensed under the GNU General Public License v3\n# (GPLv3). See LICENSE.txt for details.\n__author__ = 'Evren Esat Ozkan'\n\nimport os.path\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n# path of the activity modules which will be invoked by workflow tasks\nACTIVITY_MODULES_IMPORT_PATH = 'ulakbus.activities'\n# absolute path to the workflow packages\nWORKFLOW_PACKAGES_PATH = os.path.join(BASE_DIR, 'workflows')\n\n#PYOKO SETTINGS\nRIAK_SERVER = os.environ.get('RIAK_SERVER')\nRIAK_PROTOCOL = os.environ.get('RIAK_PROTOCOL')\nRIAK_PORT = os.environ.get('RIAK_PORT')\nREDIS_SERVER = os.environ.get('REDIS_SERVER')\n","sub_path":"ulakbus/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382173582","text":"import turtle\n\ndef draw_square():\n # need a window\n window = turtle.Screen()\n window.bgcolor(\"yellow\")\n\n franklin = turtle.Turtle()\n \n franklin.shape(\"turtle\")\n franklin.color(\"red\")\n franklin.speed(5)\n\n move_turtle(4, franklin)\n\n window.exitonclick()\n\ndef move_turtle(move, t):\n for m in range(move):\n t.forward(200)\n t.right(90)\n\ndraw_square()\n","sub_path":"my_square.py","file_name":"my_square.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483585682","text":"import os\nimport time\nimport datetime\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom openpyxl import load_workbook, Workbook\nfrom MyList import ID, PW\n\nbase_dir = os.path.dirname(os.path.realpath(__file__))\ncsv_file = base_dir+'/Node_Server_20180504.csv'\nnow = datetime.datetime.now()\nnowDate = now.strftime('%Y-%m-%d')\nnowYM = now.strftime('%Y%m')\nresult_file = base_dir+\"/opsdb_whatip_2018.xlsx\"\n\ndf = pd.read_csv(csv_file, usecols=['시스템코드','시스템코드명','Hostname', '접근제어 IPAddress','총 Core 수','총 메모리(GB)','사용유형','IDC','자산상태'])\nIP_addrs = df['접근제어 IPAddress']\n\n\ntry:\n # Whatip사이트에 접속하여 로그인 하기\n driver = webdriver.Chrome(base_dir+'/chromedriver')\n driver.implicitly_wait(3)\n 
driver.get('https://whatip.skplanet.com/login.php')\n elem = driver.find_element_by_xpath('.//input[@id=\"inputID\"]')\n elem.clear()\n elem.send_keys(ID)\n elem = driver.find_element_by_xpath('.//input[@id=\"inputPassword\"]')\n elem.clear()\n elem.send_keys(PW)\n elem = driver.find_element_by_xpath('.//button[@id=\"loginBtn\"]')\n elem.click()\n\n # 엑셀파일을 준비\n wb = Workbook()\n # 엑셀 시트 만들기\n ws = wb.create_sheet(nowDate)\n ws.append(['호스트명','IP addr','시스템코드명','IDC/Zone','OS','공인IP','사설IP','DBMS VIP','L4 VIP','Pub:In','Pub:Out','Pri: Out'])\n \n # wait = WebDriverWait(driver, 60)\n # cond = EC.text_to_be_present_in_element_value(By.LINK_TEXT, 'PMON')\n \n # IP addr 정보 조회하기\n cnt = 0\n for IP_addr in IP_addrs:\n if cnt > 4:\n break\n # Whatip사이트에 IP 질의하기\n elem = driver.find_element_by_xpath('.//input[@id=\"ip\"]')\n elem.clear()\n elem.send_keys(IP_addr)\n elem = driver.find_element_by_id('submitBtn').click()\n time.sleep(3)\n # just_wait = wait.until(cond)\n\n # 값 저장하기\n search_results = driver.find_elements_by_xpath('.//table/tbody/tr/td[@class=\"right-td\"]')\n result_list = []\n # 불필요한 값을 제거하자. (공백: 제거, /: 유지)\n for i in search_results:\n if i.text.find(\" \") == -1:\n result_list.append(i.text)\n elif i.text.find(\"/\") > -1:\n result_list.append(i.text)\n else:\n result_list.append(i.text.split(\" \")[0])\n print(result_list)\n ws.append(result_list)\n cnt += 1\n \n wb.save(filename=result_file)\n print(\">>> Result: OPSDB - Whatip 조회를 마쳤습니다.\")\n input()\nexcept Exception as e:\n print(e)\nfinally:\n driver.quit()","sub_path":"get_opsdb_data.py","file_name":"get_opsdb_data.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"8601841","text":"import datetime\nimport re\n\nimport Cons\nimport Conf\n\n\n\n_simulation_time_begin = None\n_simulation_time_end = None\n_simulated_time_begin = None\n_simulated_time_end = None\n\n\n# Get the simulation/simulated time begin/end by parsing a client log file with\n# the the same simulation time begin\ndef Init():\n\twith Cons.MT(\"Init Conf ...\", print_time=False):\n\t\tglobal _simulation_time_begin\n\t\tglobal _simulation_time_end\n\t\tglobal _simulated_time_begin\n\t\tglobal _simulated_time_end\n\n\t\tfn = \"%s/client/%s\" % (Conf.GetDir(\"log_dir\"), Conf.Get(\"simulation_time_begin\"))\n\t\twith open(fn) as fo:\n\t\t\tfor line in fo:\n\t\t\t\t#Cons.P(line)\n\t\t\t\t# simulation_time_end : 161227-162418.288\n\t\t\t\tmo = re.match(r\"# simulation_time_begin: (?P
<dt>\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d)\", line)\n\t\t\t\tif mo is not None:\n\t\t\t\t\t_simulation_time_begin = mo.group(\"dt\")\n\t\t\t\t\tif _simulation_time_begin != Conf.Get(\"simulation_time_begin\"):\n\t\t\t\t\t\traise RuntimeError(\"Unexpected\")\n\t\t\t\t\t_simulation_time_begin = datetime.datetime.strptime(_simulation_time_begin, \"%y%m%d-%H%M%S.%f\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tmo = re.match(r\"# simulation_time_end : (?P<dt>\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d)\", line)\n\t\t\t\tif mo is not None:\n\t\t\t\t\t_simulation_time_end = mo.group(\"dt\")\n\t\t\t\t\t_simulation_time_end = datetime.datetime.strptime(_simulation_time_end, \"%y%m%d-%H%M%S.%f\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tmo = re.match(r\"# simulated_time_begin : (?P<dt>\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d)\", line)\n\t\t\t\tif mo is not None:\n\t\t\t\t\t_simulated_time_begin = mo.group(\"dt\")\n\t\t\t\t\t_simulated_time_begin = datetime.datetime.strptime(_simulated_time_begin, \"%y%m%d-%H%M%S.%f\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tmo = re.match(r\"# simulated_time_end : (?P<dt>
\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d)\", line)\n\t\t\t\tif mo is not None:\n\t\t\t\t\t_simulated_time_end = mo.group(\"dt\")\n\t\t\t\t\t_simulated_time_end = datetime.datetime.strptime(_simulated_time_end, \"%y%m%d-%H%M%S.%f\")\n\t\t\t\t\tcontinue\n\n\t\tCons.P(\"simulation_time_begin: %s\" % _simulation_time_begin)\n\t\tCons.P(\"simulation_time_end : %s\" % _simulation_time_end)\n\t\tCons.P(\"simulated_time_begin : %s\" % _simulated_time_begin)\n\t\tCons.P(\"simulated_time_end : %s\" % _simulated_time_end)\n\n\n# s - _simulation_time_begin : _simulation_time_end - _simulation_time_begin = x - _simulated_time_begin : _simulated_time_end - _simulated_time_begin\n# x - _simulated_time_begin = (s - _simulation_time_begin) * (_simulated_time_end - _simulated_time_begin) / (_simulation_time_end - _simulation_time_begin)\n# x = (s - _simulation_time_begin) * (_simulated_time_end - _simulated_time_begin) / (_simulation_time_end - _simulation_time_begin) + _simulated_time_begin\ndef ToSimulatedTime(s):\n\treturn datetime.timedelta(seconds = ((s - _simulation_time_begin).total_seconds() \\\n\t\t\t* (_simulated_time_end - _simulated_time_begin).total_seconds() \\\n\t\t\t/ (_simulation_time_end - _simulation_time_begin).total_seconds())) \\\n\t\t\t+ _simulated_time_begin\n\n\ndef ToSimulatedTimeDur(simulation_time_dur):\n\treturn float(simulation_time_dur) \\\n\t\t\t/ (_simulation_time_end - _simulation_time_begin).total_seconds() \\\n\t\t\t* (_simulated_time_end - _simulated_time_begin).total_seconds()\n","sub_path":"rocksdb/quizup/analysis/not-used/plot-dstat/SimTime.py","file_name":"SimTime.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"420891084","text":"##################################################################\r\n# Admin class : Functionalities like adding trains , stations and \r\n# their schedules are done by this class\r\n#\r\n##################################################################\r\n\r\nimport csv\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\n\r\nclass admin_object:\r\n def __init__(self,database_cursor,connection):\r\n username = input(\" Please enter the admin credentials , username ? : \")\r\n secret_key = input(\" Please enter the admin credentials , secret_key ? : \")\r\n self.database_cursor = database_cursor\r\n self.connection = connection\r\n if username == \"root\" and secret_key == \"root\":\r\n print(\" Admin authorised successfully \")\r\n self.main_function()\r\n else:\r\n print(\" Invalid credentials for admin , exiting the program\") \r\n \r\n def main_function(self): \r\n print(\" Please find the functions that admin can perform \")\r\n typeof_function = input(\" 1 . Add a station , 2 . Add a train : \") \r\n if typeof_function == '1':\r\n self.add_station()\r\n elif typeof_function == '2':\r\n self.add_train()\r\n else:\r\n # go back to main function and try the process again for incorrect input entered\r\n print(\" Incorrect Code Entered ! \")\r\n \r\n admin_continuation = input(\" Do you want to continue with services of admin ? - Yes or No : \")\r\n if admin_continuation == \"Yes\":\r\n self.main_function()\r\n else:\r\n print(\" Thanks for using the services!\")\r\n \r\n # Ask user for a station as an input , check it's existence in station table and add it . 
\r\n def add_station(self):\r\n station_name_input = input(\" Please enter the station name to be added : \")\r\n sql_command_check_station_name = \" SELECT 1 FROM stations WHERE station_name = '\" + station_name_input + \"'\"\r\n self.database_cursor.execute(sql_command_check_station_name)\r\n check_station_name = self.database_cursor.fetchone() \r\n if check_station_name:\r\n print(\" Station Name : \" , station_name_input , \"already exists\")\r\n else:\r\n sql_command_insert_station_name = \" INSERT INTO stations(station_name) VALUES ('\"+station_name_input+\"')\"\r\n self.database_cursor.execute(sql_command_insert_station_name)\r\n self.connection.commit() \r\n print(\"Station Name :\" , station_name_input,\"added successfully\") \r\n \r\n # Add a train based on it's check in the db \r\n # Train name is checked for duplication and added \r\n def add_train(self):\r\n train_name_input = input(\" Please enter the train name to be added : \")\r\n sql_command_check_train_name = \" SELECT 1 FROM trains WHERE train_name = '\" + train_name_input + \"'\"\r\n self.database_cursor.execute(sql_command_check_train_name)\r\n check_train_name = self.database_cursor.fetchone() \r\n if check_train_name:\r\n print(\"Train name already exists!\")\r\n else:\r\n sql_command_insert_train_name = \" INSERT INTO trains(train_name) VALUES ('\"+train_name_input+\"')\" \r\n self.database_cursor.execute(sql_command_insert_train_name)\r\n self.connection.commit()\r\n print(\"Train Name :\" , train_name_input,\"added successfully\")\r\n self.prompt_schedule_csv_parse_and_add(train_name_input) \r\n \r\n # Add weekly availability for trains here \r\n # It requests a CSV in a specified format , which is parsed and update into schedule table \r\n def prompt_schedule_csv_parse_and_add(self,train_name_input):\r\n sql_command_get_train_id = \" SELECT train_id FROM trains WHERE train_name = '\" + train_name_input + \"'\"\r\n self.database_cursor.execute(sql_command_get_train_id)\r\n train_id = self.database_cursor.fetchone()\r\n num_seats_monday = input(\" Weekly Availability - Please enter the number of seats available for train on Monday : \")\r\n num_seats_tuesday = input(\" Weekly Availability - Please enter the number of seats available for train on Tuesday : \") \r\n num_seats_wednesday = input(\" Weekly Availability - Please enter the number of seats available for train on Wednesday : \")\r\n num_seats_thursday = input(\" Weekly Availability - Please enter the number of seats available for train on Thursday : \")\r\n num_seats_friday = input(\" Weekly Availability - Please enter the number of seats available for train on Friday : \")\r\n num_seats_saturday = input(\" Weekly Availability - Please enter the number of seats available for train on saturday : \")\r\n num_seats_sunday = input(\" Weekly Availability - Please enter the number of seats available for train on sunday : \")\r\n sql_command_insert_week_availability = \"INSERT INTO weekly_availability_seats(train_id,Mon,Tue,Wed,Thu,Fri,Sat,Sun) VALUES ('\"+str(train_id[0])+\"','\"+num_seats_monday+\"','\"+num_seats_tuesday+\"','\"+num_seats_wednesday+\"','\"+num_seats_thursday+\"','\"+num_seats_friday+\"','\"+num_seats_saturday+\"','\"+num_seats_sunday+\"')\";\r\n self.database_cursor.execute(sql_command_insert_week_availability)\r\n self.connection.commit()\r\n \r\n daily_schedule_csvfile = input(\" Please provide csv file that contains schedule for this train :\")\r\n with open(daily_schedule_csvfile, 'r') as csvfile: \r\n csvreader = csv.reader(csvfile)\r\n 
commit = 1\r\n for row in csvreader:\r\n station_name = row[0]\r\n time_of_arrival = row[1]\r\n time_of_departure = row[2]\r\n sequence_number = row[3]\r\n sql_command_get_station_id = \" SELECT station_id FROM stations WHERE station_name = '\" + station_name + \"'\"\r\n self.database_cursor.execute(sql_command_get_station_id)\r\n station_id_tuple = self.database_cursor.fetchone()\r\n if station_id_tuple:\r\n station_id = station_id_tuple[0]\r\n sql_command_insert_day_sched = \"INSERT INTO daily_station_train_schedule(station_id,train_id,time_of_arrival,time_of_departure,sequence_number) VALUES ('\"+str(station_id)+\"','\"+str(train_id[0])+\"','\"+time_of_arrival+\"','\"+time_of_departure+\"','\"+str(sequence_number)+\"')\";\r\n self.database_cursor.execute(sql_command_insert_day_sched)\r\n else:\r\n print(\" Incorrect station encountered in schedule : \" , station_id_tuple)\r\n commit = 0\r\n break \r\n if commit == 1:\r\n print(\" Schedule Updated Succesfully ! \" )\r\n self.connection.commit()\r\n \r\n ","sub_path":"userdefs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"456075446","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n lunaport.domain.chart\n ~~~~~~~~~~~~~~~~~~~~~\n Business logic layer for chart resource.\n\"\"\"\n\nimport pprint\nimport json\npp = pprint.PrettyPrinter(indent=4).pprint\n\n\nclass Chart(object):\n \"\"\"\n Object encapsulate whole charts data calculated for test ^ ammo_tag pair.\n \"\"\"\n attr_required = [\n 'test_id',\n 'ammo_tag',\n 'version',\n 'doc',\n ]\n attr_date = ['added_at']\n\n def __init__(self, **kw):\n for attr in self.attr_required:\n v = kw.get(attr)\n if not v:\n raise ValueError(\n '*{}* - required parameter missing.'.format(attr))\n setattr(self, attr, v)\n\n def as_dict(self, date_iso=False):\n retval = self.__dict__\n if date_iso: # datetime obj JSON serializable in ISO 8601 format.\n for attr in self.attr_date:\n if retval.get(attr):\n retval[attr] = retval[attr].isoformat()\n return retval\n\n def as_json(self):\n return json.dumps(self.as_dict(date_iso=True))\n\n\nclass ChartBuilder(object):\n \"\"\"\n Chart instance static builder.\n \"\"\"\n req_attr_allowed = [\n 'test_id',\n 'version',\n 'doc',\n ]\n req_attr_allowed_set = set(req_attr_allowed)\n\n @classmethod\n def ver_to_int(cls, version):\n \"\"\"\n Adopt string version format like '0.1.4' to int representation.\n \"\"\"\n return int(''.join(version.split('.')))\n\n @classmethod\n def from_Flask_req(cls, r, test_id, ammo_tag):\n \"\"\"\n Creates class instance from Flask request object.\n\n Args:\n r: Flask request object.\n\n Returns:\n Host class instance.\n \"\"\"\n if r.mimetype == 'multipart/form-data':\n msg_rv = r.form\n\n elif r.mimetype == 'application/json':\n msg_rv = r.json\n else:\n raise ValueError('Unsupported mime type')\n\n if not msg_rv:\n raise ValueError('Can\\'t deserialize request body')\n\n # ImmutableMultiDict to dict cast\n msg_rv = dict((k, v) for k, v in msg_rv.items())\n msg_set = set(msg_rv.keys())\n\n if not msg_set.issubset(cls.req_attr_allowed_set):\n err_msg = [\n 'Body contains unexpected params:',\n str(list(msg_set - cls.req_attr_allowed_set))\n ]\n raise ValueError(' '.join(err_msg))\n\n msg_rv.update({\n 'test_id': test_id,\n 'ammo_tag': ammo_tag\n })\n msg_rv['version'] = cls.ver_to_int(msg_rv['version'])\n return Chart(**msg_rv)\n\n @classmethod\n def from_row(cls, **row):\n \"\"\"\n Creates class instance from 
RDBMS returned row.\n\n Args:\n row: dict with table columns as keys.\n\n Returns:\n Host class instance.\n \"\"\"\n return Chart(**row)\n","sub_path":"lunaport_server/domain/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505093471","text":"\"\"\"Test the surface_io module.\"\"\"\nfrom collections import OrderedDict\nimport pytest\n\nimport fmu.dataio as fio\nimport fmu.dataio._utils as _utils\n\nCFG = OrderedDict()\nCFG[\"model\"] = {\"name\": \"Test\", \"revision\": \"21.0.0\"}\nCFG[\"masterdata\"] = {\n \"smda\": {\n \"country\": [\n {\"identifier\": \"Norway\", \"uuid\": \"ad214d85-8a1d-19da-e053-c918a4889309\"}\n ],\n \"discovery\": [{\"short_identifier\": \"abdcef\", \"uuid\": \"ghijk\"}],\n }\n}\n\n\n@pytest.mark.parametrize(\n \"name, tagname, t1, t2, loc, expectedstem, expectedpath\",\n [\n (\n \"some\",\n \"case1\",\n None,\n None,\n \"surface\",\n \"some--case1\",\n \"maps\",\n ),\n (\n \"some\",\n \"case2\",\n None,\n None,\n \"grid\",\n \"some--case2\",\n \"grids\",\n ),\n (\n \"some\",\n None,\n None,\n None,\n \"wrong\",\n \"some\",\n \"other\",\n ),\n (\n \"some\",\n None,\n 20200101,\n None,\n \"grid\",\n \"some--20200101\",\n \"grids\",\n ),\n (\n \"some\",\n \"case8\",\n 20200101,\n 20400909,\n \"grid\",\n \"some--case8--20400909_20200101\",\n \"grids\",\n ),\n (\n \"some.with.dots and some spaces\",\n \"case8\",\n 20200101,\n 20400909,\n \"grid\",\n \"some_with_dots_and_some_spaces--case8--20400909_20200101\",\n \"grids\",\n ),\n ],\n)\ndef test_utils_construct_filename(\n tmp_path, name, tagname, t1, t2, loc, expectedstem, expectedpath\n):\n \"\"\"Testing construct file.\"\"\"\n stem, dest = _utils.construct_filename(\n name, pretagname=None, tagname=tagname, loc=loc, t1=t1, t2=t2, outroot=tmp_path\n )\n\n assert stem == expectedstem\n assert dest.resolve() == (tmp_path / expectedpath).resolve()\n\n\ndef test_utils_verify_path():\n \"\"\"Testing veriy the path.\"\"\"\n ed = fio.ExportData(\n config=CFG,\n content=\"depth\",\n unit=\"m\",\n vertical_domain={\"depth\": \"msl\"},\n timedata=None,\n is_prediction=True,\n is_observation=False,\n tagname=\"any\",\n verbosity=\"DEBUG\",\n workflow=\"dummy\",\n )\n\n path, metapath, relpath, abspath = _utils.verify_path(\n ed,\n \"tmp/share/results\",\n \"somefile\",\n \".myext\",\n dryrun=True,\n )\n\n assert str(path).endswith(\"tmp/share/results/somefile.myext\")\n\n\ndef test_uuid_from_string():\n \"\"\"Testing that uuid from string is repeatable\"\"\"\n string1 = \"string1\"\n string2 = \"string2\"\n\n uuid1 = _utils.uuid_from_string(string1)\n uuid2 = _utils.uuid_from_string(string2)\n uuidx = _utils.uuid_from_string(string1)\n\n assert uuid1 != uuid2\n assert uuidx == uuid1\n\n\ndef test_parse_parameters_txt():\n \"\"\"Testing parsing of parameters.txt to JSON\"\"\"\n\n ptext = \"tests/data/drogon/ertrun1/realization-1/iter-0/parameters.txt\"\n\n res = _utils.read_parameters_txt(ptext)\n\n assert res[\"SENSNAME\"] == \"rms_seed\"\n assert res[\"GLOBVAR\"][\"VOLON_PERMH_CHANNEL\"] == 1100\n\n\ndef test_parse_parameters_txt_justified():\n \"\"\"Testing parsing of justified parameters.txt to JSON\"\"\"\n\n ptext = \"tests/data/drogon/ertrun1/realization-0/iter-0/parameters_justified.txt\"\n\n res = _utils.read_parameters_txt(ptext)\n\n assert res[\"SENSNAME\"] == \"rms_seed\"\n assert res[\"GLOBVAR\"][\"VOLON_PERMH_CHANNEL\"] == 1100\n assert res[\"LOG10_MULTREGT\"][\"MULT_VALYSAR_THERYS\"] == 
-3.2582\n","sub_path":"tests/test_fmu_dataio_utils.py","file_name":"test_fmu_dataio_utils.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"394072231","text":"from django.http import Http404\nimport globalVars\n\n\nclass CubieSrvMiddleware(object):\n # Check if client IP is allowed\n\n def process_request(self, request):\n allowed_ips = globalVars.djangoIPAuth\n ip = request.META.get('REMOTE_ADDR')\n if ip not in allowed_ips:\n msg = 'Peticion no autorizada de: ' + ip\n globalVars.toLogFile(msg)\n #globalVars.toFile(globalVars.sendFile, msg)\n raise Http404 # If user is not allowed raise Error\n\n # If IP is allowed we don't do anything\n return None\n","sub_path":"piWeb/middleware/cubieSrvMiddleware.py","file_name":"cubieSrvMiddleware.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"405210083","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 15:07:16 2020\n\n@author: Gerd Duscher\n\"\"\"\nimport unittest\nimport sys\n\nsys.path.append(\"../pyTEMlib/\")\nfrom pyTEMlib.microscope import Microscope\n\n\nclass TestPackageImport(unittest.TestCase):\n\n def test_microscope_init(self):\n tem = Microscope()\n self.assertTrue(isinstance(tem.name, str))\n\n def test_get_available_microscope_names(self):\n\n available_names = Microscope().get_available_microscope_names()\n print(available_names)\n self.assertTrue(isinstance(available_names, list))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_microscope.py","file_name":"test_microscope.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213704961","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\nfrom pylab import figure, axes, pie, title, show\r\n\r\nx, y,z = np.loadtxt('Apparaturkonstante.txt', unpack=True, delimiter=',')\r\n\r\ndef f(x,a,b):\r\n return a*x+b\r\npopt, pcov = curve_fit(f, x, y)\r\n\r\nerrors = np.sqrt(np.diag(pcov))\r\nprint('a =', popt[0], '±', errors[0])\r\nprint('b =', popt[1], '±', errors[1])\r\n\r\nx_new = np.linspace(0.005 , 0.002 , 1000)\r\n\r\nplt.figure(1)\r\nplt.plot(x, y,'rx')\r\nplt.plot(x_new, f(x_new,*popt),'-', label='Linearer Fit')\r\n\r\nplt.ylabel('$D/U_{d}$ / $cm/V$')\r\nplt.xlabel('$1/U_{b}$ / $1/V$')\r\nplt.grid()\r\nplt.legend(loc='best')\r\n\r\n\r\nplt.savefig('Apparaturkonstante.pdf')\r\n","sub_path":"501 bzw 502/Apparaturkonstante.py","file_name":"Apparaturkonstante.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203153652","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom pyspark import SparkConf, SparkContext\nimport sys\nimport operator\nimport re, string\nimport unicodedata\n\ninputs = sys.argv[1]\noutput = sys.argv[2]\n \nconf = SparkConf().setAppName('word count')\n#sc = SparkContext(conf=conf)\nsc = SparkContext.getOrCreate()\n#assert sys.version_info >= (3, 4) # make sure we have Python 3.5+\n#assert sc.version >= '2.2' # make sure we have Spark 2.2+\n \ndef words_once(line):\n wordsep = re.compile(r'[%s\\s]+' % re.escape(string.punctuation))\n for w in wordsep.split(line):\n if(w != \"\"):\n yield (unicodedata.normalize(\"NFD\", w.lower()), 1)\n \ndef 
get_key(kv):\n    return kv[0]\n\ndef freq(kv):\n    return kv[1]\n \ndef output_format(kv):\n    k, v = kv\n    return '%s %i' % (k, v)\n \ntext = sc.textFile(inputs)\nwords = text.flatMap(words_once)\nwordcount = words.reduceByKey(operator.add)\n#outdata = wordcount\nwordcount = wordcount.cache()\noutdata = wordcount.sortBy(get_key).map(output_format)\noutdata.saveAsTextFile(output+\"/by-word\")\n\n#statements to sort by frequency, will create a second output file\noutdata2 = wordcount.sortBy(get_key).sortBy(freq,False).map(output_format)\noutdata2.saveAsTextFile(output+\"/by-freq\")\n\nsc.stop()","sub_path":"2A/wordcount-improved.py","file_name":"wordcount-improved.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594962373","text":"\"\"\"metro_map_saver URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import include, path, re_path\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.auth import views as auth_views\n\nfrom map_saver.views import MapDataView, MapDiffView, MapGalleryView, MapAdminActionView, MapSimilarView, MapsByDateView, ThumbnailGalleryView, HomeView, PublicGalleryView, CreatorNameMapView\n\nurlpatterns = [\n    # Main page\n    path('', HomeView.as_view(), name='home'),\n\n    # Public Gallery\n    path('gallery/', PublicGalleryView.as_view(), name='public_gallery'),\n\n    # End-user actions (saving, loading, naming maps)\n    path('save/', MapDataView.as_view(), name='save_map'),\n    path('load/', MapDataView.as_view(), name='load_map'),\n    path('name/', CreatorNameMapView.as_view(), name='name_map'),\n\n    # Admin and Moderation\n    path('admin/', admin.site.urls),\n    path('accounts/login/', auth_views.LoginView.as_view()),\n\n    # Admin Gallery\n    re_path(r'admin/gallery/(?P<tag>[\\w^\\d]+)?/?$', MapGalleryView.as_view(), name='admin_gallery'),\n\n    # Admin Gallery: Admin actions\n    path('admin/action/', MapAdminActionView.as_view(), name='admin_action'),\n\n    # Admin Gallery: Similar\n    path('admin/similar/', MapSimilarView.as_view(), name='similar'),\n\n    # Admin Gallery: Direct View\n    path('admin/direct/', MapGalleryView.as_view(), name='direct'),\n\n    # Admin Gallery: Diff\n    path('admin/diff/<urlhash_first>/<urlhash_second>/', MapDiffView.as_view(), name='diff'),\n\n    # Admin: Maps created by date\n    path('admin/bydate/', MapsByDateView.as_view(), name='by_date'),\n\n    # Thumbnails\n    path('admin/thumbnail/<tag>/', ThumbnailGalleryView.as_view(), name='thumbnail_tag'),\n]\n\nif settings.DEBUG:\n    import debug_toolbar\n    urlpatterns = [\n        path('__debug__/', include(debug_toolbar.urls)),\n    ] + urlpatterns\n","sub_path":"metro_map_saver/metro_map_saver/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71789165","text":"from random import *\r\nfrom math import *\r\nfrom csv import 
*\r\n\r\nCLUSTERS=[]\r\nDISTANCE_METRIC=[]\r\ndef load_csv_file(filename):\r\n\tDATASET = list()\r\n\twith open(filename, 'r') as f:\r\n\t\tcr = reader(f)\r\n\t\tfor row in cr:\r\n\t\t\tif not row:\r\n\t\t\t\tcontinue\r\n\t\t\tDATASET.append(row)\r\n\treturn DATASET\r\n\r\ndef classLabelToInt(dataset, column):\r\n class_values = [row[column] for row in dataset] #all class labels\r\n unique = set(class_values) #all distinct class labels\r\n lookup = dict()\r\n for i, value in enumerate(unique):\r\n lookup[value] = i\r\n for row in dataset:\r\n row[column] = lookup[row[column]] # assign distinct integers starting from 0 to noOfDistinct Values\r\n return lookup\r\n\r\ndef stringToFloat(DATASET, column):\r\n\tfor row in DATASET:\r\n\t\trow[column] = float(row[column].strip())\r\n\r\ndef step1_form_initial_cluster_center(DATASET,k,length):\r\n ran=[]\r\n for i in range(k):\r\n r=randrange(0,length)\r\n ran.append(r)\r\n #ran=[0,3,6]\r\n cluster=[DATASET[k] for k in ran]\r\n print(\"Initial Cluster Center\")\r\n for j in cluster:\r\n print(j)\r\n return cluster\r\n\r\ndef euclidianDistance(x,y):\r\n length=len(x)\r\n total=0\r\n for i in range(length):\r\n diff=x[i]-y[i]\r\n diff=abs(diff)\r\n part=diff**2\r\n total+=part\r\n DISTANCE_METRIC=total**(1/2)\r\n return DISTANCE_METRIC\r\n\r\ndef find_the_index_in_list(number,list1):\r\n for i in range(len(list1)):\r\n if(list1[i]==number):\r\n return i\r\n\r\ndef step2_find_distance_between_cluster_point_and_datapoint(DATASET,clusterpoints):\r\n length=len(clusterpoints)\r\n lengthDataset=len(DATASET)\r\n clusterSet=[]\r\n distanceVector=[]\r\n for i in range(lengthDataset):\r\n dist=[]\r\n for j in clusterpoints:\r\n d=euclidianDistance(j,DATASET[i])\r\n dist.append(d)\r\n distanceVector.append(dist)\r\n clusterNumber=find_the_index_in_list(min(dist),dist)+1\r\n #print(clusterNumber)\r\n clusterSet.append(clusterNumber)\r\n global CLUSTERS\r\n CLUSTERS.append(clusterSet)\r\n global DISTANCE_METRIC\r\n DISTANCE_METRIC.append(distanceVector)\r\n\r\ndef find_mean_of_values(a):\r\n lengthmain=len(a)\r\n lengthsub=len(a[0])\r\n output=[]\r\n for i in range(lengthsub):\r\n sums=0\r\n for j in range(lengthmain):\r\n sums+=a[j][i]\r\n avg=sums/lengthmain\r\n output.append(avg)\r\n #print(output)\r\n return output\r\n\r\ndef step3_get_new_cluster_points(DATASET,iterationNumber,length):\r\n global CLUSTERS\r\n newCenter=[]\r\n clustertobeconsidered=CLUSTERS[iterationNumber]\r\n partition={}\r\n for i in range(len(clustertobeconsidered)):\r\n if(clustertobeconsidered[i] not in partition):\r\n partition[clustertobeconsidered[i]]=[]\r\n partition[clustertobeconsidered[i]].append(DATASET[i])\r\n for key,value in partition.items():\r\n #print(\"key=\",key,\"value\",value)\r\n newone=find_mean_of_values(value)\r\n newCenter.append(newone)\r\n return newCenter\r\n\r\ndef main():\r\n #DATASET=[[2,10],[2,5],[8,4],[5,8],[7,5],[6,4],[1,2],[4,9]]\r\n filename = 'C:/Users/Deepak Acharya/Pictures/SPECTF_New.csv'\r\n DATASET=load_csv_file(filename)\r\n classLabelFile='C:/Users/Deepak Acharya/Desktop/SPECTF_New_CL.csv'\r\n ds=load_csv_file(classLabelFile)\r\n\r\n classlabelarray=classLabelToInt(ds,0);\r\n print(\"Printing Ds\")\r\n print(ds)\r\n kvalue=2\r\n for i in range(len(DATASET[0])):\r\n stringToFloat(DATASET, i)\r\n clusterpoints=step1_form_initial_cluster_center(DATASET,kvalue,len(DATASET))\r\n c=0\r\n oldone=-1\r\n newone=+1\r\n while(oldone != newone):\r\n oldone=clusterpoints\r\n step2_find_distance_between_cluster_point_and_datapoint(DATASET,clusterpoints)\r\n 
newclusterpoint=step3_get_new_cluster_points(DATASET,c,len(clusterpoints))\r\n        clusterpoints=newclusterpoint\r\n        newone=newclusterpoint\r\n        c+=1;\r\n    k=0\r\n    finalCluster=[]\r\n    print(\"Number Of Iterations Took=\",c)\r\n    for i in CLUSTERS:\r\n        if((k==(c-1)) or (k==(c-2))):\r\n            finalCluster=i\r\n        k+=1\r\n    print(finalCluster)\r\n    whichpointbelongstowhichcluster=[[] for i in range(kvalue)]\r\n\r\n    for i in range(len(finalCluster)):\r\n        whichpointbelongstowhichcluster[finalCluster[i]-1].append(i+1)\r\n\r\n    totalCount=[0 for i in range(kvalue)]\r\n    countArray=[0 for i in range(kvalue)]\r\n    for i in range(len(whichpointbelongstowhichcluster)):\r\n        for j in whichpointbelongstowhichcluster[i]:\r\n            #print(j,j-1,ds[j-1][0],i)\r\n            if(ds[j-1][0]==i):\r\n                countArray[i]+=1\r\n            totalCount[i]+=1\r\n    print(totalCount)\r\n    print(countArray)\r\n\r\n    correct=[]\r\n\r\n    for i in range(kvalue):\r\n        partAccuracy=countArray[i]/totalCount[i]*100\r\n        correct.append(partAccuracy)\r\n\r\n    sums=0\r\n    for i in range(kvalue):\r\n        sums+=correct[i]\r\n\r\n    print(\"Accuracy=%.2f\"%(sums/kvalue))\r\n\r\n\r\nmain()\r\n","sub_path":"calest/kmeansclustering.py","file_name":"kmeansclustering.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56144114","text":"import sqlite3\r\nimport os\r\n\r\n\r\nclass DatabaseManager(object):\r\n\r\n    def __init__(self):\r\n        self.db_path = os.path.join(os.path.split(os.getcwd())[0], \"DATABASE\", \"data.db\")\r\n\r\n        self.conn = None\r\n\r\n    def connect(self):\r\n        self.conn = sqlite3.connect(self.db_path)\r\n\r\n    def build_db(self):\r\n        self.conn.execute(\"DROP TABLE IF EXISTS user\")\r\n        self.conn.execute(\"\"\"\r\n            CREATE TABLE user (\r\n                id TEXT PRIMARY KEY,\r\n                name TEXT NOT NULL,\r\n                email TEXT UNIQUE NOT NULL,\r\n                profile_pic TEXT NOT NULL\r\n            )\"\"\")\r\n        self.conn.commit()\r\n\r\n    def get_db(self):\r\n        if self.conn is None:\r\n            self.connect()\r\n        return self.conn\r\n\r\n    def close_db(self):\r\n        if self.conn is not None:\r\n            self.conn.close()\r\n\r\nx = DatabaseManager()\r\nx.connect()\r\nx.build_db()\r\n","sub_path":"assignment-13a/SSO_Software_Engineering/DatabaseWorkers/DatabaseManagerClass.py","file_name":"DatabaseManagerClass.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"597860609","text":"\n\nimport string\n\ndef main():\n\n    punctuation = string.punctuation\n    book = open('book.txt', 'r').read()\n    # book.close()\n    with open('book.txt', 'r') as f:\n        for thing in punctuation:\n            book = book.replace(thing, '')\n\n    book = book.lower()\n\n    words_list = book.split()\n\n    dictionary = {}\n    for item in words_list:\n        if item in dictionary:\n            dictionary[item] += 1\n        else:\n            dictionary.update({item:1})\n    \n    words = list(dictionary.items()) # .items() returns a list of tuples\n    words.sort(key=lambda tup: tup[1], reverse=True) # sort largest to smallest, based on count\n    for i in range(min(10, len(words))): # print the top 10 words, or all of them, whichever is smaller\n        print(words[i])\n\n    # print(book)\n    # for thing in punctuation:\n    #     print(thing)\n    #     book = str(f.read()).replace(thing, '')\n    # # book = f.read()\n    # print(str(f.read()))\n    \n\n\n\nmain()","sub_path":"Code/Tyler/21-count_words.py","file_name":"21-count_words.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"266668439","text":"from rest_framework import serializers\n\nfrom post.models import Posts\n\n\nclass PostSerializer(serializers.ModelSerializer):\n class Meta:\n model = Posts\n fields = ('id', 'title', 'body')\n extra_kwargs = {\n 'url': {\n 'view_name': 'posts:post-detail',\n }\n }\n","sub_path":"post/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"27315360","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nfrom .Answer import Answer\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport random\nimport time\n\nfrom .OfficineUpdater import OfficineUpdater\nfrom .ContextMessageManager import ContextMessage,ContextMessageManager,ContextCode,GIPHY\n\nclass PharmaGardeAnswer(Answer):\n\t\"\"\"\n\tpour les reponses demandant les pharmacies de garde\n\t\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__([\"getPharmaGarde\"])\n\t\tself.scrappingUrl = \"https://www.abidjan.net/inc/abidjan/inc_pharmacie.js\"\n\n\n\tdef process(self,e,options:dict=None) -> str:\n\n\t\tsender_psid = options[\"sender_psid\"]\n\t\tmanager = ContextMessageManager(user_id=sender_psid)\n\n\n\t\t# on cherche les lieux\n\t\tcommunes:list = []\n\t\tquartiers:list = []\n\t\tpharmacies:list = []\n\t\t\n\t\tif \"quartier\" in e[\"entities\"]:\n\t\t\tquartiers = [i[\"value\"].strip().upper() for i in e[\"entities\"][\"quartier\"] if i[\"confidence\"] > 0.5 and \"suggested\" not in i]\n\n\t\tif \"Commune\" in e[\"entities\"]:\n\t\t\tcommunes = [i[\"value\"].strip().upper() for i in e[\"entities\"][\"Commune\"] if i[\"confidence\"] > 0.5 and \"suggested\" not in i]\n\n\t\tif \"pharmaName\" in e[\"entities\"]:\n\t\t\tpharmacies = [i[\"value\"] for i in e[\"entities\"][\"pharmaName\"] if i[\"confidence\"] > 0.5]\n\n\n\t\tanswer:str = \"\"\n\t\t\n\n\t\tif len(quartiers) == 0 and len(communes) == 0 and len(pharmacies) == 0:\n\n\t\t\tm = [\n\t\t\t\t\"pour gagner du temps 🏃‍♂️ indiques stp une localité que je couvre.\",\n\t\t\t\t\"si tu veux gagner du temps 🏃‍♂️ indiques stp une localité que je couvre\",\n\t\t\t\t\"🚶il est preferable de selectionner une localité que je couvre\",\n\t\t\t\t\"🚶 il est souhaitable de selectionner une localité que je couvre\",\n\t\t\t\t\"🚶 je pense que tu devrais indiquer une localité que je couvre \"\n\t\t\t]\n\t\t\tresp:dict = {\"text\":random.choice(m)}\n\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\t# un git du style j'attend votre reponse\n\t\t\tresp:dict = {\n\t\t\t\t\"attachment\": {\n\t \t\"type\": \"image\",\n\t \"payload\": {\n\t \"attachment_id\": random.choice(GIPHY.WAITING),\n\t }\n\t\t\t\t}\n\t\t\t}\n\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\tmanager.process()\n\t\telse:\n\n\t\t\tparams:dict = {\"quartier\":quartiers,\"commune\":communes,\"pharmacie\":pharmacies}\n\n\t\t\t# on met a jour la localité demandée\n\t\t\tcurrentLocation = None\n\t\t\tcurrentLocationType = None\n\t\t\tcurrentPharmacie = None\n\t\t\tif len(quartiers):\n\t\t\t\tcurrentLocation = quartiers[0]\n\t\t\t\tcurrentLocationType = \"quartier\"\n\t\t\t\tmanager.currentLocation = currentLocation\n\t\t\t\tmanager._user.currentLocation = currentLocation\n\t\t\t\tmanager.currentLocationType = currentLocationType\n\t\t\t\tmanager.save({\"currentLocation\":currentLocation,\"currentLocationType\":currentLocationType})\n\t\t\telif len(communes):\n\t\t\t\tcurrentLocation = communes[0]\n\t\t\t\tcurrentLocationType = 
\"commune\"\n\t\t\t\tmanager.currentLocation = currentLocation\n\t\t\t\tmanager._user.currentLocation = currentLocation\n\t\t\t\tmanager.currentLocationType = currentLocationType\n\t\t\t\tmanager.save({\"currentLocation\":currentLocation,\"currentLocationType\":currentLocationType})\n\n\t\t\tif len(pharmacies):\n\t\t\t\tcurrentPharmacie = pharmacies[0]\n\t\t\t\tmanager.currentPharmacie = currentPharmacie\n\t\t\t\tmanager._user.currentPharmacie = currentPharmacie\n\t\t\t\tmanager.save({\"currentPharmacie\":currentPharmacie})\n\n\n\n\t\t\tif len(pharmacies) == 0:\n\t\t\t\n\t\t\t\ttext = 'Tu as dit \"{}\"'.format(currentLocation.title())\n\t\t\t\tif manager._user.currentZone:\n\t\t\t\t\tif manager._user.currentZone == 1:\n\t\t\t\t\t\ttext = text + \", dans la zone d'abidjan\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ttext = text + \", à l'intérieur du pays\"\n\n\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\tctx = ContextMessage(message=resp,code=ContextCode.VERBOSE)\n\t\t\t\tmanager.addItem(ctx)\n\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\t\t\telse:\n\n\t\t\t\ttext = 'Tu as dit la \"{}\"'.format(currentPharmacie.title())\n\n\t\t\t\tif manager._user.currentLocation:\n\t\t\t\t\ttext = text + \", à {}\".format(manager._user.currentLocation.title())\n\n\n\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\tctx = ContextMessage(message=resp,code=ContextCode.VERBOSE)\n\t\t\t\tmanager.addItem(ctx)\n\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\n\t\t\tm = [\n\t\t\t\t\"un instant que je cherche un peu 🏃‍♂️\",\n\t\t\t\t\"je te reviens tres vite 🏃‍♂️\",\n\t\t\t\t\"laisses moi voir stp 🤨\",\n\t\t\t\t\"voyons voir ce que j'ai là 🤔\",\n\t\t\t]\n\t\t\tresp:dict = {\"text\":random.choice(m)}\n\t\t\tctx = ContextMessage(message=resp,code=ContextCode.VERBOSE)\n\t\t\tmanager.addItem(ctx)\n\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\t# un git du style j'y travail en ce moment\n\t\t\tresp:dict = {\n\t\t\t\t\"attachment\": {\n\t \t\"type\": \"image\",\n\t \"payload\": {\n\t \"attachment_id\": random.choice(GIPHY.TYPING),\n\t }\n\t\t\t\t}\n\t\t\t}\n\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\t\t\t#time.sleep(random.choice([2,3,1]))\n\n\t\t\tofm = OfficineUpdater()\n\t\t\tr = ofm.search(**params)\n\n\t\t\tif len(r[\"data\"]):\n\n\t\t\t\tmanager.searchSuccess = manager.searchSuccess + 1\n\t\t\t\tmanager.save({\"searchSuccess\":manager.searchSuccess})\n\n\n\t\t\t\t# on met a jour la question qui a appellé cette reponse\n\t\t\t\targs = {\"answered\":True,\"required\":False}\n\t\t\t\tmanager.updateItem(ContextCode.ASK_LOCALITY,args)\n\t\t\t\t\n\n\t\t\t\tif len(pharmacies):\n\t\t\t\t\t# recherche d'une pharmacie specique\n\t\t\t\t\titem = r[\"data\"][0]\n\t\t\t\t\tname = \"{}\".format(item['name'].replace(\"Phcie\",\"Pharmacie\"))\n\t\t\t\t\tif item['address']:\n\t\t\t\t\t\ttext = \"Tu peux joindre la {} au numéro suivant:\\r\\n{}\".format(name,item['address'])\n\t\t\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\t\t\tif item['description']:\n\t\t\t\t\t\ttext = \"📍 situation géographique: {}\".format(item['description'])\n\t\t\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\n\t\t\t\t\t# if manager.check_if_user_subscribe_to_current_pharmacy_alert() == False:\t\n\t\t\t\t\t# \tm = {\n\t\t\t\t\t# \t\t\"nlp\":{},\n\t\t\t\t\t# \t\t\"quick_reply\":{\n\t\t\t\t\t# \t\t\t\"payload\":\"PHARMACY_ALERT_SUBSCRIPTION\"\n\t\t\t\t\t# \t\t},\n\t\t\t\t\t# \t}\n\t\t\t\t\t# \tmanager.handle_quick_reply(m)\n\t\t\t\t\t# 
else:\n\t\t\t\t\tm = {\n\t\t\t\t\t\t\"nlp\":{},\n\t\t\t\t\t\t\"quick_reply\":{\n\t\t\t\t\t\t\t\"payload\":\"ASK_PHARMACY_DETAILS\"\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"insta\":2\n\t\t\t\t\t}\n\t\t\t\t\tmanager.handle_quick_reply(m)\n\n\t\t\t\t\tmanager.saveUserSearch(\"pharmacy\")\n\n\n\t\t\t\telse:\n\t\t\t\t\tmanager.oldDataSearch = r\n\t\t\t\t\tmanager.save({\"oldDataSearch\":r})\n\t\t\t\t\tm = [\n\t\t\t\t\t\t\"🗓 la periode en cours est\",\n\t\t\t\t\t\t\"🗓 la periode du tour de garde est\"\n\t\t\t\t\t]\n\t\t\t\t\tprefix = random.choice(m)\n\t\t\t\t\ttext = prefix + \" la \" + r[\"period\"].lower().capitalize()\n\t\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\n\t\t\t\t\tfor item in r[\"data\"]:\n\t\t\t\t\t\ttext = \"🏫 {}\".format(item['name'].replace(\"Phcie\",\"Pharmacie\"))\n\t\t\t\t\t\tif item['address']:\n\t\t\t\t\t\t\ttext = text + \"\\r\\n{}\".format(item['address'])\n\n\t\t\t\t\t\tif len(pharmacies):\n\t\t\t\t\t\t\ttext = text + \"\\r\\n\\r\\n{description}\".format(description=item['description'])\n\n\t\t\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\t\t\tmanager.saveUserSearch(\"normal\")\n\t\t\t\t\tmanager.saveGardePeriodView()\n\n\n\t\t\t\t\tif manager.check_if_user_subscribe_to_current_locality_alert() == False:\n\n\t\t\t\t\t\tm = {\n\t\t\t\t\t\t\t\"nlp\":{},\n\t\t\t\t\t\t\t\"quick_reply\":{\n\t\t\t\t\t\t\t\t\"payload\":\"LOCALITY_ALERT_SUBSCRIPTION\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmanager.handle_quick_reply(m)\n\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tm = {\n\t\t\t\t\t\t\t\"nlp\":{},\n\t\t\t\t\t\t\t\"quick_reply\":{\n\t\t\t\t\t\t\t\t\"payload\":\"ASK_PHARMACY_DETAILS\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"insta\":1\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmanager.handle_quick_reply(m)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tmanager.searchFails = manager.searchFails + 1\n\t\t\t\tmanager.save({\"searchFails\":manager.searchFails})\n\n\t\t\t\tif len(pharmacies):\n\t\t\t\t\ttext = \"Aïe.. 
il semble que la {} n'est pas dans le tour des gardes cette semaine\".format(pharmacies[0])\n\t\t\t\t\tresp:dict = {\"text\":text}\n\t\t\t\t\tself.fbsend.sendMessage(sender_psid,resp)\n\n\t\t\t\t\t\n\n\t\t\t\t\t\n\t\t\t\tm = {\n\t\t\t\t\t\"nlp\":{},\n\t\t\t\t\t\"quick_reply\":{\n\t\t\t\t\t\t\"payload\":\"NEW_SEARCH\"\n\t\t\t\t\t},\n\t\t\t\t\t\"insta\":2\n\t\t\t\t}\n\t\t\t\tmanager.handle_quick_reply(m)\n\t\t\t\n\t\treturn answer\n\n\n","sub_path":"pharmaapp/PharmaGardeAnswer.py","file_name":"PharmaGardeAnswer.py","file_ext":"py","file_size_in_byte":7800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"67954700","text":"import asyncio\nimport aiohttp\nimport functools\nimport json\nimport logging\nimport traceback\n\nfrom aiohttp import web\nfrom aiohttp import WSCloseCode\nfrom asyncio import Queue\nfrom opentrons.server import serialize\nfrom opentrons.protocol_api.execute import ExceptionInProtocolError\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nlog = logging.getLogger(__name__)\n\n# Number of executor threads\nMAX_WORKERS = 2\n\n# Keep these in sync with ES code\nCALL_RESULT_MESSAGE = 0\nCALL_ACK_MESSAGE = 1\nNOTIFICATION_MESSAGE = 2\nCONTROL_MESSAGE = 3\nCALL_NACK_MESSAGE = 4\nPONG_MESSAGE = 5\n\n\nclass RPCServer(object):\n    def __init__(self, app, root=None):\n        self.monitor_events_task = None\n        self.app = app\n        self.loop = app.loop or asyncio.get_event_loop()\n        self.objects = {}\n        self.system = SystemCalls(self.objects)\n\n        self.root = root\n\n        # Allow for two concurrent calls max\n        self.executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)\n\n        self.clients = {}\n        self.tasks = []\n\n        self.app.router.add_get('/', self.handler)\n        self.app.on_shutdown.append(self.on_shutdown)\n\n    @property\n    def root(self):\n        return self._root\n\n    @root.setter\n    def root(self, value):\n        if self.monitor_events_task:\n            self.monitor_events_task.cancel()\n        self.monitor_events_task = \\\n            self.loop.create_task(self.monitor_events(value))\n        self._root = value\n\n    def start(self, host, port):\n        # This call will block while server is running\n        # run_app is capable of catching SIGINT and shutting down\n        log.info(\"Starting server on {}:{}\".format(host, port))\n        web.run_app(self.app, host=host, port=port)\n\n    def shutdown(self):\n        [task.cancel() for task, _ in self.clients.values()]\n        self.monitor_events_task.cancel()\n\n    async def on_shutdown(self, app):\n        \"\"\"\n        Graceful shutdown handler\n\n        See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown\n        \"\"\"\n        for ws in self.clients.copy():\n            await ws.close(code=WSCloseCode.GOING_AWAY,\n                           message='Server shutdown')\n        self.shutdown()\n\n    def send_worker(self, socket):\n        _id = id(socket)\n\n        def task_done(future):\n            try:\n                future.result()\n            except Exception:\n                log.exception(\"send_task for socket {} threw:\".format(_id))\n\n        async def send_task(socket, queue):\n            while True:\n                payload = await queue.get()\n                if socket.closed:\n                    log.debug('Websocket {0} closed'.format(_id))\n                    break\n\n                # see: http://aiohttp.readthedocs.io/en/stable/web_reference.html#aiohttp.web.StreamResponse.drain  # NOQA\n                await socket.drain()\n                await socket.send_json(payload)\n\n        queue = Queue(loop=self.loop)\n        task = self.loop.create_task(send_task(socket, queue))\n        task.add_done_callback(task_done)\n        log.debug('Send task for {0} started'.format(_id))\n\n        return (task, queue)\n\n    async def monitor_events(self, instance):\n        async for event in instance.notifications:\n            try:\n                # Apply notification_max_depth to control 
object tree depth\n # during serialization to avoid flooding comms\n data = self.call_and_serialize(\n lambda: event)\n self.send(\n {\n '$': {'type': NOTIFICATION_MESSAGE},\n 'data': data\n })\n except Exception:\n log.exception('While processing event {0}:'.format(event))\n\n async def handler(self, request):\n \"\"\"\n Receives HTTP request and negotiates up to a Websocket session\n \"\"\"\n\n def task_done(future):\n self.tasks.remove(future)\n exception = future.exception()\n if exception:\n log.warning(\n 'While processing message: {0}\\nDetails: {1}'.format(\n exception,\n traceback.format_exc())\n )\n\n client = web.WebSocketResponse(max_msg_size=0)\n client_id = id(client)\n\n # upgrade to Websockets\n await client.prepare(request)\n\n log.info('Opening Websocket {0}'.format(id(client)))\n\n try:\n await client.send_json({\n '$': {'type': CONTROL_MESSAGE, 'monitor': True},\n 'root': self.call_and_serialize(lambda: self.root),\n 'type': self.call_and_serialize(lambda: type(self.root))\n })\n except Exception:\n log.exception('While sending root info to {0}'.format(client_id))\n\n try:\n self.clients[client] = self.send_worker(client)\n # Async receive client data until websocket is closed\n async for msg in client:\n task = self.loop.create_task(self.process(msg))\n task.add_done_callback(task_done)\n self.tasks += [task]\n except Exception:\n log.exception('While reading from socket:')\n finally:\n log.info('Closing WebSocket {0}'.format(id(client)))\n await client.close()\n del self.clients[client]\n\n return client\n\n def build_call(self, _id, name, args):\n if _id not in self.objects:\n raise ValueError(\n 'object with id {0} not found'.format(_id))\n\n obj = self.objects[_id]\n function = getattr(type(obj), name)\n args = self.resolve_args(args)\n kwargs = {}\n # NOTE: since ECMAScript doesn't have a notion of named arguments\n # we are using a convention that the last dictionary parameter will\n # be expanded into kwargs. This introduces a risk of mistreating a\n # legitimate dictionary as kwargs, but we consider it very low.\n if (len(args) > 0) and (isinstance(args[-1], dict)):\n kwargs = args.pop()\n\n if not function:\n raise ValueError(\n 'Function {0} not found in {1}'.format(name, type(obj)))\n\n if not callable(function):\n raise ValueError(\n 'Attribute {0} of {1} is not a function'\n .format(name, type(obj)))\n\n return functools.partial(function, obj, *args, **kwargs)\n\n def resolve_args(self, args):\n \"\"\"\n Resolve function call arguments that have object ids\n into instances of these objects\n \"\"\"\n def resolve(a):\n if isinstance(a, dict):\n _id = a.get('i', None)\n # If it's a compound type (including dict)\n # Check if it has id (i) to determine that it has\n # a reference in object storage. 
If it's None, then it's\n # a dict originated at the remote\n return self.objects[_id] if _id else a['v']\n # if array, resolve it's elements\n if isinstance(a, (list, tuple)):\n return [resolve(i) for i in a]\n return a\n\n return [resolve(a) for a in args]\n\n async def process(self, message):\n try:\n if message.type == aiohttp.WSMsgType.TEXT:\n data = json.loads(message.data)\n meta = data.get('$', {})\n token = meta.get('token')\n _id = data.get('id')\n\n if meta.get('ping'):\n return self.send_pong()\n\n # if id is missing from payload or explicitely set to null,\n # use the system object\n if _id is None:\n _id = id(self.system)\n\n try:\n self.send_ack(token)\n func = self.build_call(\n _id=_id,\n name=data.get('name'),\n args=data.get('args', []))\n except Exception as e:\n log.exception(\"Exception during rpc.Server.process:\")\n error = '{0}: {1}'.format(e.__class__.__name__, e)\n self.send_error(error, token)\n else:\n response = await self.make_call(func, token)\n self.send(response)\n elif message.type == aiohttp.WSMsgType.ERROR:\n log.error(\n 'WebSocket connection closed unexpectedly: {0}'.format(\n message))\n else:\n log.warning('Unhandled WSMsgType: {0}'.format(message.type))\n except Exception:\n log.exception('Error while processing request')\n\n def call_and_serialize(self, func, max_depth=0):\n # XXXX: This should really only be called in a new thread (as in\n # the normal case where it is called in a threadpool)\n call_result = func()\n serialized, refs = serialize.get_object_tree(\n call_result, max_depth=max_depth)\n self.objects.update(refs)\n return serialized\n\n async def make_call(self, func, token):\n response = {'$': {'type': CALL_RESULT_MESSAGE, 'token': token}}\n try:\n call_result = await self.loop.run_in_executor(\n self.executor, self.call_and_serialize, func)\n response['$']['status'] = 'success'\n except ExceptionInProtocolError as eipe:\n log.exception(\"Smart exception in protocol\")\n response['$']['status'] = 'error'\n call_result = {\n 'message': str(eipe),\n 'traceback': ''.join(traceback.format_exception(\n type(eipe.original_exc),\n eipe.original_exc,\n eipe.original_tb))\n }\n except Exception as e:\n log.exception(\"Exception during RPC call:\")\n trace = traceback.format_exc()\n try:\n line_msg = ' [line ' + [\n l.split(',')[0].strip()\n for l in trace.split('line') # noqa(E741)\n if '' in l][0] + ']'\n except Exception:\n line_msg = ''\n finally:\n response['$']['status'] = 'error'\n call_result = {\n 'message': '{0}{1}: {2}'.format(\n e.__class__.__name__, line_msg, str(e)),\n 'traceback': trace\n }\n finally:\n response['data'] = call_result\n return response\n\n def send_error(self, text, token):\n self.send({\n '$': {\n 'token': token,\n 'type': CALL_NACK_MESSAGE\n },\n 'reason': text\n })\n\n def send_ack(self, token):\n self.send({\n '$': {\n 'token': token,\n 'type': CALL_ACK_MESSAGE\n }\n })\n\n def send_pong(self):\n self.send({\n '$': {\n 'type': PONG_MESSAGE\n }\n })\n\n def send(self, payload):\n for socket, value in self.clients.items():\n task, queue = value\n asyncio.run_coroutine_threadsafe(queue.put(payload), self.loop)\n\n\nclass SystemCalls(object):\n def __init__(self, objects):\n self.objects = objects\n objects[id(self)] = self\n\n def get_object_by_id(self, id):\n return self.objects[id]\n","sub_path":"api/src/opentrons/server/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":11499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"462906326","text":"import pyttsx3\nimport datetime\nimport speech_recognition as sr\nimport webbrowser as wb\nimport os\n\nkyoshiro = pyttsx3.init()\nvoice = kyoshiro.getProperty('voices')\nkyoshiro.setProperty('voice', voice[1].id)\n\ndef speak(audio):\n print('Kyoshiro said: ' + audio)\n kyoshiro.say(audio)\n kyoshiro.runAndWait()\ndef time():\n Time = datetime.datetime.now().strftime(\"%I:%M:%p\")\n speak(Time)\ndef welcome():\n hour = datetime.datetime.now().hour\n if hour >= 6 and hour < 10:\n speak('Good morning Boss')\n elif hour >= 10 and hour <13:\n speak('Good lunch Boss')\n elif hour >= 13 and hour < 18:\n speak('Good afternoon Boss')\n elif hour >= 18 and hour < 22:\n speak('Good evening Boss')\n else:\n speak('Good midnight Boss')\ndef solve():\n c = sr.Recognizer()\n with sr.Microphone() as source:\n c.pause_theshold = 2\n audio = c.listen(source)\n try:\n query = c.recognize_google(audio, language='en')\n print(\"Otama: \" + query)\n except sr.UnknownValueError:\n print('Hi sir, Please repeat!')\n query = input('Your order is: ')\n return query\nif __name__ == \"__main__\":\n welcome()\n while True:\n query = solve().lower()\n if \"google\" in query:\n speak(\"What do you search on google?\")\n search=solve().lower()\n url = f'https://www.google.com/search?q={search}'\n wb.get().open(url)\n speak(\"This is thing that you searched\")\n elif \"youtube\" in query:\n speak(\"What do you search on youtube?\")\n search=solve().lower()\n url = f'https://www.youtube.com/search?q={search}'\n wb.get().open(url)\n speak(\"This is thing that you searched\")\n elif \"facebook\" in query:\n url = 'https://www.facebook.com/'\n wb.get().open(url)\n speak(\"Okay\")\n elif \"boss\" in query:\n url = 'https://www.facebook.com/lutakrystal305/'\n wb.get().open(url)\n speak(\"Hi Boss\")\n elif \"time\" in query or \"what time is it\" in query:\n time()\n elif \"Photoshop\" in query:\n dir = r\"C:\\Program Files\\Adobe\\Adobe Photoshop 2021\\photoshop\"\n os.startfile(dir)\n elif \"quit\" in query:\n speak(\"Kyoshiro is coming to Wano, bye Boss\")\n quit()\n","sub_path":"vd.py","file_name":"vd.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"651676975","text":"# -*- coding=utf-8 -*-\nclass GetOccurTimesOfOne():\n \"\"\"计算1-n中所有数字中1出现的次数\"\"\"\n\n def get_occur_time_of_one(self, num):\n if num is None or num < 1:\n return 0\n\n length = self.get_num_length(num)\n first = int(str(num)[0])\n # 第一位\n if first == 1:\n first_sum = num % self.base_10(length-1) + 1\n if first != 1:\n first_sum = self.base_10(length - 1)\n # 其他位\n other_num = self.base_10(length - 2) * (length - 1) * first\n next_num = self.get_occur_time_of_one(num % self.base_10(length-1))\n return first_sum + other_num + next_num\n\n def base_10(self, length):\n num = 1\n while length > 0:\n num *= 10\n length -= 1\n return num\n\n def get_num_length(self, num):\n length = 0\n while num != 0:\n length += 1\n num = num // 10\n return length\n\n\nif __name__ == \"__main__\":\n ex = GetOccurTimesOfOne()\n num = 23457\n print(ex.get_occur_time_of_one(num))\n","sub_path":"43_accur_times_of_1.py","file_name":"43_accur_times_of_1.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"109277653","text":"# -*- coding: utf-8 -*-\n# Copyright © 2016 Raúl Benito \n# This program is free software: you can redistribute it and/or modify\n# it 
under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n\n# Setup for Tweesten\n'''Setup for Tweesten'''\n\n# standard library imports\nimport os.path\n\n# external library imports\nfrom setuptools import setup\n\nCLASSIFIERS = [\n    'Intended Audience :: End Users/Desktop',\n    'Environment :: Console',\n    'License :: OSI Approved :: GNU General Public License (GPL)',\n    'Operating System :: POSIX :: Linux',\n    'Programming Language :: Python :: 3.4'\n]\n\nsetup(\n    name='tweesten',\n    version='0.3.0',\n    license='GNU GPL v3',\n    description=('twitter bot to tweet a collage of album covers ' +\n                 'based on your last.fm scrobbles'),\n    long_description=('twitter bot to tweet a collage of album covers ' +\n                      'based on your last.fm scrobbles'),\n    classifiers=CLASSIFIERS,\n    author='Raúl Benito',\n    author_email='erre.benito@gmail.com',\n    url='https://github.com/errebenito/tweesten',\n    download_url='https://github.com/errebenito/tweesten/releases/tag/v0.3.0',\n    packages=['tweesten'],\n    scripts=['scripts/tweesten'],\n    install_requires=['tweepy>=3.5.0'],\n)\n","sub_path":"pypi_install_script/tweesten-0.3.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"541755178","text":"__author__ = 'Girish'\n\nimport requests\n\nimport re\ntest = requests.get(\"http://www.spoj.com/problems/classical/\")\ndata = test.content.decode()\nprint(data)\ndata= re.sub(\"[\\\\n\\\\t]+\",\"\",data)\n\n#for understanding the regex for html\nresult= re.findall(r\"\"\"\n    [\\w\\s\\(\\)\\-]*\n\"\"\",data,re.X|re.M)\n\n#regex for SPOJ\nresult2= re.finditer(r\"\"\"\n    (?P<number>\\s*\\d+\\s*)\n    ([\\s\\w\\=\\<\\\"]*\\>)\n    ()\n    (?P<name>[\\w\\s\\,]*)\n\"\"\",data,re.X|re.M)\nfor match in result2:\n    print(match.groupdict())\n\n","sub_path":"code_trials/red.py","file_name":"red.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"483757287","text":"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.distributions import shifted_categorical\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.networks.q_network import QNetwork\n\nfrom tf_agents.utils 
import common\n\nfrom tensorflow.python.framework import tensor_spec\nimport copy\n\ntfd = tfp.distributions\n\n@gin.configurable\nclass HeteroQPolicy(tf_policy.Base):\n \"\"\"Class to build Q-Policies.\"\"\"\n\n def __init__(self,\n time_step_spec,\n action_spec,\n mixed_q_network,\n func_arg_mask=None,\n emit_log_probability=False,\n spatial_names=(\"screen\", \"minimap\"),\n structured_names=(\"structured\",),\n use_previous_action=False,\n name=None):\n \"\"\"Builds a Q-Policy given a q_network.\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n mixed_q_network: An instance of a `tf_agents.network.Network`,\n callable via `network(observation, step_type) -> (output, final_state)`.\n emit_log_probability: Whether to emit log-probs in info of `PolicyStep`.\n func_arg_mask: A mask Tensor to allow action specific selection of arguments.\n name: The name of this policy. All variables in this module will fall\n under that name. Defaults to the class name.\n\n Raises:\n ValueError: If `q_network.action_spec` exists and is not compatible with\n `action_spec`.\n NotImplementedError: If `action_spec` contains more than one\n `BoundedTensorSpec`.\n \"\"\"\n network_action_spec = getattr(mixed_q_network, 'action_spec', None)\n\n self._available_actions_key = 'available_actions'\n self._func_action_key = 'func_action'\n self._previous_action_key = \"previous_action\"\n\n if use_previous_action != (self._previous_action_key in time_step_spec.observation):\n raise ValueError(\"use_previous_action is not consistent with time_step_spec.observation previous_action\")\n if use_previous_action != (self._func_action_key in time_step_spec.observation):\n raise ValueError(\"use_previous_action is not consistent with time_step_spec.observation func_action\")\n if use_previous_action != (func_arg_mask is not None):\n raise ValueError(\"use_previous_action is not consistent with func_arg_mask\")\n\n if network_action_spec is not None:\n if not isinstance(network_action_spec, (list, dict, tuple)):\n if not action_spec.is_compatible_with(network_action_spec):\n raise ValueError(\n 'action_spec must be compatible with mixed_q_network.action_spec; '\n 'instead got action_spec=%s, mixed_q_network.action_spec=%s' % (\n action_spec, network_action_spec))\n else:\n compatible = [a.is_compatible_with(b)\n for a, b in zip(tf.nest.flatten(action_spec), tf.nest.flatten(network_action_spec))]\n if not all(compatible):\n raise ValueError(\n 'action_spec must be compatible with mixed_q_network.action_spec; ')\n\n if func_arg_mask is not None:\n # create zero mask for previous masked action\n assert len(func_arg_mask.shape) == 2\n self._func_arg_mask = copy.deepcopy(func_arg_mask)\n self._func_arg_mask = np.concatenate([self._func_arg_mask,\n np.zeros((1, self._func_arg_mask.shape[1]), np.float32)],\n axis=0)\n else:\n self._func_arg_mask = copy.deepcopy(func_arg_mask)\n\n self._mixed_q_network = mixed_q_network\n self._spatial_names = spatial_names\n self._structured_names = structured_names\n self._use_previous_action = use_previous_action\n self._discrete_action_key, self._continuous_action_key = 'discrete', 'continuous'\n\n self._keyed_action_spec = action_spec\n self._total_num_actions = None\n action_spec = self._transform_action_spec(time_step_spec, action_spec,\n spatial_names, structured_names)\n\n super(HeteroQPolicy, self).__init__(\n time_step_spec,\n action_spec,\n policy_state_spec=mixed_q_network.state_spec,\n 
clip=False,\n        emit_log_probability=emit_log_probability,\n        name=name)\n\n    flat_action_spec = tf.nest.flatten(action_spec)\n    if len(flat_action_spec) > 1:\n      print('hetero Q policy supports action BoundedTensorSpec with size > 1')\n    # We need to maintain the flat action spec for dtype, shape and range.\n    self._flat_action_spec = flat_action_spec\n\n    self._action_lookup = []\n    self._build_action_lookup()\n\n  @property\n  def keyed_action_spec(self):\n    return self._keyed_action_spec\n\n  def _variables(self):\n    return self._mixed_q_network.variables\n\n  @property\n  def total_num_actions(self):\n    return self._total_num_actions\n\n  def _transform_action_spec(self, time_step_spec, action_spec, spatial_names, structured_names):\n\n    num_actions = 0\n    for name in spatial_names:\n      assert name in time_step_spec.observation\n      height = time_step_spec.observation[name][0].shape[0]\n      width = time_step_spec.observation[name][0].shape[1]\n      if name in action_spec:\n        num_actions += height*width*(action_spec[name].maximum[0]-action_spec[name].minimum[0]+1)\n    for name in structured_names:\n      if name in action_spec:\n        num_actions += action_spec[name].maximum[0]-action_spec[name].minimum[0]+1\n\n    # include no-op action\n    self._total_num_actions = num_actions + 1\n\n    return tensor_spec.BoundedTensorSpec(\n        shape=(), dtype=np.int32, name='combined_action_spec',\n        minimum=(0,),\n        maximum=(num_actions - 1,))\n\n  def _build_action_lookup(self):\n\n    num_actions = 0\n    num_action_types = 0\n    for name in self._spatial_names:\n      assert name in self._time_step_spec.observation\n\n      if name in self._keyed_action_spec:\n        num_types = self._keyed_action_spec[name].maximum[0] - self._keyed_action_spec[name].minimum[0] + 1\n        height = self._time_step_spec.observation[name][0].shape[0]\n        width = self._time_step_spec.observation[name][0].shape[1]\n\n        # iterate through flattened q value index\n        for i in range(height):\n          for j in range(width):\n            for k in range(num_types):\n              # normalize coordinates\n              # self._action_lookup.append([float(num_action_types+k), i/float(height), j/float(width)])\n              # change y coordinate to x coordinate\n              self._action_lookup.append(\n                  [float(num_action_types + k), j / float(width), i / float(height)])\n              num_actions += 1\n        num_action_types += num_types\n\n    for name in self._structured_names:\n      if name in self._keyed_action_spec:\n        num_types = self._keyed_action_spec[name].maximum[0]-self._keyed_action_spec[name].minimum[0]+1\n        for k in range(num_types):\n          # self._action_lookup[num_actions] = (float(num_action_types), (0.0, 0.0))\n          self._action_lookup.append([float(num_action_types), 0.0, 0.0])\n          num_actions += 1\n          num_action_types += 1\n\n    # embed masked action at the end\n    # self._action_lookup[num_actions] = (float(num_action_types), (0.0, 0.0))\n    self._action_lookup.append([float(num_action_types), 0.0, 0.0])\n    self._action_lookup = tf.convert_to_tensor(self._action_lookup)\n\n    if self._total_num_actions - 1 != num_actions:\n      raise ValueError(\"total number of actions don't match between action lookup build and \"\n                       \"action spec transform\")\n    return\n\n  def inverse_transform_action(self, action):\n\n    if action.shape.ndims > 1:\n      assert action.shape[1] == 1\n      action = tf.squeeze(action, axis=-1)\n\n    transformed_actions = []\n    action_per_batch = tf.unstack(action)\n    for act in action_per_batch:\n      # tf.convert_to_tensor([act, self._total_num_actions])\n      assert_op = tf.Assert(tf.less_equal(act, self._total_num_actions - 1), [act, self._total_num_actions])\n      with tf.control_dependencies([assert_op]):\n        # if act.numpy() > 
self._total_num_actions - 1:\n        #   raise ValueError(\"action index exceeds total number of actions\")\n        # act = tf.convert_to_tensor(tf.nest.flatten(self._action_lookup[act.numpy()]))\n        assert act.shape.ndims < 2\n        act = tf.gather(self._action_lookup, act)\n        act = tf.reshape(act, [1, -1])\n        transformed_actions.append(act)\n    transformed_actions = tf.concat(transformed_actions, axis=0)\n\n    return transformed_actions\n\n  def _distribution(self, time_step, policy_state):\n    # In DQN, we always either take a uniformly random action, or the action\n    # with the highest Q-value. However, to support more complicated policies,\n    # we expose all Q-values as a categorical distribution with Q-values as\n    # logits, and apply the GreedyPolicy wrapper in dqn_agent.py to select the\n    # action with the highest Q-value.\n\n    neg_inf = tf.constant(-np.inf, dtype=tf.float32)\n    previous_action = time_step.observation[self._previous_action_key] if self._use_previous_action else None\n    func_action = time_step.observation[self._func_action_key] if self._use_previous_action else None\n\n    assert (self._available_actions_key in time_step.observation) == \\\n           (self._available_actions_key in self._time_step_spec.observation)\n    available_actions = None\n    if self._available_actions_key in time_step.observation:\n      available_actions = time_step.observation[self._available_actions_key]\n\n    # time_step.observation is a dict of screen, minimap and structured info.\n    time_step_obs = time_step.observation\n    (spatial_q_values, structured_q_values), q_policy_state = self._mixed_q_network(\n        time_step_obs, time_step.step_type, policy_state)\n    assert isinstance(spatial_q_values, dict) and isinstance(structured_q_values, dict)\n    if available_actions is not None:\n      available_actions = tf.convert_to_tensor(available_actions)\n      assert all([available_actions.shape == t.shape for t in structured_q_values.values()])\n      structured_q_values = tf.nest.map_structure(lambda x: tf.where(tf.equal(available_actions, 1), x, neg_inf),\n                                                  structured_q_values)\n\n    # TODO(b/122314058): Validate and enforce that sampling distributions\n    # created with the q_network logits generate the right action shapes. This\n    # is currently patching the problem.\n\n    # If the action spec says each action should be shaped (1,), add another\n    # dimension so the final shape is (B, 1, A), where A is the number of\n    # actions. This will make Categorical emit events shaped (B, 1) rather than\n    # (B,). 
Using axis -2 to allow for (B, T, 1, A) shaped q_values.\n    assert all([s.shape.ndims == 1 for s in self._flat_action_spec]) \\\n           or all([s.shape.ndims == 0 for s in self._flat_action_spec]), \\\n      \"all action specs' ndims should be consistently 1 or 0\"\n\n    if previous_action is not None:\n      tf.assert_equal(tf.add_n([spatial_q_values[k].shape[-1] if k in spatial_q_values else 0\n                                for k in self._spatial_names]\n                               + [structured_q_values[k].shape[-1] if k in structured_q_values else 0\n                                  for k in self._structured_names]),\n                      self._func_arg_mask.shape[-1])\n\n    if func_action is None:\n      discrete_func_action = None\n    else:\n      assert isinstance(previous_action, dict)\n      discrete_func_action = func_action[self._discrete_action_key] \\\n        if self._discrete_action_key in func_action else None\n    spatial_q_values, structured_q_values = self._mask_logits(spatial_q_values, structured_q_values,\n                                                              discrete_func_action)\n\n    if self._flat_action_spec[0].shape.ndims == 1:\n      for k, v in spatial_q_values.items():\n        spatial_q_values[k] = tf.expand_dims(v, -2)\n      for k, v in structured_q_values.items():\n        structured_q_values[k] = tf.expand_dims(v, -2)\n\n    # concatenate q_values into single representation\n    q_values = []\n    for name in self._spatial_names:\n      if name in spatial_q_values:\n        q_values.append(spatial_q_values[name])\n    for name in self._structured_names:\n      if name in structured_q_values:\n        q_values.append(structured_q_values[name])\n    q_values = tf.concat(q_values, axis=-1)\n\n    ##TODO: mask_split_fn needs to be investigated\n    # logits = spatial_q_values[self._spatial_names[0]]\n    # mask_split_fn = self._q_network.mask_split_fn\n    #\n    # neg_inf = tf.constant(-np.inf, dtype=tf.float32)\n    # if mask_split_fn:\n    #   _, mask = mask_split_fn(time_step.observation)\n    #\n    #   # Expand the mask as needed in the same way as q_values above.\n    #   if self._flat_action_spec.shape.ndims == 1:\n    #     mask = tf.expand_dims(mask, -2)\n    #\n    #   # Overwrite the logits for invalid actions to -inf.\n    #   logits = tf.compat.v2.where(tf.cast(mask, tf.bool), logits, neg_inf)\n\n    # TODO(kbanoop): Handle distributions over nests.\n    q_distribution = shifted_categorical.ShiftedCategorical(\n        logits=q_values, dtype=tf.int32, shift=0)\n\n    # q_distributions = dict()\n    # for k, v in spatial_q_values.items():\n    #   print(v.shape)\n    #   q_distributions[k] = shifted_categorical.ShiftedCategorical(\n    #       logits=v,\n    #       dtype=self._action_spec[k].dtype,\n    #       shift=self._action_spec[k].minimum[0])\n    # for k, v in structured_q_values.items():\n    #   print(v.shape)\n    #   q_distributions[k] = shifted_categorical.ShiftedCategorical(\n    #       logits=v,\n    #       dtype=self._action_spec[k].dtype,\n    #       shift=self._action_spec[k].minimum[0])\n    # q_distribution = tf.nest.pack_sequence_as(self._action_spec, [q_distribution])\n\n    return policy_step.PolicyStep(q_distribution, q_policy_state)\n\n  def _action(self, time_step, policy_state, seed):\n    \"\"\"Implementation of `action`.\n\n    Args:\n      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n      policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n        representing the previous policy_state.\n      seed: Seed to use if action performs sampling (optional).\n\n    Returns:\n      A `PolicyStep` named tuple containing:\n        `action`: An action Tensor matching the `action_spec()`.\n        `state`: A policy state tensor to be fed into the next call to action.\n        `info`: Optional side information such as action log probabilities.\n    \"\"\"\n    seed_stream = tfd.SeedStream(seed=seed, salt='hetero_q_policy')\n    distribution_step = self._distribution(time_step, 
policy_state)\n actions = tf.nest.map_structure(lambda d: d.sample(seed=seed_stream()),\n distribution_step.action)\n info = distribution_step.info\n if self.emit_log_probability:\n try:\n log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),\n actions,\n distribution_step.action)\n info = policy_step.set_log_probability(info, log_probability)\n except:\n raise TypeError('%s does not support emitting log-probabilities.' %\n type(self).__name__)\n\n ##TODO: need to consider whether we convert action to (func, arg) here or in actionwrapper.\n\n\n return distribution_step._replace(action=actions, info=info)\n\n\n def _convert_spatial_action(self, action, height=None, width=None):\n\n if (height and not width) or (not height and width):\n raise ValueError('height/width should co-exist')\n\n if height and width:\n action_height = tf.cast(action/width, tf.int32)\n action_width = tf.cast(action - action_height*width, tf.int32)\n discrete_action = tf.cast(action/(height*width), tf.int32)\n\n return discrete_action, (action_height, action_width)\n else:\n return action\n\n def _mask_logits(self, spatial_q_values, structured_q_values, previous_action):\n\n # mask logits based on previous action taken\n\n neg_inf = tf.constant(-np.inf, dtype=tf.float32)\n if self._func_arg_mask is not None and previous_action is not None:\n assert previous_action.shape.ndims == 1\n\n previous_action_mask = tf.gather(self._func_arg_mask, previous_action)\n assert previous_action_mask.shape.ndims == 2\n idx_offset = 0\n for name in self._spatial_names:\n if name in spatial_q_values:\n q_values_shape = spatial_q_values[name].shape\n height, width, nchannels = q_values_shape[1], q_values_shape[2], q_values_shape[-1]\n sliced_previous_action_mask = previous_action_mask[:, idx_offset:idx_offset+nchannels]\n sliced_previous_action_mask = tf.expand_dims(tf.expand_dims(sliced_previous_action_mask, axis=1), axis=1)\n broadcast_previous_action_mask = tf.tile(sliced_previous_action_mask, [1, height, width, 1])\n tf.assert_equal(spatial_q_values[name].shape, broadcast_previous_action_mask.shape)\n\n spatial_q_values[name] = tf.compat.v2.where(\n tf.cast(tf.equal(broadcast_previous_action_mask, 1), tf.bool), spatial_q_values[name], neg_inf)\n spatial_q_values[name] = tf.reshape(spatial_q_values[name], [q_values_shape[0], -1])\n idx_offset += nchannels\n\n for name in self._structured_names:\n if name in structured_q_values:\n nchannels = structured_q_values[name].shape[-1]\n sliced_previous_action_mask = previous_action_mask[:, idx_offset:idx_offset+nchannels]\n tf.assert_equal(structured_q_values[name].shape, sliced_previous_action_mask.shape)\n\n structured_q_values[name] = tf.compat.v2.where(\n tf.cast(tf.equal(sliced_previous_action_mask, 1), tf.bool), structured_q_values[name], neg_inf)\n idx_offset += nchannels\n\n if idx_offset != self._func_arg_mask.shape[1]:\n raise ValueError(\"feature num of channels doesn't match func_arg mask shape\")\n else:\n for name, v in spatial_q_values.items():\n q_values_shape = v.shape\n spatial_q_values[name] = tf.reshape(v, [q_values_shape[0], -1])\n\n return spatial_q_values, structured_q_values\n","sub_path":"tf_agents/policies/hetero_q_policy.py","file_name":"hetero_q_policy.py","file_ext":"py","file_size_in_byte":20902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"354480579","text":"#a simulator that uses classes , to recreate the idea of evolution\n#non-graphical for now\n#\n#made by: Pablo en Wayne\n#started 
planning: 10-11-2013\n#\n#everything should be logged\n#every entity will be an object, with functions and variables, detailing relationship handling and health\n#the number of class files, from which the objects originate, should grow in time\n#\n#every entity will be in a group just like animals and trees are their own group\n#the groups will have certain characteristics, and will define how an entity handles communication with an entity from another group\n\n#a dictionary (variable) with a list of active entities\n#every entity has its own name so as to create the possibility of recognizing different entities\n#entities can be omnivore, carnivore etc.\n#to live, the entities should eat what their diet says, and might have to kill.\n#different entities should have different food values (kg?)\n#\n#Entities should have different things like mouths, legs and stuff\n#also a food chain value, deciding if they are a predator or not compared to another entity\n#this will simulate the survival of the fittest\n#\n#the foodconstant will be decided by how much the entity weighs, and the mouthPower\n#so an animal with a 3.0 foodconstant can get 3* 5 kg animal compared to 2.5 * 5kg foodconstant\n#foodconstant is calculated by: (100/mass_of_entity)*mouth_power\n#e.g. (100/60kg)*1.05mp= 1.75 food/kg\n#an animal is full when it reaches 100 'food points'\n#\n#Some animals should be more social, making them not kill each other\n#\n#entity functions:\n#-aggressive/destructive (danger)\n#-feeding (after killing)\n#-friendly (creates a relationship making entities friends)\n#-reproduction with the possibility of creating new entities\n#-.foo some should not be able to do certain things (like reproducing)\n#\n#the output to see how the entities are doing should be low, with some statistics\n#active entities: x\n#different entities: a\n#killed: b\n#average health: c\n#\n\n#modules\nimport os\nimport time\nimport random\nimport copy\nfrom tkinter import *\nimport pickle #database\n\n#supporting functions\ndef ERROR(ERROR_str=\"\", ERROR_code='001'):\n    print(ERROR_str)\n    if ERROR_code[-1]=='0':\n        print(\"error severe,\",ERROR_str,ERROR_code)\n        os.system(\"pause\")\n        exit()\n    elif ERROR_code[-1]=='1':\n        print(\"trying to resume,\",ERROR_str,ERROR_code)\n        os.system(\"pause\")\n\n\n#main functions\ndef create(abject,name='entity'):\n    global number\n    number+=1\n    name=random.choice(list(all_classes.keys()))\n    abject=all_classes[name]()\n    name+=str(number)\n    return name, abject\n\ndef create_child():\n    global number, entity0, entity1, active_entity\n\n    winner=random.choice([entity0,entity1])\n    new_entity=copy.deepcopy(winner)\n    if entity0.mouth != entity1.mouth:\n        if entity0.mouth=='None' or entity1.mouth=='None':\n            new_entity.mouth='None'\n        elif entity0.mouth=='periwinkleblue' or entity1.mouth=='periwinkleblue':\n            new_entity.mouth='periwinkleblue'\n    new_entity.social=(entity0.social+entity1.social)/2\n    new_entity.weight=(entity0.weight+entity1.weight)/2\n    new_entity.predator=(entity0.predator+entity1.predator)/2\n\n    name=new_entity.family+str(number)\n    number+=1\n    active_entity[name]=new_entity\n\n\n\ndef kill():\n    global entity0, entity1, death\n    if entity1.family=='tree':\n        entity1.health-=17\n        #health increases by\n        entity0.health+=(entity1.weight/6*mouths[entity0.mouth][0])\n    else:\n        entity1.health=0\n        #health increases by\n        entity0.health+=(entity1.weight/(entity1.health+1)*mouths[entity0.mouth][0])\n        death+=1\n\ndef befriend():\n    global entity0, entity1\n    entity0.friends.append(entity1)\n    
entity1.friends.append(entity0)\n\n\ntry:\n    from classes import *\nexcept:\n    ERROR(\"import ERROR: classes.py couldn't be loaded\",'000')\n\n#first run\n\n#graphic initialization\n#screen=Tk()\n#screen.title(\"entity simulator\")\n#screen.geometry(\"900x600\")\n#screen.mainloop()\n\n#first entities\nnumber=0\nactive_entity={}\n\n#generate random entities from the existing families\nfor n in range(20):\n    name,abject=create(number)\n    active_entity[name]=abject\n\n#main\nprint(\"Welcome to the entity survival simulation\")\ndelay=input(\"choose a time delay (recommended 2) in seconds: \")\ninformation=input(\"would you like extra output information?\")\nif information in ['yes','yeah','ja','sure','ye']:\n    information = True\nelse:\n    information = False\nos.system('cls')\nall_data=list()\nrounds=0\ndeath=0\nborn=0\nwhile True:\n    #entity interaction (by chance)\n    for name0 in list(active_entity.keys()):\n        done = False\n        entity0=active_entity[name0]\n        name1=random.choice(list(active_entity.keys()))\n        entity1=active_entity[name1]\n        #make sure the entity is not interacting with itself\n        if entity0.health<1:\n            continue\n        loops=0\n        while entity0==entity1 or entity1.health<1:\n            if loops>20:\n                ERROR(\"couldn't find any active entities\",'100')\n            name1=random.choice(list(active_entity.keys()))\n            entity1=active_entity[name1]\n            loops+=1\n        #entity interaction_choice\n        for g in range(1):\n            if (entity0.health<40 or random.randrange(4)==1) and (entity1 not in entity0.friends) and done==False:\n                if entity1.family in diets[mouths[entity0.mouth][1]] and (entity0.predator>entity1.predator or (entity0.health<40 and random.randrange(3)==1)):\n                    kill()\n                    done=True\n            try:\n                if (entity0.social in range(entity1.social-24,entity1.social+25)) and done==False:\n                    if entity1 in entity0.friends and random.randrange(5)==3:\n                        create_child()\n                        born+=1\n                        done=True\n                    elif random.randrange(2)==1 and entity0 not in entity1.friends:\n                        befriend()\n                        done=True\n            except AttributeError:\n                pass\n\n    #entity health check and delete\n    family_members={'tree':0,'orangered':0,'periwinkleblue':0,'standard':0}\n    for name in list(active_entity.keys()):\n        if active_entity[name].health<=0:\n            del active_entity[name]\n        else:\n            family_members[active_entity[name].family]+=1\n            if active_entity[name].health>100:\n                active_entity[name].health=100\n            if active_entity[name].family != 'tree': active_entity[name].health-=5\n\n    #output (logging)\n    #print logging\n    data=[]\n    print('\\nround:',str(rounds))\n    print(\"entities alive:\",str(len(list(active_entity.keys()))))\n    print('death:',str(death))\n    print('born:', str(born))\n    data=[rounds, len(list(active_entity.keys())), death, born]\n    if information==True:\n        lst=[]\n        fam=\"\"\n        for name in list(active_entity.keys()):\n            if active_entity[name].family not in lst:\n                lst.append(active_entity[name].family)\n                fam=fam+\", \"+active_entity[name].family\n        print(\"living families:\", fam)\n        print(\"living entities:\", str(list(active_entity.keys())))\n        data=data+[fam,list(active_entity.keys())]\n    all_data.append(data)\n    #file logging\n    try:\n        dat = open(\"data.dat\", \"wb\")\n        pickle.dump(all_data,dat)\n        dat.close()\n    except:\n        pass\n    #time_delay\n    try:\n        time.sleep(int(delay))\n    except:\n        time.sleep(2)\n    os.system('cls')\n    print('\\nround:',str(rounds))\n    print(\"entities alive:\",str(len(list(active_entity.keys()))))\n    print('death:\\t',str(death))\n    print('born:\\t', str(born))\n    if information==True:\n        print(\"living families:\", fam)\n        print(\"living entities:\", str(list(active_entity.keys())))\n    
rounds+=1\n\n# 2014 Pablo ","sub_path":"source/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"158143192","text":"# sieve of Eratosthenes: check[j] becomes True when j is composite\ncheck = [False]*10001\n\nfor i in range(2, 10001):\n    if i*i > 10000:\n        break\n    for j in range(2*i, 10001, i):\n        check[j] = True\n\n# collect the primes in [n, m]; 'total' avoids shadowing the builtin sum\nprimes = []\ntotal = 0\nn = int(input())\nm = int(input())\nfor i in range(n, m+1):\n    if i > 1 and not check[i]:\n        primes.append(i)\n        total += i\nif primes:\n    print(total)\n    print(min(primes))\nelse:\n    print(-1)\n","sub_path":"기본 문법/2581/2581.py","file_name":"2581.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"636293703","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nimport base64\nimport imghdr\nimport importlib\nimport json\nimport logging\nimport uuid\n\nfrom google.appengine.api import images\nfrom google.appengine.ext import deferred, ndb\n\nfrom mcfw.consts import MISSING\nfrom mcfw.rpc import returns, arguments\nfrom rogerthat.bizz.job import run_job\nfrom rogerthat.bizz.payment.response_handlers import update_payment_providers_response_handler, \\\n    update_payment_provider_response_handler, update_payment_assets_response_handler, \\\n    update_payment_asset_response_handler, update_payment_status_response_handler\nfrom rogerthat.bizz.user import get_lang\nfrom rogerthat.capi.payment import updatePaymentProvider, updatePaymentStatus, updatePaymentProviders, \\\n    updatePaymentAssets, updatePaymentAsset\nfrom rogerthat.dal.payment import get_payment_provider, get_payment_providers, get_payment_user, get_payment_user_key, \\\n    get_payment_service, get_payment_service_key\nfrom rogerthat.dal.profile import get_user_profile\nfrom rogerthat.exceptions.payment import PaymentProviderNotFoundException, PaymentProviderAlreadyExistsException, \\\n    InvalidPaymentProviderException, InvalidPaymentImageException, PaymentProviderNoOauthSettingsException, \\\n    PaymentException\nfrom rogerthat.models import Image\nfrom rogerthat.models.apps import EmbeddedApplication\nfrom rogerthat.models.payment import PaymentProvider, PaymentUser, PaymentPendingReceive, PaymentUserProvider, \\\n    PaymentService, PaymentServiceProvider\nfrom rogerthat.rpc import users\nfrom rogerthat.rpc.rpc import logError\nfrom rogerthat.settings import get_server_settings\nfrom rogerthat.to.payment import AppPaymentProviderTO, UpdatePaymentProviderRequestTO, UpdatePaymentStatusRequestTO, \\\n    ErrorPaymentTO, PendingPaymentTO, PendingPaymentDetailsTO, \\\n    PaymentProviderAssetTO, UpdatePaymentProvidersRequestTO, UpdatePaymentAssetsRequestTO, PaymentProviderTO, \\\n    CreatePaymentAssetTO, CryptoTransactionTO, GetPaymentProfileResponseTO, TargetInfoTO, TargetInfoAssetTO, \\\n    CreateTransactionResultTO\nfrom rogerthat.to.service import UserDetailsTO\nfrom 
rogerthat.utils import now\nfrom rogerthat.utils.app import get_app_id_from_app_user, create_app_user_by_email\nfrom rogerthat.utils.service import create_service_identity_user\n\nIMAGE_MAX_SIZE = 102400 # 100kb\n\n\ndef get_api_module(provider_id, log_error=True):\n module_name = 'rogerthat.bizz.payment.providers.%s.api' % provider_id\n try:\n return importlib.import_module(module_name)\n except ImportError:\n if log_error:\n logging.error('Payment module %s not found', module_name)\n else:\n logging.info('Payment module %s not found', module_name)\n return None\n\n\ndef is_valid_provider_id(provider_id):\n if get_api_module(provider_id, False):\n return True\n return False\n\n\ndef _create_image(image):\n try:\n _meta, img_b64 = image.split(',')\n image = base64.b64decode(img_b64)\n except:\n raise InvalidPaymentImageException()\n\n image_type = imghdr.what(None, image)\n img = images.Image(image)\n orig_width = img.width\n orig_height = img.height\n if orig_width != orig_height:\n logging.info('Image has an invalid ratio. (expected 1:1, got %s:%s)', orig_width, orig_height)\n raise InvalidPaymentImageException('invalid_aspect_ratio', {'aspect_ratio': '1:1'})\n\n img = images.Image(image)\n img.resize(250, 250)\n image_model = Image(blob=img.execute_transforms(images.JPEG if image_type == 'jpeg' else images.PNG))\n image_model.put()\n return image_model\n\n\ndef _get_logo_id(original_id, logo):\n if logo is not MISSING and logo:\n image = _create_image(logo)\n if original_id:\n Image.create_key(original_id).delete()\n return image.id\n else:\n return original_id\n\n\n@returns(PaymentProvider)\n@arguments(provider=PaymentProvider, data=PaymentProviderTO)\ndef _save_provider(provider, data):\n \"\"\"\n Args:\n provider (PaymentProvider)\n data (PaymentProviderTO)\n \"\"\"\n try:\n settings = json.loads(data.settings)\n except (ValueError, TypeError) as e:\n logging.info('Could not JSON decode custom setting: %s', e.message)\n settings = provider.settings\n if MISSING.default(data.embedded_application, None):\n embedded_app = EmbeddedApplication.create_key(data.embedded_application)\n else:\n embedded_app = None\n provider.populate(name=data.name,\n version=data.version,\n description=data.description,\n oauth_settings=data.oauth_settings.to_model() if data.oauth_settings else None,\n logo_id=_get_logo_id(provider.logo_id, data.logo),\n black_white_logo_id=_get_logo_id(provider.black_white_logo_id, data.black_white_logo),\n background_color=data.background_color,\n text_color=data.text_color,\n button_color=data.button_color,\n asset_types=data.asset_types,\n currencies=data.currencies,\n settings=settings,\n embedded_application=embedded_app,\n app_ids=data.app_ids)\n provider.put()\n return provider\n\n\n@ndb.transactional(xg=True)\n@returns(PaymentProvider)\n@arguments(data=PaymentProviderTO)\ndef create_payment_provider(data):\n \"\"\"\n Args:\n data (CreatePaymentProviderTO)\n \"\"\"\n provider_id = data.id\n if not is_valid_provider_id(provider_id):\n raise InvalidPaymentProviderException(provider_id)\n if get_payment_provider(provider_id):\n raise PaymentProviderAlreadyExistsException(provider_id)\n\n provider = PaymentProvider(key=PaymentProvider.create_key(provider_id))\n return _save_provider(provider, data)\n\n\n@ndb.transactional(xg=True)\n@returns(PaymentProvider)\n@arguments(provider_id=unicode, data=PaymentProviderTO)\ndef update_payment_provider(provider_id, data):\n \"\"\"\n Args:\n provider_id (unicode)\n data (CreatePaymentProviderTO)\n \"\"\"\n provider = 
get_payment_provider(provider_id)\n if not provider:\n raise PaymentProviderNotFoundException(provider_id)\n provider = _save_provider(provider, data)\n deferred.defer(send_update_payment_provider_request_to_users, provider_id, _countdown=5, _transactional=True)\n return provider\n\n\n@returns(tuple)\n@arguments(provider_id=unicode)\ndef get_payment_provider_oauth_secrets(provider_id):\n pp = get_payment_provider(provider_id)\n if not pp.oauth_settings:\n raise PaymentProviderNoOauthSettingsException(provider_id)\n\n return pp.oauth_settings.client_id, pp.oauth_settings.secret\n\n\n@returns(dict)\n@arguments(app_user=users.User, provider_id=unicode)\ndef get_access_token_for_user(app_user, provider_id):\n payment_user = get_payment_user(app_user)\n if not payment_user:\n return None\n\n pup = payment_user.get_provider(provider_id)\n if pup:\n return pup.token\n return None\n\n\n@returns(AppPaymentProviderTO)\n@arguments(app_user=users.User, provider_id=unicode)\ndef get_payment_provider_for_user(app_user, provider_id):\n payment_user = get_payment_user(app_user)\n pp = get_payment_provider(provider_id)\n base_url = get_server_settings().baseUrl\n enabled = payment_user is not None and payment_user.has_provider(pp.id)\n return AppPaymentProviderTO.from_model(base_url, pp, enabled, app_user)\n\n\n@returns([AppPaymentProviderTO])\n@arguments(app_user=users.User)\ndef get_payment_providers_for_user(app_user):\n pps = get_payment_providers()\n payment_user = get_payment_user(app_user)\n tos = []\n base_url = get_server_settings().baseUrl\n for pp in pps:\n enabled = payment_user is not None and payment_user.has_provider(pp.id)\n tos.append(AppPaymentProviderTO.from_model(base_url, pp, enabled, app_user))\n return tos\n\n\n@returns(GetPaymentProfileResponseTO)\n@arguments(app_user=users.User, provider_id=unicode)\ndef get_payment_profile(app_user, provider_id):\n return get_api_module(provider_id).get_payment_profile(app_user)\n\n\n@returns([PaymentProviderAssetTO])\n@arguments(app_user=users.User, provider_id=unicode, save=bool)\ndef get_payment_assets(app_user, provider_id, save=True):\n latest_assets = get_api_module(provider_id).get_payment_assets(app_user) # type: list[PaymentProviderAssetTO]\n payment_user = get_payment_user(app_user)\n db_assets = payment_user.get_assets_by_provider(provider_id)\n to_assets = {asset.id: asset for asset in latest_assets}\n\n if not save:\n assets = []\n for asset_id in to_assets:\n assets.append(PaymentProviderAssetTO.from_model(db_assets.get(asset_id), to_assets.get(asset_id)))\n return assets\n\n for pua in db_assets.values():\n payment_user.assets.remove(pua)\n\n for asset in latest_assets:\n model = asset.to_model()\n if asset.id in db_assets:\n # merge with existing asset\n db_action = db_assets[asset.id].required_action\n if db_action:\n model.required_action = db_action\n payment_user.assets.append(model)\n payment_user.put()\n return [PaymentProviderAssetTO.from_model(model, to_assets.get(model.asset_id)) for model in payment_user.assets]\n\n\n@returns(PaymentProviderAssetTO)\n@arguments(app_user=users.User, provider_id=unicode, asset_id=unicode)\ndef get_payment_asset(app_user, provider_id, asset_id):\n asset = get_api_module(provider_id).get_payment_asset(app_user, asset_id)\n pu = get_payment_user(app_user)\n if not pu:\n return None\n db_assets = pu.get_assets_by_provider(provider_id)\n return PaymentProviderAssetTO.from_model(db_assets.get(asset.id), asset)\n\n\n@returns(PaymentProviderAssetTO)\n@arguments(app_user=users.User, 
asset=CreatePaymentAssetTO)\ndef create_payment_asset(app_user, asset):\n \"\"\"\n Args:\n app_user (users.User)\n asset (CreatePaymentAssetTO)\n Raises:\n PaymentException: In case an exception occurred\n \"\"\"\n new_asset = get_api_module(asset.provider_id).create_payment_asset(app_user, asset)\n assert isinstance(new_asset, PaymentProviderAssetTO)\n payment_user = get_payment_user(app_user)\n payment_user.assets.append(new_asset.to_model())\n payment_user.put()\n # Sync the new asset to the phone\n updatePaymentAsset(update_payment_asset_response_handler, logError, app_user, request=new_asset)\n return new_asset\n\n\n@returns(TargetInfoTO)\n@arguments(app_user=users.User, provider_id=unicode, target=unicode, currency=unicode)\ndef get_target_info(app_user, provider_id, target, currency):\n target_user = users.User(target)\n\n if '/' not in target:\n target_service_user = create_service_identity_user(target_user)\n else:\n target_service_user = target_user\n target_info_service = _get_target_info_service(target_service_user, provider_id, currency)\n if target_info_service:\n return target_info_service\n\n if ':' not in target:\n target_user = create_app_user_by_email(target, get_app_id_from_app_user(app_user))\n return _get_target_info_user(target_user, provider_id, currency)\n\n\ndef _get_target_info_service(target_user, provider_id, currency):\n ps = get_payment_service(target_user)\n if not ps:\n return None\n if not ps.has_provider(provider_id):\n return None\n\n return get_api_module(provider_id).get_target_info_service(target_user,\n currency,\n ps.get_provider(provider_id).settings)\n\n\ndef _get_target_info_user(target_user, provider_id, currency):\n pu = get_payment_user(target_user)\n if not pu:\n return None\n\n user_profile = get_user_profile(target_user)\n\n to = TargetInfoTO()\n to.name = user_profile.name if user_profile else None\n to.assets = []\n for asset in pu.get_assets_by_provider(provider_id).itervalues():\n if asset.currency == currency:\n asset_to = TargetInfoAssetTO()\n asset_to.id = asset.asset_id\n asset_to.type = asset.type\n to.assets.append(asset_to)\n return to\n\n\n@returns(CreateTransactionResultTO)\n@arguments(app_user=users.User, provider_id=unicode, params=unicode)\ndef create_transaction(app_user, provider_id, params):\n return get_api_module(provider_id).create_transaction(app_user, params)\n\n\n@returns()\n@arguments(app_user=users.User, provider_id=unicode, asset_id=unicode, code=unicode)\ndef verify_payment_asset(app_user, provider_id, asset_id, code):\n return get_api_module(provider_id).verify_payment_asset(app_user, asset_id, code)\n\n\n@returns(tuple)\n@arguments(app_user=users.User, provider_id=unicode, asset_id=unicode, cursor=unicode, type=unicode)\ndef get_payment_transactions(app_user, provider_id, asset_id, cursor, type): # @ReservedAssignment\n if type == u\"confirmed\":\n return get_api_module(provider_id).get_confirmed_transactions(app_user, asset_id, cursor)\n elif type == u\"pending\":\n return get_api_module(provider_id).get_pending_transactions(app_user, asset_id, cursor)\n else:\n logging.error(u\"Called get_payment_transactions with unknown type: '%s'\" % type)\n return [], None\n\n\n@returns(PendingPaymentTO)\n@arguments(app_user=users.User, provider_id=unicode, asset_id=unicode, amount=(int, long), memo=unicode,\n precision=(int, long))\ndef receive_payment(app_user, provider_id, asset_id, amount, memo, precision):\n currency = get_api_module(provider_id).get_payment_asset_currency(app_user, asset_id)\n\n if not currency:\n 
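# a missing currency most likely means the provider could not resolve this asset,\n        # so fail fast with a localized error instead of creating an unusable pending payment\n        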
raise PaymentException(ErrorPaymentTO.CURRENCY_UNKNOWN, get_lang(app_user))\n\n transaction_id = unicode(uuid.uuid4())\n ppr = PaymentPendingReceive(key=PaymentPendingReceive.create_key(transaction_id))\n ppr.timestamp = now()\n ppr.provider_id = provider_id\n ppr.asset_id = asset_id\n ppr.app_user = app_user\n ppr.currency = currency\n ppr.amount = amount\n ppr.memo = memo\n ppr.precision = precision\n ppr.status = PaymentPendingReceive.STATUS_CREATED\n ppr.put()\n\n return PendingPaymentTO.create(ppr.status, transaction_id)\n\n\n@returns()\n@arguments(app_user=users.User, transaction_id=unicode)\ndef cancel_payment(app_user, transaction_id):\n ppr = PaymentPendingReceive.create_key(transaction_id).get()\n _validate_transaction_call(ppr, app_user, validate_started=False)\n\n if ppr.status not in (PaymentPendingReceive.STATUS_CREATED, PaymentPendingReceive.STATUS_SCANNED):\n raise PaymentException(ErrorPaymentTO.TRANSACTION_FINISHED, get_lang(app_user))\n\n if ppr.app_user == app_user:\n ppr.status = PaymentPendingReceive.STATUS_CANCELLED_BY_RECEIVER\n elif ppr.pay_user and ppr.pay_user == app_user:\n ppr.status = PaymentPendingReceive.STATUS_CANCELLED_BY_PAYER\n else:\n raise PaymentException(ErrorPaymentTO.PERMISSION_DENIED, get_lang(app_user))\n ppr.put()\n deferred.defer(send_update_payment_status_request, ppr)\n\n\n@returns(PendingPaymentDetailsTO)\n@arguments(app_user=users.User, transaction_id=unicode)\ndef get_pending_payment_details(app_user, transaction_id):\n ppr = PaymentPendingReceive.create_key(transaction_id).get()\n user_profile = get_user_profile(ppr.app_user)\n payment_provider = _validate_transaction_call(ppr, app_user, validate_started=False)\n provider_module = get_api_module(ppr.provider_id)\n\n status = PaymentPendingReceive.STATUS_SCANNED\n payment_user = get_payment_user(app_user)\n enabled = payment_user is not None and payment_user.has_provider(payment_provider.id)\n base_url = get_server_settings().baseUrl\n provider = AppPaymentProviderTO.from_model(base_url, payment_provider, enabled, app_user)\n\n assets = []\n if payment_user and payment_user.has_provider(ppr.provider_id):\n assets = provider_module.get_payment_assets(app_user, ppr.currency)\n receiver = UserDetailsTO.fromUserProfile(user_profile)\n receiver_asset = provider_module.get_payment_asset(ppr.app_user, ppr.asset_id)\n\n ppr.status = status\n ppr.pay_user = app_user\n if ppr.precision is None:\n ppr.precision = 2\n\n ppr.put()\n\n deferred.defer(send_update_payment_status_request, ppr)\n return PendingPaymentDetailsTO(status, transaction_id, provider, assets, receiver, receiver_asset, ppr.currency,\n ppr.amount, ppr.memo, ppr.timestamp, ppr.precision)\n\n\n@returns(CryptoTransactionTO)\n@arguments(app_user=users.User, transaction_id=unicode, asset_id=unicode)\ndef get_pending_payment_signature_data(app_user, transaction_id, asset_id):\n ppr = PaymentPendingReceive.create_key(transaction_id).get()\n _validate_transaction_call(ppr, app_user)\n\n if not ppr.pay_user or ppr.pay_user != app_user:\n raise PaymentException(ErrorPaymentTO.PERMISSION_DENIED, get_lang(app_user))\n\n if ppr.status not in (PaymentPendingReceive.STATUS_SCANNED,):\n raise PaymentException(ErrorPaymentTO.TRANSACTION_FINISHED, get_lang(app_user))\n try:\n ppr.status = PaymentPendingReceive.STATUS_SIGNATURE\n ppr.pay_asset_id = asset_id\n return get_api_module(ppr.provider_id).get_payment_signature_data(app_user, transaction_id, asset_id,\n ppr.asset_id, ppr.amount, ppr.currency,\n ppr.memo, ppr.precision)\n except 
PaymentException:\n raise\n except Exception as e:\n logging.exception(e)\n ppr.status = PaymentPendingReceive.STATUS_FAILED\n raise PaymentException(ErrorPaymentTO.UNKNOWN, get_lang(app_user))\n finally:\n ppr.put()\n deferred.defer(send_update_payment_status_request, ppr)\n\n\ndef _validate_transaction_call(pending_payment, app_user, validate_started=True):\n # type: (PaymentPendingReceive, users.User) -> PaymentProvider\n if not pending_payment:\n raise PaymentException(ErrorPaymentTO.TRANSACTION_NOT_FOUND, get_lang(app_user))\n payment_provider = get_payment_provider(pending_payment.provider_id)\n if not payment_provider:\n raise PaymentException(ErrorPaymentTO.PROVIDER_NOT_FOUND, get_lang(app_user))\n if validate_started and not pending_payment.pay_user:\n raise PaymentException(ErrorPaymentTO.TRANSACTION_NOT_INITIATED, get_lang(app_user))\n return payment_provider\n\n\n@returns(PendingPaymentTO)\n@arguments(app_user=users.User, transaction_id=unicode, crypto_transaction=CryptoTransactionTO)\ndef confirm_payment(app_user, transaction_id, crypto_transaction):\n ppr = PaymentPendingReceive.create_key(transaction_id).get()\n _validate_transaction_call(ppr, app_user)\n\n if not ppr.pay_user or ppr.pay_user != app_user:\n raise PaymentException(ErrorPaymentTO.PERMISSION_DENIED, get_lang(app_user))\n\n if ppr.status not in (PaymentPendingReceive.STATUS_SIGNATURE,):\n raise PaymentException(ErrorPaymentTO.TRANSACTION_FINISHED, get_lang(app_user))\n\n try:\n ppr.status = get_api_module(ppr.provider_id).confirm_payment(ppr.pay_user, ppr.app_user, transaction_id,\n ppr.pay_asset_id,\n ppr.asset_id, ppr.amount, ppr.currency, ppr.memo,\n ppr.precision, crypto_transaction)\n if ppr.status == PaymentPendingReceive.STATUS_CONFIRMED:\n deferred.defer(sync_payment_asset, app_user, ppr.provider_id, ppr.pay_asset_id)\n deferred.defer(sync_payment_asset, ppr.app_user, ppr.provider_id, ppr.asset_id)\n return PendingPaymentTO.create(ppr.status, transaction_id)\n except Exception as e:\n logging.exception(e)\n ppr.status = PaymentPendingReceive.STATUS_FAILED\n raise PaymentException(ErrorPaymentTO.UNKNOWN, get_lang(app_user))\n finally:\n ppr.put()\n deferred.defer(send_update_payment_status_request, ppr)\n\n\ndef get_and_update_payment_database_info(app_user):\n pps = get_payment_providers()\n payment_user = get_payment_user(app_user)\n\n added_providers = {}\n\n provider_list = []\n asset_list = []\n\n if not payment_user:\n payment_user_key = get_payment_user_key(app_user)\n payment_user = PaymentUser(key=payment_user_key)\n payment_user.providers = []\n payment_user.assets = []\n payment_user.put()\n\n for pp in pps:\n pp_enabled = payment_user.has_provider(pp.id)\n try:\n assets = get_payment_assets(app_user, pp.id, False)\n if not assets:\n continue\n\n provider_list.append(pp)\n\n if not pp_enabled and pp.id not in added_providers:\n payment_user.providers.append(PaymentUserProvider(provider_id=pp.id, token=None))\n added_providers[pp.id] = []\n\n elif pp.id not in added_providers:\n db_assets = payment_user.get_assets_by_provider(pp.id)\n for pua in db_assets.values():\n payment_user.assets.remove(pua)\n\n added_providers[pp.id] = []\n\n for asset in assets:\n asset_enabled = payment_user.has_asset(pp.id, asset.id)\n if not asset_enabled:\n if pp.id not in added_providers:\n added_providers[pp.id] = []\n\n if asset.id not in added_providers[pp.id]:\n added_providers[pp.id].append(asset.id)\n payment_user.assets.append(asset.to_model())\n\n asset_list.extend(assets)\n except:\n 
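# the bare except looks deliberate here: one misbehaving provider should not\n            # abort the sync of the remaining providers, so the error is only logged below\n            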
logging.exception(\"Failed to sync_app_database for provider '%s'\", pp.id)\n\n if added_providers:\n payment_user.put()\n\n return provider_list, asset_list\n\n\n@returns()\n@arguments(app_user=users.User)\ndef sync_payment_database(app_user):\n provider_list, asset_list = get_and_update_payment_database_info(app_user)\n base_url = get_server_settings().baseUrl\n\n request = UpdatePaymentProvidersRequestTO()\n request.provider_ids = [] # empty array clears all providers\n request.payment_providers = [UpdatePaymentProviderRequestTO.from_model(base_url, pp, True, app_user) for pp\n in provider_list]\n updatePaymentProviders(update_payment_providers_response_handler, logError, app_user, request=request)\n\n request = UpdatePaymentAssetsRequestTO()\n request.provider_ids = [] # empty array clears all assets\n request.assets = asset_list\n updatePaymentAssets(update_payment_assets_response_handler, logError, app_user, request=request)\n\n\n@returns()\n@arguments(app_user=users.User, provider_id=unicode, save=bool)\ndef sync_payment_assets(app_user, provider_id, save=False):\n \"\"\"\n Args:\n app_user (users.User)\n provider_id (unicode)\n save (bool)\n \"\"\"\n logging.debug('Syncing payment assets for user %s and provider %s', app_user.email(), provider_id)\n request = UpdatePaymentAssetsRequestTO()\n request.provider_ids = [provider_id] # Updates all assets of this provider_id\n request.assets = get_payment_assets(app_user, provider_id, save)\n updatePaymentAssets(update_payment_assets_response_handler, logError, app_user, request=request)\n\n\n@returns()\n@arguments(app_user=users.User, provider_id=unicode, asset_id=unicode)\ndef sync_payment_asset(app_user, provider_id, asset_id):\n request = get_payment_asset(app_user, provider_id, asset_id)\n if not request:\n return\n updatePaymentAsset(update_payment_asset_response_handler, logError, app_user, request=request)\n\n\n@returns()\n@arguments(provider_id=unicode)\ndef send_update_payment_provider_request_to_users(provider_id):\n base_url = get_server_settings().baseUrl\n pp = get_payment_provider(provider_id)\n run_job(send_update_payment_provider_request_query, [provider_id], send_update_payment_provider_request_worker,\n [base_url, pp])\n\n\ndef send_update_payment_provider_request_query(provider_id):\n return PaymentUser.list_by_provider_id(provider_id)\n\n\ndef send_update_payment_provider_request_worker(payment_user_key, base_url, pp):\n payment_user = payment_user_key.get()\n enabled = payment_user.has_provider(pp.id)\n request = UpdatePaymentProviderRequestTO.from_model(base_url, pp, enabled, payment_user.user)\n updatePaymentProvider(update_payment_provider_response_handler, logError, payment_user.user, request=request)\n\n\n@returns()\n@arguments(ppr=PaymentPendingReceive)\ndef send_update_payment_status_request(ppr):\n send_update_payment_status_request_to_user(ppr.app_user, ppr.transaction_id, ppr.status)\n if ppr.pay_user:\n send_update_payment_status_request_to_user(ppr.pay_user, ppr.transaction_id, ppr.status)\n\n\n@returns()\n@arguments(app_user=users.User, transaction_id=unicode, status=unicode)\ndef send_update_payment_status_request_to_user(app_user, transaction_id, status):\n request = UpdatePaymentStatusRequestTO.create(status, transaction_id)\n updatePaymentStatus(update_payment_status_response_handler, logError, app_user, request=request)\n\n\n@returns()\n@arguments(service_identity_user=users.User, provider_id=unicode, settings=dict, test_mode=bool)\ndef service_put_provider(service_identity_user, provider_id, 
settings, test_mode=False):\n    psp = PaymentServiceProvider()\n    psp.provider_id = provider_id\n    psp.settings = settings\n\n    ps_key = get_payment_service_key(service_identity_user)\n\n    def trans():\n        ps = ps_key.get()\n        if not ps:\n            ps = PaymentService(key=ps_key)\n        ps.remove_provider(provider_id, test_mode)\n        ps.add_provider(psp, test_mode)\n        ps.put()\n\n    ndb.transaction(trans)\n\n\n@returns(dict)\n@arguments(service_identity_user=users.User, provider_id=unicode, test_mode=bool)\ndef service_get_provider(service_identity_user, provider_id, test_mode=False):\n    ps = get_payment_service(service_identity_user)\n    if not ps:\n        return None\n    provider = ps.get_provider(provider_id, test_mode)\n    if provider:\n        return provider.settings\n    return None\n\n\n@returns()\n@arguments(service_identity_user=users.User, provider_id=unicode, test_mode=bool)\ndef service_delete_provider(service_identity_user, provider_id, test_mode=False):\n    def trans():\n        ps = get_payment_service(service_identity_user)\n        if ps and ps.remove_provider(provider_id, test_mode):\n            ps.put()\n\n    ndb.transaction(trans)\n\n\n@returns([unicode])\n@arguments(service_identity_user=users.User, test_mode=bool)\ndef service_get_providers(service_identity_user, test_mode=False):\n    ps = get_payment_service(service_identity_user)\n    if not ps:\n        return []\n    return [psp.provider_id for psp in ps.get_providers(test_mode)]\n","sub_path":"src/rogerthat/bizz/payment/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":27522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"102003445","text":"#!/usr/bin/env python3\nimport copy\nfrom .checker import Checker, Context, BugReport\nfrom ..lib import rank_utils, config\nfrom ..parse.explorer import is_call, is_lock, is_unlock, match_call, CallType\nfrom ..parse.symbol import IDSymbol\n\nclass ThreadSafetyContext(Context):\n    def get_bugs(self):\n        bugs = []\n        for key, value in self.ctx_uses.items():\n            total = self.total_uses[key]\n            diff = copy.copy(total)\n            scores = {}\n            for ctx, codes in value.items():\n                score = len(codes) / len(total)\n                if score >= config.THRESHOLD and ctx and score != 1:\n                    diff = diff - codes\n                    for bug in diff:\n                        scores[bug] = score\n\n            if len(diff) != len(total):\n                added = set()\n                for bug in diff:\n                    if bug in added:\n                        continue\n                    added.add(bug)\n                    br = BugReport(scores[bug], bug, key, ctx, total - diff)\n                    bugs.append(br)\n        return bugs\n\n\nclass ThreadSafetyChecker(Checker):\n    parse_constraints = False\n\n    def _initialize_process(self):\n        self.context = ThreadSafetyContext()\n\n    def _process_path(self, path):\n        mutex = False\n        for node in path:\n            m = match_call(node)\n            if m is not None:\n                call_name = node.event.call_name\n                code = node.event.code\n                if m == CallType.LOCK:\n                    mutex = True\n                elif m == CallType.UNLOCK:\n                    mutex = False\n                else: # normal call\n                    self.context.add(call_name, mutex, code)\n\n    def _finalize_process(self):\n        return self.context\n\n    def merge(self, ctxs):\n        if not ctxs:\n            return None\n        ctx = ctxs[0]\n        for i in range(1, len(ctxs)):\n            ctx.merge(ctxs[i])\n        return self.rank(ctx.get_bugs())\n\n    def rank(self, reports):\n        return sorted(reports, key=lambda k: k.score, reverse=True)\n","sub_path":"analyzer/apisan/check/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"645576298","text":"\"\"\"\n    2.1 Saving images\n\"\"\"\n\ndef dim(img):\n    n = 
len(img)\n    p = len(img[0])\n    return (n,p)\n\ndef rectangle_noir(n,p):\n    img = []\n    for i in range(n):\n        img.append([0]*p)\n    return img\n\ndef sauve_image(img, N, f):\n    with open(f,\"w\") as fo:\n        fo.write(\"P2\\n\")\n        fo.write(\"{} {}\\n\".format(dim(img)[1],dim(img)[0]))\n        fo.write(\"{}\\n\".format(N))\n        for i in range(dim(img)[0]):\n            for j in range(dim(img)[1]):\n                fo.write(\"{}\\n\".format(img[i][j]))\n    return None\n\ndef sauve_rectangle_noir(n,p,N,f):\n    sauve_image(rectangle_noir(n,p),N,f)\n    return None\n\ndef rectangle_blanc(n,p,N):\n    img = []\n    for i in range(n):\n        img.append([N]*p)\n    return img\n\ndef sauve_rectangle_blanc(n,p,N,f):\n    sauve_image(rectangle_blanc(n,p,N),N,f)\n    return None\n\ndef xor(a,b):\n    return ((not a) and b) or (a and (not b))\n\ndef echiquier(p,N):\n    img = []\n    for i in range(p):\n        tab_temp = [] #start a fresh row here; originally the list was created once, so every row aliased the same list\n        for j in range(p):\n            if (i // (p // 8)) % 2 == 0:\n                a = False\n            else:\n                a = True\n            if (j // (p // 8)) % 2 == 0:\n                b = False\n            else:\n                b = True\n            if xor(a,b):\n                tab_temp.append(0)\n            else:\n                tab_temp.append(N)\n        img.append(tab_temp)\n    return img\n\ndef sauve_echiquier(p,N,f):\n    sauve_image(echiquier(p,N),N,f)\n    return None\n","sub_path":"info-ancien/TP07/tp07_rennesson_hyenne.py","file_name":"tp07_rennesson_hyenne.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"148572570","text":"'''\nName: Paul Talaga\nDate: 2-11-2020\nDesc: Piano player\n  AAAFCAFCA EEEFCA#FCAFCA\n\n'''\n\nimport socket\nimport pickle\nfrom tkinter import *\n\nmaster = Tk()\n\nport_number = 5555\nserver_ip = '127.0.0.1' # put destination address here\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ndef keyToFreq(key):\n    # https://en.wikipedia.org/wiki/Piano_key_frequencies\n    return 2.0 ** ((key - 49) / 12.0) * 440\n\ndef playSound(freq, dur):\n    message = pickle.dumps( (freq,dur) )\n    s.sendto( message, (server_ip, port_number))\n    print(\"{} {} sent\".format(freq,dur))\n\ndef wasClicked(event):\n    playSound(event.widget.secretValue, 100)\n\nkeys = [1,3,5,6,8,10,12] # need 13 to finish\n\nfor n in keys:\n    button = Button(master,text=str(n))\n    button.grid(column=n, row= 0) \n    button.bind('\",\n \"style\": \"\",\n \"js\": \"document.getElementById(\\\\\\\"button_@@name@@\\\\\\\").ontouchstart = function() {\\n\" +\n \" connection.send(\\\\\\\"@@name@@down\\\\\\\");\\n\" +\n \"};\\n\" +\n \"document.getElementById(\\\\\\\"button_@@name@@\\\\\\\").ontouchend = function() {\\n\" +\n \" connection.send(\\\\\\\"@@name@@up\\\\\\\");\\n\" +\n \"};\\n\",\n \"event\": \"if(!strcmp(\\\"@@name@@down\\\", (char *) payload)){\\n\" +\n \" button_@@name@@ = 1;\\n\" +\n \"}\\n\" +\n \"if(!strcmp(\\\"@@name@@up\\\", (char *) payload)){\\n\" +\n \" button_@@name@@ = 0;\\n\" +\n \"}\\n\"\n }\n }\n }\n\n self.addInterface(\"outInt\", OutIntPort(self, \"outInt\", \"button_@@name@@\"))\n CodeComponent.define(self, **kwargs)\n\n\n def assemble(self):\n CodeComponent.assemble(self)\n\n\n\n\n","sub_path":"src/interface/ppr/svggen/library/UIButton.py","file_name":"UIButton.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165782083","text":"# Baekjoon 7562\r\n\r\nfrom collections import deque\r\n\r\n# breadth-first search algorithm\r\ndef BFS():\r\n    queue = deque() # create the queue\r\n    queue.append([x1, y1]) # put the knight's starting square on the queue\r\n\r\n    while queue:\r\n        x, y = queue.popleft() # take the next square off the queue into x, y\r\n\r\n        # when the knight reaches the target square, print the number of moves\r\n        if x == x2 and y 
== y2:\r\n            print(visited[x][y])\r\n            return\r\n        \r\n        # try every knight move from the current square\r\n        for i in range(8):\r\n            nx = x + dx[i]\r\n            ny = y + dy[i]\r\n            # the move must stay inside the board\r\n            if 0 <= nx < chess and 0 <= ny < chess:\r\n                # and the square must not have been visited yet\r\n                if visited[nx][ny] == 0:\r\n                    visited[nx][ny] = visited[x][y] + 1\r\n                    queue.append([nx, ny])\r\n    \r\n\r\nT = int(input()) # number of test cases\r\n\r\n# the eight (dx, dy) offsets a knight can move in one step\r\ndx = [1, 1, 2, 2, -1, -1, -2, -2]\r\ndy = [2, -2, 1, -1, 2, -2, 1, -1]\r\n\r\nfor i in range(T):\r\n    chess = int(input()) # side length of the chessboard\r\n    x1, y1 = map(int, input().split()) # x1, y1 : the knight's starting square \r\n    x2, y2 = map(int, input().split()) # x2, y2 : the square the knight wants to reach\r\n\r\n    visited = [[0] * chess for _ in range(chess)]\r\n\r\n    if x1 == x2 and y1 == y2:\r\n        print(0)\r\n    else:\r\n        BFS()\r\n","sub_path":"BFS/나이트의 이동.py","file_name":"나이트의 이동.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"408043728","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport json\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport numpy as np\nimport math\n\nfrom scipy.stats import ks_2samp\nfrom statsmodels.graphics.gofplots import qqplot\n#import matplotlib.pyplot as plt\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\n\nfrom evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo\nfrom evidently.widgets.widget import Widget\n\nred = \"#ed0400\"\ngrey = \"#4d4d4d\"\n\n\nclass RegProdQualityMetricsWidget(Widget):\n    def __init__(self, title: str):\n        super().__init__()\n        self.title = title\n\n    def get_info(self) -> BaseWidgetInfo:\n        #if self.wi:\n        return self.wi\n        #raise ValueError(\"No reference data with target and prediction provided\")\n\n    def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping): \n        if column_mapping:\n            date_column = column_mapping.get('datetime')\n            id_column = column_mapping.get('id')\n            target_column = column_mapping.get('target')\n            prediction_column = column_mapping.get('prediction')\n            num_feature_names = column_mapping.get('numerical_features')\n            if num_feature_names is None:\n                num_feature_names = []\n            else:\n                num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])] \n\n            cat_feature_names = column_mapping.get('categorical_features')\n            if cat_feature_names is None:\n                cat_feature_names = []\n            else:\n                cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])] \n        \n        else:\n            date_column = 'datetime' if 'datetime' in reference_data.columns else None\n            id_column = None\n            target_column = 'target' if 'target' in reference_data.columns else None\n            prediction_column = 'prediction' if 'prediction' in reference_data.columns else None\n\n            utility_columns = [date_column, id_column, target_column, prediction_column]\n\n            num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))\n            cat_feature_names = list(set(reference_data.select_dtypes([np.object]).columns) - set(utility_columns))\n\n        if production_data is not None:\n            if target_column is not None and prediction_column is not None:\n                production_data.replace([np.inf, -np.inf], np.nan, inplace=True)\n                production_data.dropna(axis=0, how='any', inplace=True)\n                \n                #calculate quality metrics\n                abs_err = list(map(lambda x : abs(x[0] - x[1]), \n                    zip(production_data[prediction_column], production_data[target_column])))\n                mae = 
np.mean(abs_err)\n                sdae = np.std(abs_err, ddof = 1)\n\n                abs_perc_err = list(map(lambda x : 100*abs(x[0] - x[1])/x[0], \n                    zip(production_data[prediction_column], production_data[target_column])))\n                mape = np.mean(abs_perc_err)\n                sdape = np.std(abs_perc_err, ddof = 1)\n\n                sqrt_err = list(map(lambda x : (x[0] - x[1])**2, \n                    zip(production_data[prediction_column], production_data[target_column])))\n                mse = np.mean(sqrt_err)\n                sdse = np.std(sqrt_err, ddof = 1)\n\n                #error_norm_json = json.loads(error_norm.to_json())\n\n                self.wi = BaseWidgetInfo(\n                    title=\"Production Quality\",\n                    type=\"counter\",\n                    details=\"\",\n                    alertStats=AlertStats(),\n                    alerts=[],\n                    alertsPosition=\"row\",\n                    insights=[],\n                    size=2,\n                    params={ \n                        \"counters\": [\n                          {\n                            \"value\": str(round(mae, 2)) + \" (\" + str(round(sdae,2)) + \")\",\n                            \"label\": \"MAE\"\n                          },\n                          {\n                            \"value\": str(round(mape, 2)) + \" (\" + str(round(sdape, 2)) + \")\",\n                            \"label\": \"MAPE\"\n                          },\n                          {\n                            \"value\": str(round(mse, 2)) + \" (\" + str(round(sdse, 2)) + \")\",\n                            \"label\": \"MSE\"\n                          }\n                        ]\n                    },\n                    additionalGraphs=[],\n                )\n            else:\n                self.wi = None\n        else:\n            self.wi = None\n\n","sub_path":"evidently/widgets/reg_prod_quality_metrics_widget.py","file_name":"reg_prod_quality_metrics_widget.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"425458169","text":"from typing import List\n\n\nclass Solution:\n    \"\"\"\n    136. Single Number\n    https://leetcode-cn.com/problems/single-number/\n    Given a non-empty array of integers, every element appears twice except for one.\n    Find the element that appears only once.\n    \"\"\"\n    def singleNumber(self, nums: List[int]) -> int:\n        res = 0\n        for num in nums:\n            res ^= num\n\n        return res\n\n\nso = Solution()\nprint(so.singleNumber([4,1,2,1,2]))\n","sub_path":"arr.single-number.py","file_name":"arr.single-number.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"544285950","text":"import numpy as np\nfrom stl import mesh\n\n\ndef get_mid(pt1: np.array, pt2:np.array) -> np.array:\n\n\n    return (pt1+pt2)/2\n\n\ndef get_vertices():\n    \n    bottom_tri = np.array([[0, 0, 0],\n                           [1, 0, 0],\n                           [0.5, np.sqrt(3)/2, 0]])\n\n    top_tri = np.array([[0, 0, 1],\n                        [1, 0, 1],\n                        [0.5, np.sqrt(3)/2, 1]])\n    barycenter = np.sum(top_tri, axis=0) / 3\n    mid1 = get_mid(top_tri[0], top_tri[1])\n    mid2 = get_mid(top_tri[1], top_tri[2])\n    mid3 = get_mid(top_tri[2], top_tri[0])\n\n\n    vertex_array = np.vstack([bottom_tri, top_tri, barycenter, mid1, mid2, mid3])\n    \n    for i in range(vertex_array.shape[0]):\n        print(f'{i}\\t{vertex_array[i]}')\n    # print(f'vertices: {vertices}')\n    # print(f'vertices shape: {vertices.shape}')\n    \n    return vertex_array\n\n\ndef make_stl(vertex_array: np.array, shape, how: str='one'):\n    \n    if how == 'one':\n        # create mesh and then STL file\n        simplex = mesh.Mesh(np.zeros(shape.shape[0], dtype=mesh.Mesh.dtype))\n        for i, f in enumerate(shape):\n            for j in range(3):\n                simplex.vectors[i][j] = vertex_array[f[j],:]\n\n        simplex.save('barycentric.stl')\n\n    elif how =='many':\n        for k, tetra in enumerate(shape):\n            # create mesh and then STL file\n            stl_tetra = mesh.Mesh(np.zeros(tetra.shape[0], dtype=mesh.Mesh.dtype))\n            for i, f in enumerate(tetra):\n                for j in range(3):\n                    stl_tetra.vectors[i][j] = vertex_array[f[j],:]\n\n            stl_tetra.save(f'tetra{k}.stl')\n\n\n\n\nif __name__ == '__main__':\n    \n    vertices = get_vertices()\n\n    # we should have 10 total tetrahedra\n    tetra0 = np.array([[3,6,7],\n                       [0,3,7],\n                       [0,3,6],\n                       [0,6,7]], 
dtype=int)\n    tetra1 = np.array([[3,9,6],\n                       [0,3,9],\n                       [0,9,6],\n                       [0,3,6]], dtype=int)\n    tetra2 = np.array([[5,6,9],\n                       [2,5,9],\n                       [2,6,9],\n                       [2,5,6]], dtype=int)\n    tetra3 = np.array([[5,6,8],\n                       [2,5,6],\n                       [2,6,8],\n                       [2,5,8]], dtype=int)\n    tetra4 = np.array([[4,6,8],\n                       [1,4,6],\n                       [1,6,8],\n                       [1,4,8]], dtype=int)\n    tetra5 = np.array([[4,7,6],\n                       [1,4,7],\n                       [1,7,6],\n                       [1,4,6]], dtype=int)\n    # weird side tetrahedra\n    tetra6 = np.array([[0,1,7],\n                       [0,7,6],\n                       [1,7,6],\n                       [0,1,6]], dtype=int)\n    tetra7 = np.array([[1,2,8],\n                       [2,6,8],\n                       [1,6,8],\n                       [1,2,6]], dtype=int)\n    tetra8 = np.array([[0,2,9],\n                       [0,9,6],\n                       [2,9,6],\n                       [0,2,6]], dtype=int)\n    # bottom tetrahedron\n    tetra9 = np.array([[0,1,2],\n                       [0,1,6],\n                       [0,2,6],\n                       [1,2,6]], dtype=int)\n\n    # combine all the tetrahedra to form triangular cylinder\n    all_tetra = [tetra0, tetra1, tetra2, tetra3, tetra4, tetra5, tetra6, tetra7, tetra8, tetra9]\n    total_tetra = np.vstack(all_tetra)\n\n    # create mesh and then STL file\n    make_stl(vertices, all_tetra, how='many')\n    make_stl(vertices, total_tetra, how='one')\n\n","sub_path":"scripts/barycentric.py","file_name":"barycentric.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"653351649","text":"from secu.models import Role, Right, RoleRight, UserRole  # UserRole is used by delete() below\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom tools.ui.pager import get_record_index\nfrom django.core.urlresolvers import reverse\nfrom attendence.decrator import *\n\nclass _Role:\n    @valid_user\n    def index(request):\n        return render(request, 'secu/role.html')\n\n    @staticmethod\n    @valid_user \n    def list(request):\n        if request.method == 'GET':\n            record = get_record_index(request)\n            total_records = Role.objects.all().count()\n            li = Role.objects.all()[record['start']:record['end']]\n\n            data = [\n                {\n                    \"id\": item.id, \n                    \"name\": item.name, \n                    \"detail\": item.detail,\n                    \"url\": reverse('secu:role_right_list', kwargs={'role_id':item.id})\n                } \n                for item in li\n            ]\n\n            context = {\n                \"totalrecords\": total_records,\n                \"data\": data\n            }\n            return JsonResponse(context, safe=False)\n\n    @staticmethod\n    @valid_user \n    def single(request):\n        if request.method == 'GET':\n            instance = Role.objects.get(id=request.GET['role_id'])\n\n            data = {\n                \"id\": instance.id,\n                \"name\": instance.name,\n                \"detail\": instance.detail\n            }\n\n            context = {\n                \"code\": 50000,\n                \"data\": data\n            }\n            return JsonResponse(context, safe=False) \n\n    @valid_user \n    def get_right_list_by_role(request, role_id):\n        if request.method == 'GET':\n            role = Role.objects.get(id=role_id)\n            record = get_record_index(request)\n\n            role_right_list = [item.right.id for item in RoleRight.objects.filter(role=role)]\n\n            total_records = Right.objects.all().count()\n            li = Right.objects.all()[record['start']:record['end']]\n\n            data = [\n                {\n                    \"id\": item.id, \n                    \"name\": item.name, \n                    \"detail\": item.detail,\n                    \"own\": 'checked' if item.id in role_right_list else '',\n                } \n                for item in li\n            ]\n\n            context = {\n                \"totalrecords\": total_records,\n                \"data\": data\n            }\n            return JsonResponse(context, safe=False) \n    \n    @staticmethod \n    @valid_user\n    def add_delete_right_to_role(request):\n        if request.method == 'POST':\n            role = Role.objects.get(id=request.POST['role_id'])\n            right = Right.objects.get(id=request.POST['right_id'])\n            operation = request.POST['operation']\n            if operation == 'add':\n                RoleRight.objects.create(role=role, right=right)\n            elif operation == 'delete':\n                
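# this only removes the role-to-right link row; the Right record itself is kept\n                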
RoleRight.objects.filter(role=role, right=right).delete()\n return HttpResponse(50000)\n\n @valid_user \n def add(request):\n if request.method == 'POST':\n role = Role()\n role.name = request.POST['name']\n role.detail = request.POST['detail']\n role.save()\n return HttpResponse(50000)\n def delete(request):\n if request.method == 'POST':\n role = Role.objects.get(id=request.POST['role_id'])\n UserRole.objects.filter(role=role).delete()\n RoleRight.objects.filter(role=role).delete()\n role.delete()\n return HttpResponse(50000)\n\n def edit(request):\n if request.method == 'POST':\n role = Role.objects.get(id=request.POST['role_id'])\n role.name = request.POST['name']\n role.detail = request.POST['detail']\n role.save()\n return HttpResponse(50000) \n\n","sub_path":"secu/views/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"450416962","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\n__all__ = [\n \"Sum\", \"Product\", \"Kernel\",\n \"ConstantKernel\", \"WhiteKernel\", \"DotProductKernel\",\n \"RadialKernel\", \"ExpKernel\", \"ExpSquaredKernel\", \"RBFKernel\",\n \"CosineKernel\", \"ExpSine2Kernel\",\n \"Matern32Kernel\", \"Matern52Kernel\",\n]\n\nimport numpy as np\nfrom scipy.linalg import cho_factor, cho_solve\n\n\nclass Kernel(object):\n \"\"\"\n The abstract kernel type. Every kernel implemented in George should be\n a subclass of this object.\n\n :param pars:\n The hyper-parameters of the kernel.\n\n :param ndim: (optional)\n The number of input dimensions of the kernel. (default: ``1``)\n\n \"\"\"\n\n is_kernel = True\n kernel_type = -1\n\n # This function deals with weird behavior when performing arithmetic\n # operations with numpy scalars.\n def __array_wrap__(self, array, context=None):\n if context is None:\n raise TypeError(\"Invalid operation\")\n ufunc, args, _ = context\n if ufunc.__name__ == \"multiply\":\n return float(args[0]) * args[1]\n elif ufunc.__name__ == \"add\":\n return float(args[0]) + args[1]\n raise TypeError(\"Invalid operation\")\n __array_priority__ = np.inf\n\n def __init__(self, *pars, **kwargs):\n self.ndim = kwargs.get(\"ndim\", 1)\n self.pars = np.array(pars)\n\n def __repr__(self):\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(\"{0}\".format,\n self.pars) +\n [\"ndim={0}\".format(self.ndim)]))\n\n def lnprior(self):\n return 0.0\n\n @property\n def vector(self):\n # return self.pars\n return np.log(self.pars)\n\n @vector.setter\n def vector(self, v):\n # self.pars = v\n self.pars = np.exp(v)\n\n @property\n def pars(self):\n \"\"\"\n A NumPy array listing the vector of hyperparameters specifying the\n kernel.\n\n \"\"\"\n return self._pars\n\n @pars.setter\n def pars(self, v):\n self._pars = np.array(v)\n self.set_pars(v)\n self.dirty = True\n\n def __getitem__(self, i):\n return self.vector[i]\n\n def __setitem__(self, i, v):\n vec = self.vector\n vec[i] = v\n self.vector = vec\n\n def set_pars(self, pars):\n pass\n\n def __len__(self):\n return len(self.vector)\n\n def __add__(self, b):\n if not hasattr(b, \"is_kernel\"):\n return Sum(ConstantKernel(np.sqrt(np.abs(float(b))),\n ndim=self.ndim), self)\n return Sum(self, b)\n\n def __radd__(self, b):\n return self.__add__(b)\n\n def __mul__(self, b):\n if not hasattr(b, \"is_kernel\"):\n return Product(ConstantKernel(np.sqrt(np.abs(float(b))),\n ndim=self.ndim), self)\n return Product(self, b)\n\n def 
__rmul__(self, b):\n return self.__mul__(b)\n\n def __call__(self, x1, x2):\n \"\"\"\n The value of the kernel evaluated at a specific pair of coordinates.\n\n \"\"\"\n raise NotImplementedError(\"Must be implemented by subclasses\")\n\n def grad(self, x1, x2):\n \"\"\"\n The kernel gradient evaluated at a specific pair of coordinates. The\n order of the parameters is the same as the ``.vector`` property.\n\n \"\"\"\n raise NotImplementedError(\"Must be implemented by subclasses\")\n\n\nclass _operator(Kernel):\n is_kernel = False\n operator_type = -1\n\n def __init__(self, k1, k2):\n if k1.ndim != k2.ndim:\n raise ValueError(\"Dimension mismatch\")\n self.k1 = k1\n self.k2 = k2\n self.ndim = k1.ndim\n self.dirty = True\n\n @property\n def pars(self):\n return np.append(self.k1.pars, self.k2.pars)\n\n @pars.setter\n def pars(self, v):\n self.dirty = True\n i = len(self.k1)\n self.k1.pars = v[:i]\n self.k2.pars = v[i:]\n\n @property\n def vector(self):\n return np.append(self.k1.vector, self.k2.vector)\n\n @vector.setter\n def vector(self, v):\n self.dirty = True\n i = len(self.k1)\n self.k1.vector = v[:i]\n self.k2.vector = v[i:]\n\n def __getitem__(self, i):\n return self.vector[i]\n\n def __setitem__(self, i, v):\n p = self.vector\n p[i] = v\n self.vector = p\n\n\nclass Sum(_operator):\n is_kernel = False\n operator_type = 0\n\n def __repr__(self):\n return \"{0} + {1}\".format(self.k1, self.k2)\n\n def __call__(self, x1, x2):\n return self.k1(x1, x2) + self.k2(x1, x2)\n\n def grad(self, x1, x2):\n g1 = self.k1.grad(x1, x2)\n g2 = self.k2.grad(x1, x2)\n return np.concatenate((g1, g2), axis=0)\n\n\nclass Product(_operator):\n is_kernel = False\n operator_type = 1\n\n def __repr__(self):\n return \"{0} * {1}\".format(self.k1, self.k2)\n\n def __call__(self, x1, x2):\n return self.k1(x1, x2) * self.k2(x1, x2)\n\n def grad(self, x1, x2):\n g1 = self.k1.grad(x1, x2)\n g2 = self.k2.grad(x1, x2)\n s = [None] + [slice(None)] * (len(g1.shape) - 1)\n return np.concatenate((g1 * self.k2(x1, x2)[s],\n g2 * self.k1(x1, x2)[s]), axis=0)\n\n\nclass ConstantKernel(Kernel):\n r\"\"\"\n This kernel returns the constant\n\n .. math::\n\n k(\\mathbf{x}_i,\\,\\mathbf{x}_j) = c^2\n\n where :math:`c` is a parameter.\n\n :param value:\n The constant value :math:`c` in the above equation.\n\n \"\"\"\n kernel_type = 0\n\n def __init__(self, value, ndim=1):\n super(ConstantKernel, self).__init__(value, ndim=ndim)\n\n def __call__(self, x1, x2):\n return self.pars[0] ** 2 + np.zeros((len(x1), len(x2)))\n\n def grad(self, x1, x2):\n return 2 * self.pars[0] ** 2 + np.zeros((1, len(x1), len(x2)))\n\n\nclass WhiteKernel(Kernel):\n r\"\"\"\n This kernel returns constant along the diagonal.\n\n .. math::\n\n k(\\mathbf{x}_i,\\,\\mathbf{x}_j) = c^2 \\, \\delta_{ij}\n\n where :math:`c` is the parameter.\n\n :param value:\n The constant value :math:`c` in the above equation.\n\n \"\"\"\n kernel_type = 8\n\n def __init__(self, value, ndim=1):\n super(WhiteKernel, self).__init__(value, ndim=ndim)\n\n def __call__(self, x1, x2):\n d = np.sum((x1[:, None] - x2[None, :]) ** 2, axis=-1)\n return self.pars[0] ** 2 * (d == 0.0)\n\n def grad(self, x1, x2):\n d = np.sum((x1[:, None] - x2[None, :]) ** 2, axis=-1)\n g = np.zeros(np.append(1, d.shape))\n g[0] = 2 * self.pars[0] ** 2 * (d == 0)\n return g\n\n\nclass DotProductKernel(Kernel):\n r\"\"\"\n The dot-product kernel takes the form\n\n .. 
math::\n\n k(\\mathbf{x}_i,\\,\\mathbf{x}_j) = \\mathbf{x}_i^{\\mathrm{T}} \\cdot\n \\mathbf{x}_j\n\n \"\"\"\n kernel_type = 1\n\n def __init__(self, ndim=1):\n super(DotProductKernel, self).__init__(ndim=ndim)\n\n def __call__(self, x1, x2):\n return np.sum(x1[:, None] * x2[None, :], axis=-1)\n\n def grad(self, x1, x2):\n return np.zeros(np.append(1, self(x1, x2).shape))\n\n\nclass RadialKernel(Kernel):\n r\"\"\"\n This abstract base class implements a radial kernel in an arbitrary\n metric. The metric is specified as a matrix :math:`C` where the\n radius :math:`{r_{ij}}^2` is\n\n .. math::\n\n {r_{ij}}^2 = (\\mathbf{x}_i - \\mathbf{x}_j)^\\mathrm{T}\\,\n C^{-1}\\,(\\mathbf{x}_i - \\mathbf{x}_j)\n\n :param metric:\n There are a few different ways that you can specify the metric:\n\n 1. if ``metric`` is a scalar, the metric is assumed isotropic with an\n axis-aligned variance of ``metric`` in each dimension,\n 2. if ``metric`` has the same length as ``ndim``, it is assumed to\n specify the axis-aligned variances in each dimension, and\n 3. if ``metric`` has the length ``(ndim*ndim + ndim) / 2``, it is\n assumed to give the triangular matrix :math:`C` in the form\n specified below.\n\n :param isotropic: (optional)\n Is the metric isotropic? If not given, this is inferred from the\n metric.\n\n :param axis_aligned: (optional)\n Is the metric axis-aligned? If not given, this is inferred from the\n metric.\n\n However you specify ``metric``, the ``pars`` property of the kernel will\n be a NumPy array with elements:\n\n .. code-block:: python\n\n kernel.pars = [a, b, c, d, e, f, ...]\n\n where\n\n .. math::\n\n C = \\left ( \\begin{array}{cccc}\n a & b & d & \\\\\n b & c & e & \\cdots \\\\\n d & e & f & \\\\\n & \\vdots & & \\ddots\n \\end{array} \\right )\n\n **Note:**\n Subclasses should implement the :func:`get_value` method to give\n the value of the kernel at a given radius and this class will deal with\n the metric.\n\n **Another Note:**\n 1-D is treated as a special case for speed.\n\n \"\"\"\n\n def __init__(self, metric, ndim=1, extra=[], axis_aligned=None,\n isotropic=None):\n # Special case 1-D for speed.\n if ndim == 1:\n self.nextra = len(extra)\n super(RadialKernel, self).__init__(*(np.append(extra, metric)),\n ndim=1)\n else:\n # Save the meta-data about the kernel parameters.\n self.axis_aligned = axis_aligned\n self.isotropic = isotropic\n self.nextra = len(extra)\n self.ndim = ndim\n pars = self._coerce_metric(metric)\n super(RadialKernel, self).__init__(*(np.append(extra, pars)),\n ndim=ndim)\n\n # Build the gradient indicator masks.\n self.gm = np.empty(np.append(len(self)-self.nextra,\n self.matrix.shape),\n dtype=int)\n for i in range(len(self) - self.nextra):\n ind = np.zeros(len(self) - self.nextra, dtype=int)\n ind[i] = 1\n self.gm[i] = self._build_matrix(self._coerce_metric(ind))\n\n def _coerce_metric(self, metric):\n # Coerce the input metric into the form required by the C++ solver.\n inds = np.tri(self.ndim, dtype=bool)\n if np.isscalar(metric) or len(metric) == 1:\n # A scalar metric gives an isotropic metric.\n pars = np.diag(float(metric) * np.ones(self.ndim))[inds]\n\n # Update the isotropic flag if it wasn't specified.\n if self.isotropic is None:\n self.isotropic = True\n else:\n # If we asked for an isotropic kernel but the given metric\n # isn't a scalar then something went wrong.\n if self.isotropic:\n raise ValueError(\"Isotropic kernels only take one metric \"\n \"parameter, {0}\".format(metric))\n\n # Deal with 1- or 2-dimensional metrics.\n metric = 
np.atleast_1d(metric)\n if len(metric) == self.ndim:\n pars = np.diag(metric)[inds]\n\n # Update the axis_aligned flag if it wasn't specified.\n if self.axis_aligned is None:\n self.axis_aligned = True\n elif len(metric) == self.ndim * (self.ndim + 1) // 2:\n if self.axis_aligned:\n raise ValueError(\"Axis-aligned kernels can only \"\n \"take a diagonal metric\")\n pars = np.array(metric)\n else:\n raise ValueError(\"Dimension mismatch\")\n return pars\n\n @property\n def vector(self):\n if self.ndim == 1:\n return np.log(self.pars)\n if self.isotropic:\n return np.log(np.append(self.pars[:self.nextra],\n self.matrix[0, 0]))\n if self.axis_aligned:\n p = self.matrix[np.diag_indices_from(self.matrix)]\n return np.log(np.append(self.pars[:self.nextra], p))\n return np.log(self.pars)\n\n @vector.setter\n def vector(self, v):\n if self.ndim == 1:\n self.pars = np.exp(v)\n return\n p = self._coerce_metric(np.exp(v[self.nextra:]))\n self.pars = np.append(np.exp(v[:self.nextra]), p)\n\n def _build_matrix(self, x, dtype=float):\n m = np.zeros((self.ndim, self.ndim), dtype=dtype)\n m[np.tril_indices_from(m)] = x\n m += m.T\n m[np.diag_indices_from(m)] *= 0.5\n return m\n\n def set_pars(self, pars):\n if self.ndim > 1:\n self.matrix = self._build_matrix(pars[self.nextra:])\n self._factor = cho_factor(self.matrix)\n else:\n self.ivar = 1.0 / pars[self.nextra]\n\n def __call__(self, x1, x2):\n if self.ndim > 1:\n dx = x1[:, None] - x2[None, :]\n dxf = dx.reshape((-1, self.ndim)).T\n r = np.sum(dxf * cho_solve(self._factor, dxf), axis=0)\n r = r.reshape(dx.shape[:-1])\n else:\n r = np.sum((x1[:, None] - x2[None, :]), axis=-1) ** 2 * self.ivar\n return self.get_value(r)\n\n def grad(self, x1, x2):\n if self.ndim > 1:\n dx = x1[:, None] - x2[None, :]\n dxf = dx.reshape((-1, self.ndim)).T\n alpha = cho_solve(self._factor, dxf)\n\n # Compute the radial gradient.\n g = np.empty(np.append(len(self.gm)+self.nextra, dx.shape[:-1]))\n for i, gm in enumerate(self.gm):\n g[self.nextra+i] = \\\n np.sum(dxf*cho_solve(self._factor, np.dot(gm, alpha)),\n axis=0).reshape(dx.shape[:-1])\n\n # Compute the function gradient.\n r = np.sum(dxf * alpha, axis=0)\n r = r.reshape(dx.shape[:-1])\n if self.nextra:\n kg, g[:self.nextra] = self.get_grad(r)\n else:\n kg = self.get_grad(r)\n\n # Update the full gradient to include the kernel.\n s1 = [None] + [slice(None)] * len(r.shape)\n s2 = [slice(None)] + [None] * len(r.shape)\n g[self.nextra:] *= -kg[s1] * np.exp(self.vector[self.nextra:])[s2]\n\n else:\n r = np.sum((x1[:, None] - x2[None, :]), axis=-1) ** 2 * self.ivar\n\n # Compute the radial gradient.\n g = np.empty(np.append(len(self), r.shape))\n if self.nextra:\n kg, g[:self.nextra] = self.get_grad(r)\n else:\n kg = self.get_grad(r)\n g[self.nextra] = -kg * r\n\n return g\n\n def get_value(self, r):\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def get_grad(self, r):\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n\nclass ExpKernel(RadialKernel):\n r\"\"\"\n The exponential kernel is a :class:`RadialKernel` where the value at a\n given radius :math:`r^2` is given by:\n\n .. 
math::\n\n        k(r^2) = \\exp \\left ( -\\sqrt{r^2} \\right )\n\n    :param metric:\n        The custom metric specified as described in the :class:`RadialKernel`\n        description.\n\n    \"\"\"\n    kernel_type = 2\n\n    def get_value(self, dx):\n        return np.exp(-np.sqrt(dx))\n\n    def get_grad(self, dx):\n        sx = np.sqrt(dx)\n        m = sx > 0\n        g = np.zeros_like(sx)\n        g[m] = -0.5 * np.exp(-sx[m]) / sx[m]\n        return g\n\n\nclass ExpSquaredKernel(RadialKernel):\n    r\"\"\"\n    The exponential-squared kernel is a :class:`RadialKernel` where the value\n    at a given radius :math:`r^2` is given by:\n\n    .. math::\n\n        k(r^2) = \\exp \\left ( -\\frac{r^2}{2} \\right )\n\n    :param metric:\n        The custom metric specified as described in the :class:`RadialKernel`\n        description.\n\n    \"\"\"\n    kernel_type = 3\n\n    def get_value(self, dx):\n        return np.exp(-0.5 * dx)\n\n    def get_grad(self, dx):\n        return -0.5 * np.exp(-0.5 * dx)\n\n\nclass RBFKernel(ExpSquaredKernel):\n    r\"\"\"\n    An alias for :class:`ExpSquaredKernel`.\n\n    \"\"\"\n\n\nclass Matern32Kernel(RadialKernel):\n    r\"\"\"\n    The Matern-3/2 kernel is a :class:`RadialKernel` where the value at a\n    given radius :math:`r^2` is given by:\n\n    .. math::\n\n        k(r^2) = \\left( 1+\\sqrt{3\\,r^2} \\right)\\,\n                 \\exp \\left (-\\sqrt{3\\,r^2} \\right )\n\n    :param metric:\n        The custom metric specified as described in the :class:`RadialKernel`\n        description.\n\n    \"\"\"\n    kernel_type = 6\n\n    def get_value(self, dx):\n        r = np.sqrt(3.0 * dx)\n        return (1.0 + r) * np.exp(-r)\n\n    def get_grad(self, dx):\n        r = np.sqrt(3.0 * dx)\n        return -3 * 0.5 * np.exp(-r)\n\n\nclass Matern52Kernel(RadialKernel):\n    r\"\"\"\n    The Matern-5/2 kernel is a :class:`RadialKernel` where the value at a\n    given radius :math:`r^2` is given by:\n\n    .. math::\n\n        k(r^2) = \\left( 1+\\sqrt{5\\,r^2} + \\frac{5\\,r^2}{3} \\right)\\,\n                 \\exp \\left (-\\sqrt{5\\,r^2} \\right )\n\n    :param metric:\n        The custom metric specified as described in the :class:`RadialKernel`\n        description.\n\n    \"\"\"\n    kernel_type = 7\n\n    def get_value(self, dx):\n        r = np.sqrt(5.0 * dx)\n        return (1.0 + r + r*r / 3.0) * np.exp(-r)\n\n    def get_grad(self, dx):\n        r = np.sqrt(5.0 * dx)\n        return -5 * (1.0 + r) * np.exp(-r) / 6.0\n\n\nclass RationalQuadraticKernel(RadialKernel):\n    r\"\"\"\n    The rational quadratic kernel is a :class:`RadialKernel` where the value\n    at a given radius :math:`r^2` is given by:\n\n    .. math::\n\n        k(r^2) = \\left( 1+ \\frac{r^2}{2\\,\\alpha}  \\right )^{-\\alpha}\n\n    :param alpha:\n        The shape parameter :math:`\\alpha`.\n\n    :param metric:\n        The custom metric specified as described in the :class:`RadialKernel`\n        description.\n\n    \"\"\"\n    kernel_type = 9\n\n    def __init__(self, alpha, metric, ndim=1, **kwargs):\n        super(RationalQuadraticKernel, self).__init__(metric, extra=[alpha],\n                                                      ndim=ndim, **kwargs)\n\n    def get_value(self, dx):\n        a = self.pars[0]\n        return (1.0 + 0.5 * dx / a) ** (-a)\n\n    def get_grad(self, dx):\n        a = self.pars[0]\n        t1 = 1 + 0.5 * dx / a\n        t2 = 2 * a + dx\n        return -0.5 * t1**(-a-1), t1 ** (-a) * (dx - t2 * np.log(t1)) / t2 * a\n\n\nclass CosineKernel(Kernel):\n    r\"\"\"\n    The cosine kernel is given by:\n\n    .. math::\n\n        k(\\mathbf{x}_i,\\,\\mathbf{x}_j) =\n            \\cos\\left(\\frac{2\\,\\pi}{P}\\,\\left|x_i-x_j\\right| \\right)\n\n    where :math:`P` is the period.\n\n    :param period:\n        The period :math:`P` of the oscillation (in the same units as\n        :math:`\\mathbf{x}`).\n\n    **Note:**\n    A shortcoming of this kernel is that it currently only accepts a single\n    period so it's not very applicable to problems with input dimension larger\n    than one.\n\n    \"\"\"\n    kernel_type = 4\n\n    def __init__(self, period, ndim=1):\n        super(CosineKernel, self).__init__(period, ndim=ndim)\n\n    def set_pars(self, pars, twopi=2*np.pi):\n        self._omega = twopi / float(pars)\n\n    def __call__(self, x1, x2):\n        return np.cos(self._omega * np.sqrt(np.sum((x1[:, None]\n                                                    - x2[None, :]) ** 2,\n                                                   axis=-1)))\n\n    def grad(self, x1, x2, itwopi=1.0/(2*np.pi)):\n        x = np.sqrt(np.sum((x1[:, None] - x2[None, :]) ** 2, axis=-1))\n        g = np.empty(np.append(1, x.shape))\n        s = [0] + [slice(None)] * len(x.shape)\n        g[s] = x * np.sin(self._omega * x) * self._omega\n        return g\n\n\nclass ExpSine2Kernel(Kernel):\n    r\"\"\"\n    The exp-sine-squared kernel is used to model stellar rotation and *might*\n    be applicable in some other contexts. It is given by the equation:\n\n    .. math::\n\n        k(\\mathbf{x}_i,\\,\\mathbf{x}_j) =\n            \\exp \\left( -\\Gamma\\,\\sin^2\\left[\n                \\frac{\\pi}{P}\\,\\left|x_i-x_j\\right|\n            \\right] \\right)\n\n    where :math:`\\Gamma` is the \"scale\" of the correlation and :math:`P` is\n    the period of the oscillation measured in the same units as\n    :math:`\\mathbf{x}`.\n\n    :param gamma:\n        The scale :math:`\\Gamma` of the correlations.\n\n    :param period:\n        The period :math:`P` of the oscillation (in the same units as\n        :math:`\\mathbf{x}`).\n\n    **Note:**\n    A shortcoming of this kernel is that it currently only accepts a single\n    period and scale so it's not very applicable to problems with input\n    dimension larger than one.\n\n    \"\"\"\n    kernel_type = 5\n\n    def __init__(self, gamma, period, ndim=1):\n        super(ExpSine2Kernel, self).__init__(gamma, period, ndim=ndim)\n\n    def set_pars(self, pars):\n        self._omega = np.pi / pars[1]\n\n    def __call__(self, x1, x2):\n        d = x1[:, None] - x2[None, :]\n        s = np.sin(self._omega * np.sqrt(np.sum(d ** 2, axis=-1)))\n        return np.exp(-self.pars[0] * s**2)\n\n    def grad(self, x1, x2):\n        # Pre-compute some factors.\n        d = x1[:, None] - x2[None, :]\n        x = np.sqrt(np.sum(d ** 2, axis=-1))\n        sx = np.sin(self._omega * x)\n        cx = np.cos(self._omega * x)\n        A2 = sx*sx\n        a = self.pars[0]\n        f = np.exp(-a * A2)\n\n        # Build the output array.\n        g = np.empty(np.append(2, x.shape))\n        s = [0] + [slice(None)] * len(x.shape)\n\n        # Compute the scale derivative.\n        g[s] = -f * A2 * self.pars[0]\n\n        # Compute the period derivative.\n        s[0] = 1\n        g[s] = 2 * f * a * sx * cx * x * self._omega\n\n        return g\n","sub_path":"george/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":21567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"478452865","text":"\"\"\"\nIt is known that 8 queens can be placed on an 8×8 board so that\nthey do not attack each other.\nYou are given an arrangement of 8 queens on the board; determine\nwhether it contains a pair of queens that attack each other.\nThe program reads eight pairs of numbers as input,\neach number from 1 to 8 - the coordinates of the 8 queens.\nIf the queens do not attack each other, print NO, otherwise print YES.\n\"\"\"\n\nn = 8\nx = []\ny = []\n\nfor i in range(n):\n    x1, y1 = [int(s) for s in input().split()]\n    x.append(x1)\n    y.append(y1)\n\ncross = False\nfor i in range(n):\n    for j in 
range(i + 1, n):\n        if x[i] == x[j] or y[i] == y[j] or abs(x[i] - x[j]) == abs(y[i] - y[j]):\n            cross = True\n\nif cross:\n    print(\"YES\")\nelse:\n    print(\"NO\")\n\n# variant_1:\nx = []\ny = []\nfor i in range(8):\n    p = input().split()\n    x.append(int(p[0]))\n    y.append(int(p[1]))\nf = 0\nif len(set(x)) < 8 or len(set(y)) < 8:\n    f = 1\nelse:\n    import itertools\n    for i, j in itertools.combinations(range(8), 2):\n        if (abs(x[i]-x[j])==abs(y[i]-y[j])):\n            f = 1\n            break\nprint((\"NO\",\"YES\")[f])\n","sub_path":"Pythontutor/lists/queens.py","file_name":"queens.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"60652849","text":"import sys\nsys.path.append(r\"..\\.\")\nimport numpy as np\nimport meshtools as mt\nimport matplotlib.pyplot as plt\n\n\n## Mesh definition, WW1819 exam ##\n\n# \nNL=2\nRa=25e-2\nHH=10e-2\nDx=3e-3\nV0=1.\n\n# Distance from the inside of plate 1 to the inside of plate NL\nBreite=(2*NL-3)*Dx+2*1e-5\n#Frequency\nf0=10e3\n#Fill level height\nhh=HH/2.\n\n# Relative permittivities\nepsr=11\neps0= 8.854187817e-12\n#Conductivities\nsigma=1e-4\nsigma0=1e-12\n\n# kappa, shorthand \n#kappa=sigma+2.*np.pi*f0*eps0*epsr*1J\n#kappa0=2.*np.pi*f0*eps0*1J\n#kappa=eps0*epsr\n#kappa0=eps0\n\nlength=0.0053*np.sqrt(NL)\n\n# outer circle\np,v=mt.CircleSegments([0,0],Ra,edge_length=5*length)\n\n# inner rectangles\nRU=[]\nRO=[]\nHolPoi=[]\nx0=-NL*Dx+Dx/2.\nfor k in range(NL):\n    x=x0+k*2*Dx\n    RU+=[[x,-HH/2.]]\n    RO+=[[x+Dx,HH/2.]]\n    # rectangle with sharp corners\n    p0,v0=mt.RectangleSegments(RU[-1],RO[-1],edge_lengthx=length/55,edge_lengthy=length/15)\n    # rectangle with rounded corners \n    #p0,v0=mt.ORecSegments(RU[-1],RO[-1],Dx/8,edge_lengthx=length/50,edge_lengthy=length/12,num_pc=15)\n    p,v=mt.AddCurves(p,v,p0,v0)\n    HolPoi+=[(x+Dx/2.,0)]\n\n\n#Refinement\ndef myrefine(tri_points, area):\n    center_tri = np.sum(np.array(tri_points), axis=0)/3.\n    [x0,y0]=np.array([center_tri[0],center_tri[1]])\n    rsp=x0**2+y0**2\n    if np.abs(x0)<1.15*NL*Dx and np.abs(y0)<1.15*HH/2:\n        max_area=length*length/200\n    elif np.abs(x0)<1.25*NL*Dx and np.abs(y0)<1.25*HH/2:\n        max_area=length*length/20\n    else:\n        max_area=10*length*length\n    return bool(area>max_area)\n\n# generate the mesh\npoi,tri,bou,li,bou_elem=mt.DoTriMesh(p,v,edge_length=length,tri_refine=myrefine,holes=HolPoi)\n\nprint(\"Number of points: \",len(poi))\nprint(\"Number of triangle elements: \",len(tri))\nplt.show()\n\n\n# Boundary curves\nPs=[[Ra,0],[Ra,0]]\ntyp=['Segments']+NL*['Nodes']\nfor k in range(NL):\n    Ps+=[RU[k],RU[k]]\n\nbseg=mt.RetrieveSegments(poi,bou,li,Ps,typ)\n\n# Boundary plot\nfor k in range(0,NL+1):\n    mt.PlotBoundary(poi,bseg[k],typ[k])\nplt.show()\n \n# Robin boundary \nR0=bseg[0]\n#Dirichlet boundary\nG0=[]\nfor k in range(1,NL+1):\n    G0+=bseg[k] \n\n","sub_path":"WS1819/KlausurWS1819_netz.py","file_name":"KlausurWS1819_netz.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"250652688","text":"import tkinter\nimport sys\n\nfrom .customtkinter_frame import CTkFrame\nfrom .appearance_mode_tracker import AppearanceModeTracker\nfrom .customtkinter_color_manager import CTkColorManager\n\n\nclass CTkSlider(tkinter.Frame):\n    \"\"\" tkinter custom slider, always horizontal \"\"\"\n\n    def __init__(self,\n                 bg_color=None,\n                 border_color=None,\n                 fg_color=CTkColorManager.SLIDER_BG,\n                 button_color=CTkColorManager.MAIN,\n                 button_hover_color=CTkColorManager.MAIN_HOVER,\n                 
from_=0,\n                 to=1,\n                 width=160,\n                 height=16,\n                 border_width=5.5,\n                 command=None,\n                 *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        AppearanceModeTracker.add(self.change_appearance_mode)\n\n        if bg_color is None:\n            if isinstance(self.master, CTkFrame):\n                self.bg_color = self.master.fg_color\n            else:\n                self.bg_color = self.master.cget(\"bg\")\n        else:\n            self.bg_color = bg_color\n\n        self.border_color = border_color\n        self.fg_color = fg_color\n        self.button_color = self.bg_color if button_color is None else button_color\n        self.button_hover_color = self.bg_color if button_hover_color is None else button_hover_color\n\n        self.appearance_mode = AppearanceModeTracker.get_mode()  # 0: \"Light\" 1: \"Dark\"\n\n        self.width = width\n        self.height = height\n        self.border_width = border_width\n        self.callback_function = command\n        self.value = 0.5\n        self.hover_state = False\n        self.from_ = from_\n        self.to = to\n        self.output_value = self.from_ + (self.value * (self.to - self.from_))\n\n        self.configure(width=self.width, height=self.height)\n        if sys.platform == \"darwin\":\n            self.configure(cursor=\"pointinghand\")\n\n        self.canvas = tkinter.Canvas(master=self,\n                                     highlightthickness=0,\n                                     width=self.width,\n                                     height=self.height)\n        self.canvas.place(x=0, y=0)\n\n        self.canvas.bind(\"<Enter>\", self.on_enter)\n        self.canvas.bind(\"<Leave>\", self.on_leave)\n        self.canvas.bind(\"<Button-1>\", self.clicked)\n        self.canvas.bind(\"<B1-Motion>\", self.clicked)\n\n        self.border_parts = []\n        self.fg_parts = []\n        self.button_parts = []\n\n        self.draw()\n\n    def draw(self):\n        self.canvas.delete(\"all\")\n        self.border_parts = []\n        self.fg_parts = []\n        self.button_parts = []\n\n        # frame_border\n        self.border_parts.append(self.canvas.create_oval(0, 0,\n                                                         self.height, self.height))\n        self.border_parts.append(self.canvas.create_rectangle(self.height/2, 0,\n                                                              self.width-(self.height/2), self.height))\n        self.border_parts.append(self.canvas.create_oval(self.width-self.height, 0,\n                                                         self.width, self.height))\n\n        # foreground\n        self.fg_parts.append(self.canvas.create_oval(self.border_width, self.border_width,\n                                                     self.height-self.border_width, self.height-self.border_width))\n        self.fg_parts.append(self.canvas.create_rectangle(self.height/2, self.border_width,\n                                                          self.width-(self.height/2), self.height-self.border_width))\n        self.fg_parts.append(self.canvas.create_oval(self.width-self.height+self.border_width, self.border_width,\n                                                     self.width-self.border_width, self.height-self.border_width))\n\n        # button\n        self.button_parts.append(self.canvas.create_oval(self.value*self.width - self.height/2, 0,\n                                                         self.value*self.width + self.height/2, self.height))\n\n        self.canvas.configure(bg=CTkColorManager.single_color(self.bg_color, self.appearance_mode))\n\n        for part in self.border_parts:\n            self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.border_color, self.appearance_mode), width=0)\n\n        for part in self.fg_parts:\n            self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.fg_color, self.appearance_mode), width=0)\n\n        for part in self.button_parts:\n            self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.button_color, self.appearance_mode), width=0)\n\n    def clicked(self, event=0):\n        self.value = event.x / self.width\n\n        if self.value > 1:\n            self.value = 1\n        if self.value < 0:\n            self.value = 0\n\n        self.output_value = self.from_ + (self.value * (self.to - self.from_))\n\n        self.update()\n\n        if self.callback_function is not None:\n            self.callback_function(self.output_value)\n\n    def update(self):\n        for part in self.button_parts:\n            self.canvas.delete(part)\n\n        
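# Redraw the round button: self.value in [0, 1] is mapped onto a track of\n        # length (width - height) so the button always stays fully inside the canvas.\n        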
self.button_parts.append(self.canvas.create_oval(self.value * (self.width-self.height), 0,\n self.value * (self.width-self.height) + self.height, self.height))\n\n for part in self.button_parts:\n if self.hover_state is True:\n self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.button_hover_color, self.appearance_mode), width=0)\n else:\n self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.button_color, self.appearance_mode), width=0)\n\n def on_enter(self, event=0):\n self.hover_state = True\n for part in self.button_parts:\n self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.button_hover_color, self.appearance_mode), width=0)\n\n def on_leave(self, event=0):\n self.hover_state = False\n for part in self.button_parts:\n self.canvas.itemconfig(part, fill=CTkColorManager.single_color(self.button_color, self.appearance_mode), width=0)\n\n def get(self):\n return self.output_value\n\n def set(self, output_value):\n self.output_value = output_value\n self.value = (self.output_value - self.from_) / (self.to - self.from_)\n self.update()\n\n if self.callback_function is not None:\n self.callback_function(self.output_value)\n\n def change_appearance_mode(self, mode_string):\n if mode_string.lower() == \"dark\":\n self.appearance_mode = 1\n elif mode_string.lower() == \"light\":\n self.appearance_mode = 0\n\n if isinstance(self.master, CTkFrame):\n self.bg_color = self.master.fg_color\n else:\n self.bg_color = self.master.cget(\"bg\")\n\n self.draw()\n\n","sub_path":"customtkinter/customtkinter_slider.py","file_name":"customtkinter_slider.py","file_ext":"py","file_size_in_byte":6928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510482238","text":"from telegram.ext import Updater,CommandHandler\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nupdater=Updater(token=\"563988282:AAFWbTft6amNduY6HJ4W7XFMKRTYZJetBeQ\",use_context=True)\ndispatcher = updater.dispatcher\ndef start(update,context):\n context.bot.send_message(chat_id=update.effective_chat.id,text=\"help\")\nstart_handler=CommandHandler(\"help\",start)\ndispatcher.add_handler(start_handler)\nupdater.start_polling()","sub_path":"Pythonbot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414518850","text":"''' Helper function to init API client'''\nimport os\nfrom collections import namedtuple\n\nfrom idf_component_tools.api_client import APIClient\nfrom idf_component_tools.errors import FatalError\nfrom idf_component_tools.sources.web_service import default_component_service_url\n\nfrom .config import ConfigManager\n\ntry:\n from typing import Optional\nexcept ImportError:\n pass\n\nServiceDetails = namedtuple('ServiceDetails', ['client', 'namespace'])\n\n\ndef service_details(namespace=None, service_profile=None): # type: (Optional[str], Optional[str]) -> ServiceDetails\n config = ConfigManager().load()\n profile_name = service_profile or 'default'\n profile = config.profiles.get(profile_name, {})\n\n print(profile)\n\n service_url = profile.get('url')\n if not service_url or service_url == 'default':\n service_url = default_component_service_url()\n\n print(service_url)\n print(type(service_url))\n\n # Priorities: idf.py option > IDF_COMPONENT_NAMESPACE env variable > profile value\n namespace = namespace or 
profile.get('default_namespace')\n if not namespace:\n raise FatalError('Namespace is required to upload component')\n\n # Priorities: IDF_COMPONENT_API_TOKEN env variable > profile value\n token = os.getenv('IDF_COMPONENT_API_TOKEN', profile.get('api_token'))\n if not token:\n raise FatalError('API token is required to upload component')\n\n client = APIClient(base_url=service_url, auth_token=token)\n\n return ServiceDetails(client, namespace)\n","sub_path":"upload_components/component-manager/idf_component_manager/service_details.py","file_name":"service_details.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"30096338","text":"\"\"\"\nBase settings to build other settings files upon.\n\"\"\"\nimport os\nfrom pathlib import Path\n\nimport environ\n\n\nSITE_NAME = \"A320 Expert\"\n\n\nROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent\n# app/\nAPPS_DIR = ROOT_DIR / \"app\"\nenv = environ.Env()\n\nREAD_DOT_ENV_FILE = env.bool(\"DJANGO_READ_DOT_ENV_FILE\", default=True)\n# define the file that contains the environment variables, if any\nENV_FILE = env.str(\"ENV_FILE\", default=\".env\")\nif READ_DOT_ENV_FILE:\n # OS environment variables take precedence over variables from .env\n path = str(ROOT_DIR / ENV_FILE)\n if os.path.exists(path):\n print(f\"Load environment from {ENV_FILE}\")\n env.read_env(str(ROOT_DIR / ENV_FILE))\n\n# GENERAL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n# Local time zone. Choices are\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# though not all of them may be available with every OS.\n# In Windows, this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en-us\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [str(ROOT_DIR / \"locale\")]\n\n# DATABASES\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\"default\": env.db(\"DATABASE_URL\")}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# URLS\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf\nROOT_URLCONF = \"config.urls\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# APPS\n# ------------------------------------------------------------------------------\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n # \"django.contrib.humanize\", # Handy template tags\n \"django.contrib.admin\",\n \"django.forms\",\n]\nTHIRD_PARTY_APPS = [\n \"crispy_forms\",\n \"allauth\",\n \"allauth.account\",\n \"allauth.socialaccount\",\n \"rest_framework\",\n 
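# Token model backing rest_framework's TokenAuthentication (configured below).\n    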
\"rest_framework.authtoken\",\n \"corsheaders\",\n \"captcha\",\n \"ckeditor\",\n \"ckeditor_uploader\",\n]\n\nLOCAL_APPS = [\n \"app.main.apps.MainConfig\",\n \"app.users.apps.UsersConfig\",\n \"app.quiz.apps.QuizConfig\"\n # Your stuff: custom apps go here\n]\n# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# MIGRATIONS\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules\nMIGRATION_MODULES = {\"sites\": \"app.contrib.sites.migrations\"}\n\n# AUTHENTICATION\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n]\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model\nAUTH_USER_MODEL = \"users.User\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url\nLOGIN_REDIRECT_URL = \"quiz:quiz_progress\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#login-url\nLOGIN_URL = \"account_login\"\n\n# PASSWORDS\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers\nPASSWORD_HASHERS = [\n # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django\n \"django.contrib.auth.hashers.Argon2PasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2PasswordHasher\",\n \"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher\",\n \"django.contrib.auth.hashers.BCryptSHA256PasswordHasher\",\n]\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n# MIDDLEWARE\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#middleware\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n]\n\n# STATIC\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = str(ROOT_DIR / \"staticfiles\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = \"/static/\"\n# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [str(APPS_DIR / \"static\")]\n# 
https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR / \"media\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# TEMPLATES\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n \"DIRS\": [str(APPS_DIR / \"templates\")],\n \"OPTIONS\": {\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"app.utils.context_processors.settings_context\",\n ],\n },\n }\n]\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer\nFORM_RENDERER = \"django.forms.renderers.TemplatesSetting\"\n\n# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# FIXTURES\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs\nFIXTURE_DIRS = (str(APPS_DIR / \"fixtures\"),)\n\n# SECURITY\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly\nSESSION_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly\nCSRF_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter\nSECURE_BROWSER_XSS_FILTER = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options\nX_FRAME_OPTIONS = \"DENY\"\n\n# EMAIL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\nEMAIL_BACKEND = env(\n \"DJANGO_EMAIL_BACKEND\", default=\"django.core.mail.backends.smtp.EmailBackend\"\n)\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout\nEMAIL_TIMEOUT = 5\n\n# ADMIN\n# ------------------------------------------------------------------------------\n# Django Admin URL.\nADMIN_URL = \"admin/\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#admins\nADMINS = [(\"\"\"Thomas Tartiere\"\"\", \"thomas.tartiere@gmail.com\")]\n# https://docs.djangoproject.com/en/dev/ref/settings/#managers\nMANAGERS = ADMINS\n\n# 
LOGGING\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#logging\n# See https://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"%(levelname)s %(asctime)s %(module)s \"\n \"%(process)d %(thread)d %(message)s\"\n }\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n }\n },\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n}\n\n\n# django-allauth\n# ------------------------------------------------------------------------------\nACCOUNT_ALLOW_REGISTRATION = env.bool(\"DJANGO_ACCOUNT_ALLOW_REGISTRATION\", True)\nACCOUNT_AUTHENTICATION_METHOD = \"email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_USER_MODEL_USERNAME_FIELD = None\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_ADAPTER = \"app.users.adapters.AccountAdapter\"\nSOCIALACCOUNT_ADAPTER = \"app.users.adapters.SocialAccountAdapter\"\n\n# django-compressor\n# ------------------------------------------------------------------------------\n# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation\nINSTALLED_APPS += [\"compressor\"]\nSTATICFILES_FINDERS += [\"compressor.finders.CompressorFinder\"]\n\n# django-rest-framework\n# -------------------------------------------------------------------------------\n# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n}\n\n# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup\nCORS_URLS_REGEX = r\"^/api/.*$\"\n\n# CKEditor - Rich text editor\n# ------------------------------------------------------------------------------\n\nCKEDITOR_BASEPATH = STATIC_URL + \"ckeditor/ckeditor/\"\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_IMAGE_BACKEND = \"pillow\"\nCKEDITOR_CONFIGS = {\n \"default\": {\n \"skin\": \"moono-lisa\",\n \"toolbar_Basic\": [[\"Source\", \"-\", \"Bold\", \"Italic\"]],\n \"toolbar_Full\": [\n [\n \"Styles\",\n \"Format\",\n \"Bold\",\n \"Italic\",\n \"Underline\",\n \"Strike\",\n \"SpellChecker\",\n \"Undo\",\n \"Redo\",\n \"uploadimage\",\n \"uploadwidget\",\n ],\n [\"Link\", \"Unlink\", \"Anchor\"],\n [\"Image\", \"Table\", \"HorizontalRule\"],\n [\"TextColor\", \"BGColor\"],\n [\"Smiley\", \"SpecialChar\"],\n [\"Source\"],\n ],\n \"toolbar\": \"Full\",\n \"height\": 291,\n \"width\": 835,\n \"filebrowserWindowWidth\": 940,\n \"filebrowserWindowHeight\": 725,\n \"disableNativeSpellChecker\": False,\n \"extraPlugins\": \",\".join(\n [\n \"uploadimage\", # the upload image feature\n # your extra plugins here\n \"div\",\n \"autolink\",\n \"autoembed\",\n \"embedsemantic\",\n \"autogrow\",\n # 'devtools',\n \"widget\",\n \"lineutils\",\n \"clipboard\",\n \"dialog\",\n \"dialogui\",\n \"elementspath\",\n ]\n ),\n 
}\n}\n","sub_path":"config/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"629105185","text":"import boto3\nimport json\nimport subprocess as sp\n\ntest_datum = {\n \"fixed acidity\": 7.8,\n \"volatile acidity\": 0.88,\n \"pH\": 3.2,\n \"citric acid\": 0.0,\n \"residual sugar\": 2.6,\n \"chlorides\": 0.098,\n \"free sulfur dioxide\": 25.0,\n \"total sulfur dioxide\": 67.0,\n \"density\": 0.9968,\n \"sulphates\": 0.68,\n \"alcohol\": 9.8 * 2\n}\n\n\npulumi_config = json.loads(sp.check_output('pulumi config --json', shell=True).decode().strip())\nregion = pulumi_config['aws:region']['value']\npulumi_stack_output = json.loads(sp.check_output('pulumi stack output --json', shell=True).decode().strip())\nlambda_client = boto3.client('lambda', region_name=region)\nsagemaker_wrapper_function_name = pulumi_stack_output['lambda_function_name']\n\nresponse = lambda_client.invoke(\n FunctionName=sagemaker_wrapper_function_name,\n Payload=json.dumps(test_datum).encode())\n\n\nout = json.load(response['Payload'])\n\nprint(json.dumps({'lambda': sagemaker_wrapper_function_name,\n 'request': test_datum,\n 'response': out}, indent=True, sort_keys=True))\n","sub_path":"examples/winequality-component-resource/invoke_lambda.py","file_name":"invoke_lambda.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364231404","text":"import logging\n\nlogger = logging.getLogger('gpn')\n\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n_FORMAT = \"[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s\"\n#'%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nformatter = logging.Formatter(_FORMAT)\nch.setFormatter(formatter)\nlogger.setLevel(logging.DEBUG)\n\nlogger.addHandler(ch)\n","sub_path":"analyser/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11730307","text":"import discord\nfrom discord.ext import commands\nimport random\n\ndescription = '''A helpful(?) bot made by Justin and Julian.'''\nbot = commands.Bot(command_prefix='?', description=description)\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n@bot.command()\nasync def hello():\n \"\"\"Says hello to the user.\"\"\"\n await bot.say('Hello! I am a bot made by Justin and Julian. 
Type \\'?help\\' for a command list.')\n\n@bot.command()\nasync def add(left : int, right : int):\n \"\"\"Adds two numbers together.\"\"\"\n await bot.say(left + right)\n\n@bot.command()\nasync def roll(dice : str):\n \"\"\"Rolls a dice in NdN format.\"\"\"\n try:\n rolls, limit = map(int, dice.split('d'))\n except Exception:\n await bot.say('Format has to be in NdN!')\n return\n\n result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n await bot.say(result)\n\n@bot.command()\nasync def odds(num : int, thing : str):\n\t\"\"\"What are the odds?\"\"\"\n\todd1 = random.randint(1,num)\n\todd2 = random.randint(1,num)\n\n\tif odd1 == odd2:\n\t\tawait bot.say('You must ' + thing)\n\telse:\n\t\tawait bot.say('You got lucky')\n\tawait bot.say('Roll 1: ' + str(odd1) + '\\t\\tRoll 2: ' + str(odd2))\n\n@bot.command(description='For when you wanna settle the score some other way')\nasync def choose(*choices : str):\n \"\"\"Chooses between multiple choices.\"\"\"\n await bot.say(random.choice(choices))\n\n@bot.command()\nasync def repeat(times : int, content='repeating...'):\n \"\"\"Repeats a message multiple times.\"\"\"\n for i in range(times):\n await bot.say(content)\n\n@bot.command()\nasync def joined(member : discord.Member):\n \"\"\"Says when a member joined.\"\"\"\n await bot.say('{0.name} joined in {0.joined_at}'.format(member))\n\n@bot.command()\nasync def tags(command : str, query = 'query'):\n if command == 'list':\n \"\"\"Lists all the commands\"\"\"\n msg = '```Tags:'\n for tag in list(sorted(bot.commands.keys())):\n msg += '\\n\\t' + tag\n msg += '```'\n await bot.say(msg)\n\n else:\n \"\"\"Returns commands that fit the query\"\"\"\n msg = '```Tags found matching \\'' + query + '\\':'\n for tag in list(sorted(bot.commands.keys())):\n if tag.find(query) > -1:\n msg += '\\n\\t' + tag\n await bot.say(msg + '```')\n\n@bot.command()\nasync def joke():\n\t\"\"\"It's a funny joke\"\"\"\n\tawait bot.say('Why was Six afraid of Seven')\n\tawait bot.say('Because Seven ate Ass!')\n\n@bot.command()\nasync def randomow():\n \"\"\"Picks a random Overwatch character\"\"\"\n charlist = [\"Ana\", \"Bastion\", \"D.Va\", \"Genji\", \"Hanzo\", \"Junkrat\", \"Lucio\", \"McCree\", \"Mei\", \"Mercy\", \"Pharah\", \"Reaper\", \"Reinhardt\", \"Roadhoag\", \"Soldier: 76\", \"Symmetra\", \"Torbjorn\", \"Tracer\", \"Widowmaker\", \"Winston\", \"Zarya\", \"Zenyatta\"]\n await bot.say(random.choice(charlist))\n\n@bot.command()\nasync def source():\n \"\"\"Replies with a link to the GitHub Page\"\"\"\n await bot.say('https://github.com/justinpchang/DontBanDog')\n\n@bot.command()\nasync def wiki(query : str):\n \"\"\"Returns the first Wikipedia result\"\"\"\n await bot.say('https://en.wikipedia.org/w/index.php?search=' + query.replace(\" \", \"+\"))\n\n@bot.command()\nasync def google(query : str):\n \"\"\"Returns the Google search page\"\"\"\n await bot.say('https://www.google.com/search?num=100&site=webhp&source=hp&q=' + query.replace(\" \", \"+\"))\n\nf = open(\"token.txt\", \"r\")\nbot.run(f.read())\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145544140","text":"\nimport sys\nsys.path.append('..')\n\nimport config\nfrom db_wrapper import with_connector\nfrom db_wrapper import with_transcation\nfrom db_wrapper import insert \nfrom db_wrapper import select\nfrom db_wrapper import delete\nfrom db_wrapper import update\nfrom db_wrapper import LOCK\nimport 
const_var\nimport exception.SystemException as SystemException\n\n@with_connector\ndef update_payment_order_url(pay_url, order_id):\n update('UPDATE Payment_Orders SET payUrl = ? WHERE orderId = ?',\n pay_url, order_id)\n\n@with_connector\ndef insert_payment_order(pay_order_id, uid, money, create_time, expire_time):\n insert('INSERT INTO Payment_Orders(orderId, uid, money, createTime, \\\n expireTime) VALUES (?, ?, ?, ?, ?)', \n pay_order_id, uid, money, create_time, expire_time)\n\n@with_connector \ndef select_payment_orders(uid, page):\n return select('SELECT orderId, money, createTime, expireTime, status, \\\n payUrl FROM Payment_Orders WHERE uid = ? ORDER BY createTime DESC\\\n LIMIT ?, ?', uid, _getStart(page), config.PAGE_NUM)\n\n@with_connector\ndef select_expire_time(order_id):\n return select('SELECT UNIX_TIMESTAMP(expireTime) FROM Payment_Orders WHERE \\\n orderId = ?', order_id, one=True)['UNIX_TIMESTAMP(expireTime)']\n\n@with_transcation\ndef update_payment_orders(order_id, complete_time):\n order_info = select('SELECT status, money, uid FROM Payment_Orders WHERE \\\n orderId = ?', order_id, one=True, lock=LOCK.exclusive)\n status = order_info['status']\n money = order_info['money']\n uid = order_info['uid']\n if status == const_var.PAYMENT_ORDERS_PAY:\n raise SystemException.PaymentOrderHavaPay\n if status == const_var.PAYMENT_ORDERS_TIMEOUT:\n raise SystemException.PaymentOrderTimeout\n if status != const_var.PAYMENT_ORDERS_UNPAY:\n raise SystemException.PaymentOrderUnknowStatus\n balance = select('SELECT balance FROM User WHERE uid = ?',\n uid, one=True, lock=LOCK.exclusive)['balance']\n balance = balance + money\n update('UPDATE User SET balance = ? WHERE uid = ?', balance, uid)\n update('UPDATE Payment_Orders SET status = ?, completeTime = ? WHERE\\\n orderId = ?', const_var.PAYMENT_ORDERS_PAY, complete_time, order_id)\n\n@with_connector\ndef delete_payment_order(uid, order_id):\n delete('DELETE FROM Payment_Orders WHERE orderId = ? 
and uid = ?', \n order_id, uid)\n\ndef _getStart(page):\n return (page-1)*config.PAGE_NUM \n","sub_path":"main/db/payment_orders_tb.py","file_name":"payment_orders_tb.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133305375","text":"#!/usr/bin/env python2.7\nimport sys\nimport math\n\nNbs= int(sys.argv[1], 16)\n\nA = 4\nW = 255\nB = 4\nFclkm = 6.25\n\nFloop = Nbs * A * B * Fclkm / W\n\nOutput = \"Nbs = \" + hex(Nbs) + \",\" + \"Floop = \" + str(Floop)\nprint(Output)\n","sub_path":"avalon/a3210/psen.py","file_name":"psen.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"486595713","text":"import os\nimport dnaweaver as dw\nfrom dnaweaver.biotools import gc_content\nimport matplotlib.pyplot as plt\n\noligo_com = dw.CommercialDnaOffer(\n name=\"Oligo.com\",\n sequence_constraints=[dw.SequenceLengthConstraint(max_length=200)],\n pricing=dw.PerBasepairPricing(0.10),\n lead_time=7,\n)\n\ndeluxe_dna_com = dw.CommercialDnaOffer(\n name=\"DeluxeDNA.com\",\n sequence_constraints=[dw.SequenceLengthConstraint(max_length=10000)],\n pricing=dw.PerBasepairPricing(0.25),\n lead_time=7,\n)\n\ncheap_dna_com = dw.CommercialDnaOffer(\n name=\"CheapDNA.com\",\n sequence_constraints=[\n dw.SequenceLengthConstraint(max_length=4000),\n dw.NoPatternConstraint(enzyme=\"AarI\"),\n dw.NoPatternConstraint(enzyme=\"BsaI\"),\n lambda seq: (0.4 < gc_content(seq) < 0.6),\n ],\n pricing=dw.PerBasepairPricing(0.10),\n lead_time=15,\n)\n\n# OLIGOS TO BLOCKS ASSEMBLY\n\noligo_assembly_station = dw.DnaAssemblyStation(\n name=\"Oligo Assembly Station\",\n assembly_method=dw.OligoAssemblyMethod(\n overhang_selector=dw.TmSegmentSelector(\n min_size=15, max_size=25, min_tm=50, max_tm=70\n ),\n min_segment_length=40,\n max_segment_length=200,\n sequence_constraints=[dw.SequenceLengthConstraint(max_length=1500)],\n duration=8,\n cost=2,\n ),\n supplier=oligo_com,\n coarse_grain=20,\n fine_grain=False,\n a_star_factor=\"auto\",\n memoize=True,\n)\n\n# BLOCKS TO CHUNKS ASSEMBLY\n\nblocks_sources_comparator = dw.DnaSuppliersComparator(\n name=\"bs_comparator\",\n suppliers=[oligo_assembly_station, cheap_dna_com, deluxe_dna_com],\n memoize=True,\n)\n\ngibson_blocks_assembly_station = dw.DnaAssemblyStation(\n name=\"Gibson Blocks Assembly\",\n assembly_method=dw.GibsonAssemblyMethod(\n overhang_selector=dw.FixedSizeSegmentSelector(80),\n min_segment_length=1000,\n max_segment_length=4000,\n duration=8,\n cost=16,\n ),\n supplier=blocks_sources_comparator,\n coarse_grain=300,\n fine_grain=False,\n memoize=True,\n a_star_factor=\"auto\",\n)\n\ngoldengate_blocks_assembly_station = dw.DnaAssemblyStation(\n name=\"Golden Gate Blocks Assembly\",\n assembly_method=dw.GoldenGateAssemblyMethod(\n enzyme=\"BsmBI\",\n wildcard_basepair=\"A\",\n min_segment_length=1000,\n max_segment_length=4000,\n duration=5,\n cost=6,\n ),\n supplier=blocks_sources_comparator,\n coarse_grain=400,\n fine_grain=False,\n memoize=True,\n a_star_factor=\"auto\",\n)\nECOLI_DB_PATH = os.path.join(\"..\", \"..\", \"data\", \"ecoli_blast_db\", \"ecoli\")\necoli_genome = dw.PcrExtractionStation(\n \"E. 
coli Genome (PCR)\",\n primers_supplier=oligo_com,\n homology_selector=dw.TmSegmentSelector(\n min_size=18, max_size=22, min_tm=55, max_tm=65\n ),\n blast_database=ECOLI_DB_PATH,\n max_amplicon_length=10000,\n extra_time=3,\n extra_cost=1,\n)\n\n# CHUNKS TO MEGACHUNKS ASSEMBLY\n\nchunks_assembly_station = dw.DnaAssemblyStation(\n name=\"Chunks assembly (Gibson)\",\n assembly_method=dw.GibsonAssemblyMethod(\n overhang_selector=dw.FixedSizeSegmentSelector(300),\n min_segment_length=7000,\n max_segment_length=25000,\n duration=8,\n ),\n supplier=dw.DnaSuppliersComparator(\n [\n ecoli_genome,\n goldengate_blocks_assembly_station,\n gibson_blocks_assembly_station,\n deluxe_dna_com,\n ]\n ),\n coarse_grain=1000,\n fine_grain=None,\n memoize=True,\n a_star_factor=\"auto\",\n)\n\nwith open(\"50kb_sequence.txt\", \"r\") as f:\n sequence = f.read()\n\nfig, axes = plt.subplots(1, 4, figsize=(16, 3), sharey=True)\nchunks_assembly_station.prepare_network_on_sequence(sequence)\nfor ax, max_lead_time in zip(axes, [20, 25, 30, 35]):\n quote = chunks_assembly_station.get_quote(\n sequence, max_lead_time=max_lead_time - 1, with_assembly_plan=True\n )\n\n print(\"Computing plan for lead time under:\", max_lead_time)\n report = quote.to_assembly_plan_report(refine_fragments_locations=False)\n report.plot_assembly_blocks(\n ax=ax, parts_offset=0.1, legend=False, plot_top_assembly=False\n )\n ax.set_title(\n \"Best plan under %d days\\n\\n£%d, %d days\"\n % (max_lead_time, quote.price, quote.lead_time)\n )\n ax.set_ylim(top=1)\n\n\nfig.savefig(\"different_max_lead_times.pdf\")\nprint (\"Done! See different_max_lead_times.pdf\")","sub_path":"examples/manuscript_examples/different_deadlines/different_deadlines.py","file_name":"different_deadlines.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201780159","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def rob(self, root):\n \"\"\"\n Smart thief realized houses form a binary tree.\n It will automatically contact the police if two directly-linked houses were broken into on the same night.\n Determine maximum value thief can rob without alerting police.\n :type root: TreeNode\n :rtype: int\n \"\"\"\n def search(node):\n if node is None: return (0, 0) # include / exclude node\n l = search(node.left)\n r = search(node.right)\n include_node = node.val + l[1] + r[1]\n exclude_node = max(l) + max(r)\n return (include_node, exclude_node)\n return max(search(root))\n","sub_path":"Dynamic_programming/337_House_robber_3.py","file_name":"337_House_robber_3.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"327192735","text":"#!/bin/env python\n# coding:utf-8\n\n# 文字種類を数えて見る\nimport os\nimport argparse\nimport logging\nlogger = logging.getLogger(\"logger\") #logger名loggerを取得\nlogger.setLevel(logging.DEBUG) #loggerとしてはDEBUGで\nimport sqlite3\nimport pandas as pd\n\n\ndef main():\n parser = argparse.ArgumentParser(description='calculate board score')\n parser.add_argument('--db',type=str, default=os.environ[\"DATA\"]+\"/board/board.db\")\n parser.add_argument('--logfile',type=str, default=os.path.basename(__file__).replace(\".py\",\"\")+\".log\")\n parser.add_argument('--outfile',type=str, default=\"retHist.txt\")\n args = parser.parse_args()\n\n 
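# Send this run's log records to the file given by --logfile.\n    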
logging.basicConfig(level=logging.DEBUG,\n filename=args.logfile,\n format=\"%(asctime)s %(levelname)-7s %(message)s\")\n\n StockRet(args).run()\n\nclass StockRet:\n def __init__(self, args):\n self.args=args\n self.codes=[]\n\n con = sqlite3.connect(self.args.db)\n #sql=\"select code from stockMaster where code not like \\\"J%\\\" and code not like \\\"^%\\\" \"\n sql=\"select code from stockMaster where code like \\\"J%\\\" or code like \\\"^%\\\" \"\n data = pd.read_sql_query(sql, con, params=())\n for i in range(len(data)):\n code=data[i:i+1][\"code\"].values[0]\n self.codes.append(code)\n con.close()\n\n def run(self):\n with open(self.args.outfile, \"w\") as f:\n\n con = sqlite3.connect(self.args.db)\n for code in self.codes:\n sql = \"select date,adj_close from histDaily where code = ? order by date\"\n data = pd.read_sql_query(sql, con, params=(code,))\n\n prev_date=None\n prev_price=None\n for i in range(len(data)):\n d=data[i:i+1]\n date=d[\"date\"].values[0]\n price=d[\"adj_close\"].values[0]\n if prev_date is not None:\n #sql=\"insert into retDaily (date,code,prev_date,ret) values(?,?,?,?)\"\n #pd.read_sql_query(sql, con ,params=(date,code,prev_date,(price-prev_price)/prev_price,))\n ret=(price-prev_price)/prev_price\n f.write(date+\"\\t\"+code+\"\\t\"+prev_date+\"\\t\"+str(ret)+\"\\n\")\n prev_date=date\n prev_price=price\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"theme/utils/stock_ret.py","file_name":"stock_ret.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"482200307","text":"import time\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib as mp\nmp.use('Agg')\nimport argparse\nimport matplotlib.pyplot as plt\n\nstart = time.time()\n\nparser = argparse.ArgumentParser(\n description='Define parameters to learn optimizer.')\nparser.add_argument('--n_samplings', type=int, default=10)\nparser.add_argument('--n_dimension', type=int, default=10)\nparser.add_argument('--n_hidden', type=int, default=20)\nparser.add_argument('--n_layers', type=int, default=2)\nparser.add_argument('--optimization_step', type=int, default=50)\nparser.add_argument('--unroll_time', type=int, default=20)\nparser.add_argument('--filename', type=str, default=\"NOMORE_\")\nargs = parser.parse_args()\n\n# using LSTM cell once to avoid reuse bug\nwith tf.variable_scope(\"RNN\") as rnn_opt:\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(args.n_hidden, state_is_tuple=True)\n cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * args.n_layers, state_is_tuple=True)\n # lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(args.n_hidden)\n # cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * args.n_layers)\n ini_output, ini_state = cell(\n tf.random_normal([1, 1]), cell.zero_state(1, tf.float32))\n ini_softmax_w = tf.get_variable(\"softmax_w\", [args.n_hidden, 1], initializer=tf.random_normal_initializer(), trainable=True)\n ini_softmax_b = tf.get_variable(\"softmax_b\", [1], initializer=tf.constant_initializer(0.0), trainable=True)\n # ini_softmax_w = tf.get_variable(\"softmax_w\", [args.n_hidden, 1], initializer=tf.random_normal_initializer())\n # ini_softmax_b = tf.get_variable(\"softmax_b\", [1], initializer=tf.constant_initializer(0.0))\n\n\ndef main():\n #Build the Graph\n loss_list, loss, train_op = OPTIMIZER()\n test_cost_list = OPTIMIZER(train=False)\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n print(\"initialization time: %f\" %(time.time()-start))\n now = time.time()\n\n 
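# Each iteration below is one meta-update of the LSTM optimizer: train_op\n            # backpropagates through all unrolled optimization steps at once.\n            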
print(\"======= Training Start ======:\")\n cost_list = []\n for epoch in range(args.unroll_time):\n cost_s, cost, _ = sess.run([loss_list, loss, train_op])\n cost_list.append(cost)\n cost_single = sess.run(tf.unpack(cost_s))\n plt.plot(range(len(cost_single)), cost_single)\n print(\"At %d training step : loss %f\" % (epoch, cost))\n imagename = args.filename + \"train_single_loop\" + \".png\"\n plt.savefig(imagename)\n plt.cla()\n\n print(\"~~~~~~training time: %f==\" %(time.time()-now))\n now = time.time()\n \n plt.plot(range(len(cost_list)), cost_list)\n imagename = args.filename + \"train_cost\" + \".png\"\n plt.savefig(imagename)\n plt.cla()\n\n print(\"======= Testing Start ======:\")\n for sample in range(args.n_samplings):\n t_cost_list = sess.run(test_cost_list)\n test_cost_list = sess.run(tf.unpack(t_cost_list))\n plt.plot(range(len(test_cost_list)), test_cost_list)\n \n print(\"~~~~~~test time: %f\" %(time.time()-now))\n imagename = args.filename + \"test_single_loop\" + \".png\"\n plt.savefig(imagename)\n plt.cla()\n \n print(\"~~~~~~total time: {0}\".format(time.time()-start))\n\n\ndef Sample_generater():\n W = tf.truncated_normal([args.n_dimension, args.n_dimension], mean=0.0, stddev=1.0)\n THETA = tf.truncated_normal([args.n_dimension, 1], mean=0.0, stddev=1.0)\n y = tf.mul(W, THETA)\n theta = tf.truncated_normal([args.n_dimension, 1], mean=0.0, stddev=1.0)\n return W, y, theta\n\n\ndef LSTM_cell(grad, state):\n with tf.variable_scope(\"RNN\", reuse=True):\n cell_output, state = cell(grad, state)\n softmax_w = tf.get_variable(\"softmax_w\")\n softmax_b = tf.get_variable(\"softmax_b\")\n delta = tf.matmul(cell_output, softmax_w) + softmax_b\n return state, delta\n\n\ndef Optimizee(W, y, theta):\n error = tf.reduce_sum(tf.square(tf.matmul(W, theta) - y))\n grad = tf.gradients(error, theta)[0]\n return error, grad\n\n\ndef optimization(grad, state_list, theta):\n theta_temp =tf.unpack(theta)\n for i in range(args.n_dimension):\n state_i = state_list[i]\n grad_i = tf.slice(grad, begin=[i, 0], size=[1, 1])\n state_new, delta = LSTM_cell(grad_i, state_i)\n state_list[i] = state_new\n theta_temp[i] = theta_temp[i] + delta\n theta_new = tf.reshape(tf.pack(theta_temp),[args.n_dimension, 1])\n #theta_temp is a list theta_new is a tensor\n return state_list, theta_new\n\n\ndef OPTIMIZER(train = True):\n W, y, theta = Sample_generater()\n state = [cell.zero_state(1, tf.float32) for i in range(args.n_dimension)]\n error, grad = Optimizee(W, y, theta)\n l_fai = []\n for step in range(args.optimization_step):\n print(step)\n state, theta = optimization(grad, state, theta)\n error, grad = Optimizee(W, y, theta)\n l_fai.append(error)\n l_temp = tf.pack(l_fai)\n\n if train :\n # optimize the Optimizer\n loss = tf.reduce_sum(l_temp) / args.optimization_step\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n lr = 0.001\n optimizer_rnn = tf.train.AdamOptimizer(lr)\n train_op = optimizer_rnn.apply_gradients(zip(grads, tvars))\n return l_temp, loss, train_op\n else:\n return l_temp\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"function_style/ltol.py","file_name":"ltol.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"605406541","text":"\"\"\"\nAn emailing notification backend for django-notification. 
\n\n\"\"\"\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy\n\n# favour django-mailer but fall back to django.core.mail\nif 'mailer' in settings.INSTALLED_APPS:\n from mailer import send_mail\nelse:\n from django.core.mail import send_mail\n\n\nclass BaseBackend(object):\n mediums = ()\n \n def send(self):\n raise NotImplementedError()\n\n\nclass ModelBackend(BaseBackend):\n\n def send(self, user, notice_type, on_site, sender, context, **kwargs):\n \"\"\"\n Create a Notice model.\n \"\"\"\n from notification.models import Notice\n message = notice_type.render_template('notice.html', context)\n Notice.objects.create(recipient=user, message=message,\n notice_type=notice_type, on_site=on_site, sender=sender)\n\n\nclass EmailBackend(BaseBackend):\n mediums = (('1', ugettext_lazy('Email')),)\n medium_defaults = {'1': 2}\n \n def send(self, user, notice_type, context, **kwargs):\n \"\"\"\n Send the notification to a user as an e-mail.\n \"\"\"\n if not user.email or not notice_type.should_send(user, '1'):\n return\n \n # Strip newlines from subject\n subject = ''.join(render_to_string('notification/email_subject.txt', {\n 'message': notice_type.render_template('short.txt', context),\n }, context).splitlines())\n \n body = render_to_string('notification/email_body.txt', {\n 'message': notice_type.render_template('full.txt', context),\n }, context)\n \n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [user.email])\n","sub_path":"notification/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"141928720","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# NCTR, Nile Center for Technology Research\n# Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nimport time\nfrom lxml import etree\nfrom openerp.osv import fields, osv,orm\nfrom openerp.tools.translate import _\nclass account_report_asset_list(osv.osv_memory):\n _name = \"account.report.asset.operation\"\n _description = \"General Ledger Report\"\n\n _columns = {\n \n 'filter': fields.selection([('filter_no', 'No Filters'), ('filter_by_date', 'Filter By Date')], \"Filter by\", required=True),\n 'company_id':fields.many2one('res.company','Company'),\n 'date_from': fields.date( 'Start Date'),\n 'date_to': fields.date( 'End Date'),\n 'category_id': fields.many2many('account.asset.category', 'account_category_rel', 'asset_id', 'category_id', 'Asset Category', required=False),\n 'asset_lists': fields.selection([\n ('list', 'Asset List'),\n ('initial', 'Initial Value'),\n ('revalue', 'Revalue Asset'),\n ('abandon', 'Abandon Asset'),\n ('sale', 'Asset Sale'),\n ('depreciation', 'Asset Depreciation Value'),\n ('details', 'Assets Detials'),\n ('end_of_year', 'End Of Year Sammary'),\n ], 'Asset Lists', required=True),\n 'date': fields.date( 'Date'),\n }\n \n\n def _get_all_category(self, cr, uid, context=None):\n return self.pool.get('account.asset.category').search(cr, uid , [])\n \n _defaults = {\n 'filter' :'filter_no',\n } \n\n\n def check_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n data = {}\n data['ids'] = context.get('active_ids', [])\n data['model'] = context.get('active_model', 'ir.ui.menu')\n data['form'] = self.read(cr, uid, ids, 
['company_id','category_id','filter','date_from', 'date_to','asset_lists', 'revalue_asset','depreciation_value','sale_value','addition_value','abandon_asset','filter_by_date','date'])[0] \n if data['form']['asset_lists'] == 'details':\n return {'type': 'ir.actions.report.xml',\\\n 'report_name': 'account.assets.details.report',\\\n 'datas': data}\n if data['form']['asset_lists'] == 'end_of_year':\n return {'type': 'ir.actions.report.xml',\\\n 'report_name': 'account.assets.end_of_year.report',\\\n 'datas': data}\n if data['form']['asset_lists'] == 'depreciation':\n return { 'type': 'ir.actions.report.xml', 'report_name': 'account.asset.Depr', 'datas': data}\n return { 'type': 'ir.actions.report.xml', 'report_name': 'account.asset.operation', 'datas': data}\n \n \naccount_report_asset_list()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"v_7/Dongola/common/account_asset_custom/wizard/account_report_asset_operation.py","file_name":"account_report_asset_operation.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"34459619","text":"\nimport sys\nimport os\nimport time\nimport argparse\nBIN = os.path.expanduser(\"../../../\") # folder containing PyECLOUD, PyPIC, PyKLU\nif BIN not in sys.path:\n sys.path.append(BIN)\n\nfrom PyECLOUD.buildup_simulation import BuildupSimulation\n\nsim_folder = 'LHC_ArcDipReal_450GeV_sey1.70_2.5e11ppb_bl_1.00ns_multispecies'\n\n# check if user provided folder as command line argument\nparser = argparse.ArgumentParser()\nparser.add_argument('--folder', help='Simulation_folder')\nparser.add_argument('--angle-dist-func',\n help='Angular distribution of new MPs relative to surface normal. Introduced in July 2017.',\n choices=('2D', '3D'), default='3D')\n\nargs = parser.parse_args()\nif args.folder:\n sim_folder = args.folder\n\nangle_distribution = 'cosine_%s' % args.angle_dist_func\nfilen_main_outp = sim_folder + '/Pyecltest_angle%s.mat' % args.angle_dist_func\n\n\ntime_0 = time.time()\nsim = BuildupSimulation(pyecl_input_folder=sim_folder, filen_main_outp=filen_main_outp,\n secondary_angle_distribution=angle_distribution, photoelectron_angle_distribution=angle_distribution)\nsim.run()\n\ntime_needed = time.time() - time_0\n\n\nprint('')\nprint('Test simulation done in %.2f s!' 
% time_needed)\nprint('To inspect the results you can run:')\nprint('001_comparison_against_reference.py')\nprint('')\n","sub_path":"testing/tests_multicloud/000_run_simulation.py","file_name":"000_run_simulation.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"336209816","text":"def divisorSum(n):\n    result = 0\n    for i in range(1, int(n/2)+1):\n        if n % i == 0:\n            result += i\n    return result\n\nabundantNumbers = set()\nabundantSums = set()\n\ndef listAbundantNumbers(n):\n\tfor i in range(1, n+1):\n\t\tif divisorSum(i) > i:\n\t\t\tabundantNumbers.add(i)\n\treturn 0\n\ndef listAbundantSums():\n\tfor i in abundantNumbers:\n\t\tfor j in abundantNumbers:\n\t\t\tabundantSums.add(i+j)\n\ndef abundantTotal(n):\n\ttotal = 0\n\tlistAbundantNumbers(n)\n\tprint(len(abundantNumbers))\n\tlistAbundantSums()\n\tprint(len(abundantSums))\n\tfor i in range(n):\n\t\tif i not in abundantSums:\n\t\t\ttotal += i\n\treturn total\n\nprint(abundantTotal(28124))\n","sub_path":"p023/p023.py","file_name":"p023.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"217087360","text":"import turtle\r\n\r\nwn = turtle.Screen()\r\n\r\nwn.bgcolor('black')\r\nwn.title(\"PingPong\")\r\nwn.setup(height=600, width=800)\r\n\r\n#Score!!!\r\nscore_a = 0\r\nscore_b = 0\r\n\r\n\r\n\r\n\r\n# paddle1 (first player) setup\r\n\r\npaddle1 = turtle.Turtle()\r\npaddle1.speed(0)\r\npaddle1.shape(\"square\")\r\npaddle1.color(\"white\")\r\npaddle1.shapesize(stretch_wid=5, stretch_len=1)\r\npaddle1.penup()\r\npaddle1.goto(-350, 0)\r\n\r\n\r\n# paddle1 movement\r\n\r\ndef paddle1_up():\r\n    y = paddle1.ycor()\r\n    y += 20\r\n    paddle1.sety(y)\r\n\r\ndef paddle1_down():\r\n    y = paddle1.ycor()\r\n    y -= 20\r\n    paddle1.sety(y)\r\n\r\n\r\nwn.listen()\r\nwn.onkeypress(paddle1_up, \"w\")\r\nwn.onkeypress(paddle1_down, \"s\")\r\n\r\n# paddle2 (second player)\r\n\r\npaddle2 = turtle.Turtle()\r\npaddle2.speed(0)\r\npaddle2.shape(\"square\")\r\npaddle2.color(\"white\")\r\npaddle2.shapesize(stretch_wid=5, stretch_len=1)\r\npaddle2.penup()\r\npaddle2.goto(350, 0)\r\n\r\n\r\n# paddle2 movement\r\n\r\ndef paddle2_up():\r\n    y = paddle2.ycor()\r\n    y += 20\r\n    paddle2.sety(y)\r\n\r\n\r\ndef paddle2_down():\r\n    y = paddle2.ycor()\r\n    y -= 20\r\n    paddle2.sety(y)\r\n\r\n\r\nwn.listen()\r\nwn.onkeypress(paddle2_up, \"Up\")\r\nwn.onkeypress(paddle2_down, \"Down\")\r\n\r\n# making a ball\r\n\r\nball = turtle.Turtle()\r\nball.speed(0)\r\nball.shape(\"circle\")\r\nball.color(\"white\")\r\nball.penup()\r\nball.goto(0, 0)\r\nball.dx = 3 # ball moves 3 pixels per update in x\r\nball.dy = 3 # ball moves 3 pixels per update in y\r\n\r\n#PEN (draws the score)\r\npen = turtle.Turtle()\r\npen.speed(0)\r\npen.color(\"white\")\r\npen.penup()\r\npen.hideturtle()\r\npen.goto(0, 260)\r\npen.write(\"Player A: 0    Player B: 0\", align=\"center\", font=(\"Courier\",24,\"normal\")) #font is the typeface and size ('normal' means regular style); align is center, left, or right\r\n\r\n# MAIN GAME LOOP\r\nwhile True:\r\n    wn.update()\r\n    # Move the ball\r\n    ball.setx(ball.xcor() + ball.dx) # xcor and ycor are functions which return your object's coordinate in x or in y\r\n    ball.sety(ball.ycor() + ball.dy)\r\n    # Borders\r\n    if ball.ycor() > 290: # for Y positive\r\n        ball.sety(290) # idk why not 300 if height is 300 + 300 = 600\r\n        ball.dy = ball.dy * -1\r\n\r\n    if ball.ycor() < -290: # for Y 
negative\r\n ball.sety(-290)\r\n ball.dy = ball.dy * -1\r\n\r\n if ball.xcor() > 390: # for X positive\r\n ball.goto(0, 0)\r\n ball.dx = ball.dx * -1\r\n score_a += 1\r\n pen.clear()\r\n pen.write(f\"Player A: {score_a} Player B: {score_b} \", align=\"center\", font=(\"Courier\", 24, \"normal\"))\r\n\r\n if ball.xcor() < -390: # for X negative\r\n ball.goto(0, 0)\r\n ball.dx = ball.dx * -1\r\n score_b += 1\r\n pen.clear()\r\n pen.write(f\"Player A: {score_a} Player B: {score_b}\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\r\n #Paddle and ball collisions\r\n if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle2.ycor() + 40 and ball.ycor() > paddle2.ycor() - 40):\r\n ball.setx(340)\r\n ball.dx = ball.dx * -1\r\n if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle1.ycor() + 40 and ball.ycor() > paddle1.ycor() - 40):\r\n ball.setx(-340)\r\n ball.dx = ball.dx * -1\r\n #Paddles cant go out the border\r\n #paddle1\r\n if paddle1.ycor() > 260:\r\n paddle1.sety(260)\r\n if paddle1.ycor() < -260:\r\n paddle1.sety(-260)\r\n #paddle2\r\n if paddle2.ycor() > 260:\r\n paddle2.sety(260)\r\n if paddle2.ycor() < -260:\r\n paddle2.sety(-260)\r\n# if score_a == 5:\r\n #pen.clear()\r\n #pen.goto(0,0)\r\n #ball.dx = 0\r\n #ball.dy = 0\r\n #pen.write(\"Player A Won!\", align=\"center\",font=(\"Courier\", 26,\"normal\"))\r\n# if score_a == 5:\r\n #pen.clear()\r\n #pen.goto(0,20) #Воно якогось хріна блимає, бо це цикл, мені виправляти влом\r\n #all.dx = 0\r\n #ball.dy = 0\r\n #pen.write(\"Player B Won!\", align=\"center\",font=(\"Courier\", 26,\"normal\"))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nwn.mainloop()\r\n","sub_path":"PinGPonG_in1program.py","file_name":"PinGPonG_in1program.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519272838","text":"# Tool imports\r\nfrom bph.tools.windows.peid import BphPeid as Peid\r\n\r\n# Core Imports\r\nfrom bph.core.server.template import BphTemplateServer as TemplateServer\r\nfrom bph.core.session import BphSession as Session\r\nfrom bph.core.sample import BphLabFile as LabFile\r\n\r\nsession = Session(project_name='blackhat_arsenal_2019')\r\nsession.start()\r\nsession.set_launcher(move_sample=False)\r\n\r\ntemplateserver = TemplateServer()\r\ntemplateserver.start()\r\n\r\nsample_file = LabFile(session.launcher_abs_path)\r\n\r\npeid = Peid(sample_file)\r\npeid.kanal_scan()\r\npeid.execute()\r\npeid.output()\r\npeid.files()\r\n","sub_path":"scripts/examples/tools/peid_kanal.py","file_name":"peid_kanal.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74347150","text":"from common.point import ccw, read_points, output_convex_hull\n\n\ndef graham_scan(points):\n points.sort(key=lambda p: p.x)\n upper_hull = []\n for p in points:\n while len(upper_hull) > 1 and ccw(upper_hull[-2], upper_hull[-1], p) >= 0:\n upper_hull.pop()\n upper_hull.append(p)\n lower_hull = []\n for p in points:\n while len(lower_hull) > 1 and ccw(lower_hull[-2], lower_hull[-1], p) <= 0:\n lower_hull.pop()\n lower_hull.append(p)\n lower_hull.reverse()\n upper_hull.extend(lower_hull[1:])\n return upper_hull\n\n\nif __name__ == '__main__':\n points = read_points('points')\n convex_hull = graham_scan(points)\n output_convex_hull('graham scan', 
convex_hull)\n","sub_path":"handin3/graham_scan.py","file_name":"graham_scan.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414741040","text":"import numpy as np\nimport time\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\nimport sys\n\nclass ECMSolver(object):\n def __init__(self, model, data, val_data, **kwargs):\n\n self.model = model\n self.data = data\n self.val_data = val_data\n self.word_to_idx = kwargs.pop('word2idx', None)\n self.n_epochs = kwargs.pop('n_epochs', 10)\n self.batch_size = kwargs.pop('batch_size', 100)\n self.learning_rate = kwargs.pop('learning_rate', 0.01)\n self.print_every = kwargs.pop('print_every', 100)\n self.save_every = kwargs.pop('save_every', 1)\n self.log_path = kwargs.pop('log_path', './log/')\n self.model_path = kwargs.pop('model_path', './model/')\n self.pretrained_model = kwargs.pop('pretrained_model', None)\n self.test_model = kwargs.pop('test_model', './model/lstm/model-1')\n self._start = self.word_to_idx['']\n self._null = self.word_to_idx['']\n self.idx2word = dict([(v,k) for k,v in self.word_to_idx.items()])\n\n if not os.path.exists(self.model_path):\n os.makedirs(self.model_path)\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n def train(self):\n # train/val dataset\n n_examples = len(self.data['questions'])\n n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))\n posts = np.array(self.data['trans_questions'])\n emotion_category = np.array(self.data['questions_emotion'])\n response = np.array(self.data['trans_answers'])\n\n logits_list, loss, train_op, memory_norm, memory_norm_list = self.model.build_model()\n print(\"model built\")\n tf.get_variable_scope().reuse_variables()\n\n # train op\n # with tf.name_scope('optimizer'):\n\n print(\"The number of epoch: %d\" % self.n_epochs)\n print(\"Data size: %d\" % n_examples)\n print(\"Batch size: %d\" % self.batch_size)\n print(\"Iterations per epoch: %d\" % n_iters_per_epoch)\n\n\n config = tf.ConfigProto(allow_soft_placement=True)\n # config.gpu_options.per_process_gpu_memory_fraction=0.9\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(max_to_keep=40)\n\n if self.pretrained_model is not None:\n print(\"Start training with pretrained Model..\")\n saver.restore(sess, self.pretrained_model)\n\n for e in range(self.n_epochs):\n rand_idxs = np.random.permutation(n_examples)\n posts = posts[rand_idxs]\n emotion_category = emotion_category[rand_idxs]\n response = response[rand_idxs]\n\n for i in range(n_iters_per_epoch):\n posts_batch = posts[i * self.batch_size:(i + 1) * self.batch_size]\n emotion_batch = np.reshape(emotion_category[i * self.batch_size:(i + 1) * self.batch_size],\n [-1, 1])\n response_batch = response[i * self.batch_size:(i + 1) * self.batch_size]\n feed_dict = {self.model.posts: posts_batch,\n self.model.emotion_category: emotion_batch,\n self.model.response: response_batch}\n _, l, logits_batch, memory_norm_batch, memory_norm_batch_list = sess.run([train_op, loss, logits_list, memory_norm, memory_norm_list], feed_dict)\n # write summary for tensorboard visualization\n\n if (i + 1) % self.print_every == 0:\n response_example = [np.argmax(np.squeeze(elem[0]), axis=0) for elem in logits_batch]\n lists = []\n for idx in response_example:\n if \"END\" not in str(self.idx2word[idx]):\n lists.append(idx)\n else:\n 
break\n response_example = ' '.join([str(self.idx2word[idx]) for idx in lists])\n response_truth = [str(self.idx2word[idx]) for idx in response_batch[0]]\n while True:\n if response_truth[-1] == '':\n response_truth.pop()\n else:\n break\n response_truth = ' '.join(response_truth)\n print(\"epoch %d iteration %d loss %f\" % (e, i, l))\n print(\"Ground truth response is %s\" % response_truth)\n print(\"Trained response is %s\" % response_example)\n print(\"The final norm of internal memory is %f\" % memory_norm_batch)\n # print(\"The change of the norm of internal memory is\")\n # print(memory_norm_batch_list)\n sys.stdout.flush()\n\n if (e + 1) % self.save_every == 0:\n saver.save(sess, os.path.join(self.model_path, 'model'), global_step=e + 1)\n print(\"model-%s saved.\" % (e + 1))\n\n def apply(self, t):\n n_examples = len(self.data['questions'])\n n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))\n posts = np.array(self.data['trans_questions'])\n emotion_category = np.array(self.data['questions_emotion'])\n test_decoder_outputs, _ = self.model.build_sampler(t, is_apply=True)\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n responses = []\n with tf.Session(config=config) as sess:\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model)\n\n for i in range(n_iters_per_epoch):\n posts_batch = posts[i * self.batch_size:(i + 1) * self.batch_size]\n emotion_batch = np.reshape(emotion_category[i * self.batch_size:(i + 1) * self.batch_size],\n [-1, 1])\n feed_dict = {self.model.posts: posts_batch,\n self.model.emotion_category: emotion_batch}\n decoder_outputs = sess.run([test_decoder_outputs], feed_dict)\n decoder_outputs = decoder_outputs[0]\n length = len(decoder_outputs[0])\n for i in range(length):\n response = [np.argmax(np.squeeze(elem[i]), axis=0) for elem in decoder_outputs]\n lists = []\n for idx in response:\n if \"END\" not in str(self.idx2word[idx]):\n lists.append(idx)\n else:\n break\n response = ' '.join([str(self.idx2word[idx]) for idx in lists])\n responses.append(response)\n return responses\n\n def test(self):\n n_examples = len(self.data['questions'])\n n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))\n posts = np.array(self.data['trans_questions'])\n test_response = np.array(self.data['trans_answers'])\n emotion_category = np.array(self.data['questions_emotion'])\n test_decoder_outputs, loss = self.model.build_sampler(t=0, is_apply=False)\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n responses = []\n total_loss = 0\n with tf.Session(config=config) as sess:\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model)\n\n for i in range(n_iters_per_epoch):\n posts_batch = posts[i * self.batch_size:(i + 1) * self.batch_size]\n emotion_batch = np.reshape(emotion_category[i * self.batch_size:(i + 1) * self.batch_size],\n [-1, 1])\n response_batch = test_response[i * self.batch_size:(i + 1) * self.batch_size]\n feed_dict = {self.model.posts: posts_batch,\n self.model.emotion_category: emotion_batch,\n self.model.response: response_batch}\n # import pdb;pdb.set_trace()\n decoder_outputs, cur_loss = sess.run([test_decoder_outputs, loss], feed_dict)\n total_loss += cur_loss * posts.shape[0]\n decoder_outputs = decoder_outputs[0]\n length = len(decoder_outputs[0])\n for i in range(length):\n response = [np.argmax(np.squeeze(elem[i]), axis=0) for elem in decoder_outputs]\n lists = []\n for idx in response:\n if \"END\" not in 
str(self.idx2word[idx]):\n lists.append(idx)\n else:\n break\n response = ' '.join([str(self.idx2word[idx]) for idx in lists])\n responses.append(response)\n print(\"total loss is \", total_loss)\n new_data = {\"questions\": self.data['questions'], \"answers\": responses,\n \"ground_truth\": self.data['answers'], \"emotion_category\": self.data['questions_emotion']}\n return new_data\n\n","sub_path":"ECM/solver_revise.py","file_name":"solver_revise.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"537172387","text":"import random\n\nfrom faker import Faker\nfrom django.core.management.base import BaseCommand\nfrom django_seed import Seed\nfrom products.models import Service,Room,ServiceCategory\n\n\nclass Command(BaseCommand):\n help = 'This command creates test data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--number', default=1, type=int, help=\"How many data do you want to create?\"\n )\n\n def handle(self, *args, **options):\n number = options.get('number')\n seeder = Seed.seeder()\n fake = Faker([\"ko_KR\"])\n fake2 = Faker([\"en_US\"])\n\n seeder.add_entity(Service, 80, {\n 'name' : lambda x: fake.word(),\n 'service_category': lambda x: random.choice(ServiceCategory.objects.all())\n })\n seeder.execute()\n\n self.stdout.write(self.style.SUCCESS('services created'))\n\n","sub_path":"users/management/commands/seed_services.py","file_name":"seed_services.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400871386","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.utils.data as data_utils\nfrom data_loader import get_dataset\nfrom crf import CRF\nimport sys\n\nif __name__=='__main__':\n torch.multiprocessing.freeze_support()\n # Tunable hyperparameters\n batch_size = 32 #256\n num_epochs = 10\n max_iters = 1000\n learning_rate = 0.1\n C = 100 # C > 0 is a trade-off weight that balances log-likelihood and regularization\n\n # Model arguments used for parameter initialization\n input_dim = {'flattened': 128, 'height': 16, 'width': 8}\n num_labels = 26\n conv_layers = [ # Define the Convolution layers in order. 
Tune these too.\n {'filter_shape': (1, 1, 5, 5), 'padding': 0, 'stride': 1},\n {'filter_shape': (1, 1, 3, 3), 'padding': 0, 'stride': 1}\n ]\n\n print_iter = 25 # Prints results every n iterations\n cuda = torch.cuda.is_available() # Use GPU?\n\n # Instantiate the CRF model\n crf = CRF(input_dim, conv_layers, num_labels, batch_size, cuda)\n crf.init_params() # Register submodules & initialize all model parameters (with requires_grad=True)\n\n # print(list(crf.parameters())) # Verify if all parameters are listed\n # for name, param in crf.named_parameters(): # Print parameters with names\n # if param.requires_grad: print(name, param.data)\n\n opt = torch.optim.LBFGS(crf.parameters(), lr=learning_rate, max_iter=5)\n\n ##################################################\n # Begin training\n ##################################################\n step = 0\n for i in range(num_epochs):\n print(\"Processing epoch {}\".format(i))\n dataset = get_dataset()\n split = int(0.5 * len(dataset.data)) # train-test split\n train_data, test_data = dataset.data[:split], dataset.data[split:]\n train_target, test_target = dataset.target[:split], dataset.target[split:]\n\n # Convert dataset into torch tensors\n train = data_utils.TensorDataset(torch.tensor(train_data).float(), torch.tensor(train_target).long())\n test = data_utils.TensorDataset(torch.tensor(test_data).float(), torch.tensor(test_target).long())\n\n # Define train and test loaders\n train_loader = data_utils.DataLoader(train, # dataset to load from\n batch_size=batch_size, # examples per batch (default: 1)\n shuffle=True,\n sampler=None, # if a sampling method is specified, `shuffle` must be False\n num_workers=5, # subprocesses to use for sampling\n pin_memory=False, # whether to return an item pinned to GPU\n )\n\n test_loader = data_utils.DataLoader(test, # dataset to load from\n batch_size=batch_size, # examples per batch (default: 1)\n shuffle=False,\n sampler=None, # if a sampling method is specified, `shuffle` must be False\n num_workers=5, # subprocesses to use for sampling\n pin_memory=False, # whether to return an item pinned to GPU\n )\n print('Loaded dataset... 
')\n\n # Now start training\n for i_batch, sample in enumerate(train_loader):\n # print('Batch:', i_batch)\n train_X = sample[0]\n train_Y = sample[1]\n\n if cuda:\n train_X = train_X.cuda()\n train_Y = train_Y.cuda()\n\n def closure():\n opt.zero_grad()\n output = crf(train_X, train_Y)\n # print('output---', output)\n tr_loss = crf.loss(output, C)\n print('loss-----', tr_loss)\n # print()\n # print(crf.W.grad)\n # print(crf.T.grad)\n # print(crf.Ks[0])\n tr_loss.backward()\n # print(crf.Ks[0].grad)\n return tr_loss\n\n tr_loss = opt.step(closure)\n\n # print to stdout occasionally:\n if step % print_iter == 0:\n random_ixs = np.random.choice(test_data.shape[0], batch_size, replace=False)\n test_X = test_data[random_ixs, :]\n test_Y = test_target[random_ixs, :]\n\n # Convert to torch\n test_X = torch.from_numpy(test_X).float()\n test_Y = torch.from_numpy(test_Y).long()\n\n if cuda:\n test_X = test_X.cuda()\n test_Y = test_Y.cuda()\n test_loss = crf.loss(crf(train_X, train_Y), C)\n print(step, tr_loss.data, test_loss.data,\n tr_loss.data / batch_size, test_loss.data / batch_size)\n\n ##################################################################\n # IMPLEMENT WORD-WISE AND LETTER-WISE ACCURACY HERE\n ##################################################################\n\n step += 1\n if step > max_iters: raise StopIteration\n\n # sys.exit()\n\n del train, test\n","sub_path":"try/dg/crf_test.py","file_name":"crf_test.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412223093","text":"import pico2d\nimport math\nimport random\nIDLE, RUN, SLEEP = range(3)\n\nclass Grass:\n def __init__(self):\n self.image = pico2d.load_image('../res/grass.png')\n print(self.image)\n def draw(self):\n self.image.draw(400, 30)\n\nclass Boy:\n image = None\n def __init__(self):\n print(\"Creating..\")\n self.x = random.randint(100, 700)\n self.y = 90\n self.speed = 3\n self.frame = random.randint(0, 7)\n self.timer = 0\n self.velocity = 0\n self.event_que=[]\n self.cur_state = IDLE\n self.dir = 1\n self.enter_state[IDLE](self)\n if Boy.image == None:\n Boy.image = pico2d.load_image('../res/animation_sheet.png')\n\n def enter_SLEEP(self):\n self.frame = 0\n def exit_SLEEP(self):\n pass\n def do_SLEEP(self):\n self.frame = (self.frame + 1)%8\n def draw_SLEEP(self):\n if self.dir == 1:\n Boy.image.clip_composite_draw(self.frame*100, 300, 100, 100, math.pi/2, '', self.x, self.y, 100, 100)\n else:\n Boy.image.clip_composite_draw(self.frame*100, 200, 100, 100, -math.pi/2, '', self.x, self.y, 100, 100)\n def enter_IDLE(self):\n self.timer =100\n self.frame =0\n def exit_IDLE(self):\n pass\n def do_IDLE(self):\n self.frame = (self.frame + 1)%8\n self.timer -= 1\n if self.timer == 0:\n self.change_state(SLEEP)\n def draw_IDLE(self):\n if self.dir == 1:\n Boy.image.clip_draw(self.frame * 100, 300, 100, 100, self.x, self.y)\n else:\n Boy.image.clip_draw(self.frame * 100, 200, 100, 100, self.x, self.y)\n def enter_RUN(self):\n self.frame = 0\n self.dir = self.velocity\n def exit_RUN(self):\n pass\n def do_RUN(self):\n self.frame = (self.frame + 1)%8\n self.x += self.velocity\n self.x = pico2d.clamp(25, self.x, 800-25)\n def draw_RUN(self):\n if self.velocity == 1:\n Boy.image.clip_draw(self.frame * 100, 100, 100, 100, self.x, self.y)\n else:\n Boy.image.clip_draw(self.frame * 100, 0, 100, 100, self.x, self.y)\n\n enter_state = {IDLE: enter_IDLE, RUN: enter_RUN, SLEEP: enter_SLEEP}\n exit_state = {IDLE: exit_IDLE, RUN: 
exit_RUN, SLEEP: exit_SLEEP}\n do_state = {IDLE: do_IDLE, RUN: do_RUN, SLEEP: do_SLEEP}\n draw_state = {IDLE: draw_IDLE, RUN: draw_RUN, SLEEP: draw_SLEEP}\n\n def draw(self):\n self.draw_state[self.cur_state](self)\n def update(self):\n self.do_state[self.cur_state](self)\n def change_state(self, state):\n self.exit_state[self.cur_state](self)\n self.enter_state[state](self)\n self.cur_state = state\n def add_event(self, event):\n self.event_que.insert(0, event)\n\ndef handle_events():\n global running, boy\n events = pico2d.get_events()\n for event in events:\n if event.type == pico2d.SDL_QUIT:\n running = False\n elif event.type == pico2d.SDL_KEYDOWN:\n if event.key == pico2d.SDLK_ESCAPE:\n running = False\n elif event.key == pico2d.SDLK_RIGHT:\n boy.velocity += 1\n boy.change_state(RUN)\n elif event.key == pico2d.SDLK_LEFT:\n boy.velocity -= 1\n boy.change_state(RUN)\n elif event.type == pico2d.SDL_KEYUP:\n if event.key == pico2d.SDLK_RIGHT:\n boy.velocity -= 1\n boy.change_state(IDLE)\n elif event.key == pico2d.SDLK_LEFT:\n boy.velocity += 1\n boy.change_state(IDLE)\n \npico2d.open_canvas()\ngrass = Grass()\nboy = Boy()\n\nrunning = True\nwhile running:\n pico2d.clear_canvas()\n grass.draw()\n boy.update()\n boy.draw()\n print('timer = %d'%boy.timer)\n pico2d.update_canvas()\n handle_events()\n pico2d.delay(0.01)\n\npico2d.close_canvas()\n","sub_path":"hw_1019/hw_1019.py","file_name":"hw_1019.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"159888582","text":"# -*- coding: utf-8 -*-\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('applications', '0003_auto_20150311_2109'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='application',\n name='state',\n field=models.CharField(max_length=50, null=True, verbose_name=b'State of the application', choices=[(b'submitted', b'Submitted'), (b'accepted', b'Accepted'), (b'rejected', b'Rejected')]),\n preserve_default=True,\n ),\n ]\n","sub_path":"applications/migrations/0004_auto_20150322_1050.py","file_name":"0004_auto_20150322_1050.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"14559778","text":"import os\nimport json\nimport parsesummoner\n\ndef extract_id(json):\n\ttry:\n\t\treturn int(json['data']['key'])\n\texcept:\n\t\tprint(\"err\")\n\t\treturn 0\n\ndef _tojson_beautify_json(dict):\n\treturn json.dumps(dict, indent=4)\n\nif __name__ == \"__main__\":\n\tsave_path = os.getcwd()+\"\\\\riotdata\"\n\tcomplete_name = os.path.join(save_path, \"sortedchampion.json\")\n\tfile = open(complete_name, \"w\")\n\t\n\ttry:\n\t\tjson_file = json.load(open(\"riotdata\\\\champion.json\"))\n\texcept:\n\t\tprint(\"Error importing json file\")\n\n\tsorted_json = sorted(json_file[\"data\"], key = lambda x: int(json_file[\"data\"][x][\"key\"]))\n\tnew_json = [json_file[\"data\"][ip] for ip in sorted_json]\n\tnew_data = {}\n\tfor val in new_json:\n\t\tnew_data[val[\"key\"]] = [val[\"name\"], int(val[\"key\"])]\n\tjsondump = _tojson_beautify_json(new_data)\n\tprint(jsondump)\n\tfile.write(jsondump)\n\tfile.close()","sub_path":"jsonsort.py","file_name":"jsonsort.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"204891319","text":"\ndef isPhoneNumber(text):\n if (len(text)!=12):\n return False\n for 
i in range(0,3):\n if not text[i].isdecimal():\n return False\n if (text[3]!='-'):\n return False\n for i in range(4,7):\n if not text[i].isdecimal():\n return False\n if (text[7]!='-'):\n return False\n for i in range(8,11):\n if not text[i].isdecimal():\n return False\n return True\n\n\nprint(\"[!]Phone Number extractor\")\nmessage=input(\"Enter the text: \")\n\nfor i in range(len(message)):\n chunk=message[i:i+12]\n if isPhoneNumber(chunk):\n print(\"[*]Phone Number found:\" + chunk)\nprint(\"*DONE!*\")\n \n\n","sub_path":"phNum.py","file_name":"phNum.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"645159038","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 12 21:43:48 2018\n\n@author: cml\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom sklearn.utils import resample\n\ndef alpha( X, Y ):\n return ((np.var(Y)-np.cov(X, Y))/(np.var(X)+np.var(Y)-2*np.cov(X ,Y)))\n\n#randomly select 100 observations from range 1 to 1000 with replacement\ndef bootstrap (df):\n totalResult = 0\n for i in range(0,1000):\n dfsample = df.sample(frac=1, replace=True)\n X = dfsample.X[0:100]\n Y = dfsample.Y[0:100]\n result = alpha(X, Y)\n totalResult += result\n endResult = totalResult / 1000\n print(\"End result of bootstrap: \", endResult, \"\\n\")\n\ndf = pd.read_csv('Portfolio.csv', usecols=range(1,3))\nX = df.X[0:100]\nY = df.Y[0:100]\nprint('alpha: ', alpha(X,Y), \"\\n\")\n\n#run bootstrap\nbootstrap(df)\n#final output shows alpha = 0.58\n\n#load auto data set to perform bootstrap to estimate\n#the accuracy of a linear regression model\nauto_df = pd.read_csv('Auto.csv', usecols=range(1,10))\n\n#split data\nauto_X = auto_df[\"horsepower\"].values.reshape(-1,1) # our independent variable\nauto_y = auto_df[\"mpg\"].values.reshape(-1,1) # our dependent variable\nauto_X = sm.add_constant(auto_X) #add constant to get intercept\n\n#create linear model and calculate standard errors\n#SE(B0)=0.717, SE(B1)=0.006\nols = sm.OLS(auto_y, auto_X).fit()\nprint(ols.summary())\n\n#use bootstrapping\n#it gives a more accurate estimate of the standard errors\n#SE(B0)=0.448, SE(B1)=0.004\nXsamp, ysamp = resample(auto_X, auto_y, n_samples=1000)\nols_resample = sm.OLS(ysamp, Xsamp).fit()\nprint(ols_resample.summary())","sub_path":"Report/appendix/source code/Exercises/Christian/Bootstrap.py","file_name":"Bootstrap.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"560733210","text":"__author__ = 'zadoev@gmail.com'\n\"\"\"\nWrite a program which determines the number of 1 bits in the internal\nrepresentation of a given integer.\n\nINPUT SAMPLE:\n\nThe first argument is a path to a file. 
The file contains integers,\none per line.\n\nFor example:\n\n10\n22\n56\nOUTPUT SAMPLE:\n\nPrint to stdout the number of ones in the binary form of each number.\n\nFor example:\n\n2\n3\n3\n\"\"\"\nimport sys\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as test_cases:\n for line in test_cases:\n print(bin(int(line.rstrip())).count('1'))\n","sub_path":"solutions/number_of_ones/main.py3.py","file_name":"main.py3.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"293459054","text":"import math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import deque\n# Complete the activityNotifications function below.\n\n\nclass CustomQueue:\n\n def __init__(self, length):\n self.freq = [0] * 201\n self.queue = deque()\n self.length = length\n\n def add(self, item: int):\n self.queue.append(item)\n self.freq[item] += 1\n if len(self.queue) > self.length:\n val = self.queue.popleft()\n self.freq[val] -= 1\n\n def median(self) -> int:\n a1 = int(self.length / 2)\n a2 = a1 + 1\n mid1 = None\n mid2 = None\n res = 0\n\n for idx, item in enumerate(self.freq):\n res += item\n if res >= a1 and mid1 is None:\n mid1 = idx\n if res >= a2:\n mid2 = idx\n break\n\n if self.length % 2 == 0:\n return (mid1 + mid2) / 2.0\n return mid2\n\n def __repr__(self):\n return str(self.freq)\n\n\ndef activityNotifications(expenditure, d):\n count = 0\n q = CustomQueue(d)\n for i in expenditure[:d]:\n q.add(i)\n\n for idx, item in enumerate(expenditure[d:]):\n median = q.median()\n # print(q)\n # print(median, expenditure[idx: idx + 1])\n if item >= (2 * median):\n count += 1\n q.add(item)\n return count\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nd = input().split()\n\n n = int(nd[0])\n\n d = int(nd[1])\n\n f = open('testcase.txt', 'r')\n\n # expenditure = list(map(int, input().rstrip().split()))\n\n expenditure = list(map(int, f.read().rstrip().split()))\n\n result = activityNotifications(expenditure, d)\n\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n","sub_path":"Sorting/fraudulentActivity.py","file_name":"fraudulentActivity.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"136267328","text":"import sys\nfrom collections import Counter\n\ndata = '''\\\needadn\ndrvtee\neandsr\nraavrd\natevrs\ntsrnev\nsdttsa\nrasrtv\nnssdts\nntnada\nsvetve\ntesnvt\nvntsnd\nvrdear\ndvrsen\nenarar'''\n\ndata = sys.stdin.read()\n\nwords = data.splitlines()\nN = len(words[0])\ncounts = [Counter(w[n] for w in words) for n in range(N)]\n\nprint(''.join(c.most_common(1)[0][0] for c in counts))\n","sub_path":"6/6a.py","file_name":"6a.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"370370147","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('../pic/1.jpg')\nrows, cols,_ = img.shape\n# the four corners of the original image\npoints1 = np.float32([[56,65],[368,52],[28,387],[389,390]])\n# the target image four corners: left-up, right-up, left-botom, right-bottom\npoints2 = np.float32([[0,0],[300,0],[0,300],[300,300]])\n# generate the perspective transformation matrix\nmatrix = cv2.getPerspectiveTransform(points1,points2)\nprint(matrix)\n# perspective transformation, the third param is target image size\ndst = 
cv2.warpPerspective(img, matrix, (cols, rows))\nplt.subplot(121), plt.imshow(img[:, :, ::-1]), plt.title('input')\nplt.subplot(122), plt.imshow(dst[:, :, ::-1]), plt.title('output')\nplt.show()","sub_path":"Camera Basics/task8/src/perspective_transform.py","file_name":"perspective_transform.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"524842618","text":"from string import punctuation\n\nfrom django.shortcuts import render, redirect\n\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\nfrom alcpt.managerfuncs import testmanager\nfrom alcpt.decorators import permission_check\nfrom alcpt.definitions import UserType, QuestionType, QuestionTypeCounts\nfrom alcpt.models import Exam, TestPaper, Group\nfrom alcpt.exceptions import *\n\n\n@permission_check(UserType.TestManager)\n@require_http_methods([\"GET\"])\ndef exam_list(request):\n exam_name = request.GET.get('exam_name')\n\n if exam_name:\n exams = Exam.objects.filter(is_public=True).filter(name__contains=exam_name)\n else:\n exams = Exam.objects.filter(is_public=True)\n\n page = request.GET.get('page', 0)\n paginator = Paginator(exams, 10) # the second parameter is used to display how many items. Now is display 10\n\n try:\n examList = paginator.page(page)\n except PageNotAnInteger:\n examList = paginator.page(1)\n except EmptyPage:\n examList = paginator.page(paginator.num_pages)\n\n return render(request, 'exam/exam_list.html', locals())\n\n\n@permission_check(UserType.TestManager)\n@require_http_methods([\"GET\"])\ndef testpaper_list(request):\n testpaper_name = request.GET.get('testpaper_name')\n\n if testpaper_name:\n testpapers = TestPaper.objects.filter(is_testpaper=True).filter(name__contains=testpaper_name)\n else:\n testpapers = TestPaper.objects.filter(is_testpaper=True)\n\n page = request.GET.get('page', 0)\n paginator = Paginator(testpapers, 10) # the second parameter is used to display how many items. 
Now is display 10\n\n try:\n testpaperList = paginator.page(page)\n except PageNotAnInteger:\n testpaperList = paginator.page(1)\n except EmptyPage:\n testpaperList = paginator.page(paginator.num_pages)\n\n return render(request, 'exam/testpaper_list.html', locals())\n\n\n@permission_check(UserType.TestManager)\ndef testpaper_content(request, testpaper_id):\n try:\n testpaper = TestPaper.objects.get(id=testpaper_id)\n except ObjectDoesNotExist:\n messages.error(request, 'Testpaper does not exist, testpaper id: {}'.format(testpaper_id))\n return redirect('testpaper_list')\n\n questions = testpaper.question_set.all().order_by('q_type')\n\n return render(request, 'exam/testpaper_content.html', locals())\n\n\n@permission_check(UserType.TestManager)\ndef testpaper_create(request):\n if request.method == 'POST':\n testpaper_name = request.POST.get('testpaper_name',)\n\n try:\n if TestPaper.objects.filter(name__icontains=testpaper_name):\n raise MultipleObjectsReturned('Question has existed.')\n except ObjectDoesNotExist:\n pass\n\n testpaper = testmanager.create_testpaper(name=testpaper_name, created_by=request.user, is_testpaper=1)\n\n return render(request, 'exam/testpaper_edit.html', locals())\n\n else:\n return render(request, 'exam/testpaper_create.html', locals())\n\n\n# 編輯考卷(未完成)\n@permission_check(UserType.TestManager)\ndef testpaper_edit(request, testpaper_id):\n try:\n testpaper = TestPaper.objects.get(id=testpaper_id)\n except ObjectDoesNotExist:\n messages.error(request, 'Testpaper does not exist, testpaper id: {}'.format(testpaper_id))\n return redirect('testpaper_list')\n\n if testpaper.valid == 1:\n messages.warning(request, \"This testpaper is valid, it can't not edit again.\")\n return redirect('testpaper_list')\n\n if request.method == \"POST\":\n testpaper_name = request.POST.get('testpaper_name',)\n\n testpaper = testmanager.edit_testpaper(testpaper, testpaper_name)\n\n testpaper.valid = testpaper.question_set.count() == sum(QuestionTypeCounts.Exam.value[0])\n\n if testpaper.valid:\n for question in testpaper.question_set.all():\n question.used_freq += 1\n question.save()\n\n testpaper.save()\n\n messages.success(request, 'Successfully update testpaper: testpaper id: {}'.format(testpaper.id))\n\n return redirect('testpaper_list')\n\n else:\n question_types = [0] + QuestionTypeCounts.Exam.value[0]\n selected_num = [0 for _ in question_types]\n reach_limit = [False for _ in question_types]\n\n for question_type in QuestionType.__members__.values():\n type_value = question_type.value[0]\n selected_num[type_value] = testpaper.question_set.filter(q_type=type_value).count()\n reach_limit[type_value] = selected_num[type_value] <= question_types[type_value]\n\n if not all(reach_limit):\n messages.warning(request, \"This testpaper won't start until all questions have been selected.\")\n\n types_num = range(1, len(QuestionType.__members__)+1)\n\n return render(request, 'exam/testpaper_edit.html', locals())\n\n\n# 人工選題(未完成)\n@permission_check(UserType.TestManager)\ndef manual_pick(request, testpaper_id, question_type):\n messages.success(request, str(question_type))\n return render(request, 'exam/testpaper_manual_pick.html', locals())\n\n\n# 自動選題(已完成)\n@permission_check(UserType.TestManager)\ndef auto_pick(request, testpaper_id, question_type):\n try:\n testpaper = TestPaper.objects.get(id=testpaper_id)\n except ObjectDoesNotExist:\n messages.error(request, 'Testpaper does not exist, testpaper id: {}'.format(testpaper_id))\n\n if type(question_type) is int:\n raise 
IllegalArgumentError('question_type does not match category.')\n\n    for q_type in QuestionType.__members__.values():\n        if q_type.value[0] == int(question_type):\n            question_type = q_type\n            break\n\n    if testmanager.limit_check(testpaper=testpaper, q_type=question_type):\n        messages.warning(request, 'This type had reached limit amount.')\n        return redirect('/exam/testpaper/{}/edit'.format(testpaper_id))\n\n    selected_num = testmanager.auto_pick(testpaper=testpaper, type_counts=QuestionTypeCounts.Exam.value[0],\n                                         question_type=int(question_type.value[0]))\n\n    messages.success(request, selected_num)\n\n    return redirect('/exam/testpaper/{}/edit'.format(testpaper_id))\n\n\n","sub_path":"alcpt/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445548030","text":"# -*- coding: utf-8 -*-\n# @Author: tobi\n# @Date: 2017-05-22 16:40:40\n# @Last Modified by: tobi\n# @Last Modified time: 2017-05-22 19:41:52\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import column_property\n\ndb = SQLAlchemy()\n\n\nclass Node(db.Model):\n    mac_address = db.Column(db.String(17), primary_key=True)\n    name = db.Column(db.String(64), index=True, unique=True)\n    map_pos = db.Column(db.String(20))\n    threshold = db.Column(db.Float, default=0.0)\n    last_update = db.Column(db.DateTime)\n    measurements = db.relationship('Measurement',\n                                   backref=db.backref('node', lazy='joined'),\n                                   lazy='joined',\n                                   order_by='Measurement.timestamp')\n\n    def __init__(self, mac_address, name):\n        self.mac_address = mac_address\n        self.name = name\n\n    @classmethod\n    def from_dict(cls, data):\n        if 'mac_address' not in data or 'name' not in data:\n            raise ValueError('mac_address or name does not exist in data')\n\n        return cls(name=data['name'], mac_address=data['mac_address'])\n\n    def __repr__(self):\n        return (\"name: %s\" % self.name)\n\n    @property\n    def state(self):\n        if len(self.measurements) > 0:\n            last = self.measurements[-1]\n            return int(last >= self.threshold)\n        else:\n            return 1\n\n    def serialize(self):\n        return {\n            'mac_address': self.mac_address,\n            'name': self.name,\n            'map_pos': self.map_pos,\n            'last_update': self.last_update,\n            'threshold': self.threshold,\n            'state': self.state\n        }\n\n    def update(self, node):\n        if hasattr(node, 'mac_address'):\n            self.mac_address = node['mac_address']\n\n        if hasattr(node, 'name'):\n            self.name = node['name']\n\n        if hasattr(node, 'threshold'):\n            self.threshold = node['threshold']\n\n        if hasattr(node, 'map_pos'):\n            self.threshold = node['map_pos']\n\n\nclass Measurement(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    value = db.Column(db.Float)\n    timestamp = db.Column(db.DateTime)\n    fk_node = db.Column(db.String(17), db.ForeignKey('node.mac_address'))\n    exceeded = column_property(value >= Node.threshold)\n\n    def __init__(self, value, fk):\n        self.value = value\n        self.fk_node = fk\n        self.timestamp = func.now()\n\n    def __repr__(self):\n        return \"node %s value: %d\" % (self.node.name, self.value)\n\n    @classmethod\n    def from_dict_with_node(cls, data, mac):\n        if 'value' not in data:\n            raise ValueError('value does not exist in data')\n\n        return cls(value=data['value'], fk=mac)\n\n    def serialize(self):\n        return {\n            'id': self.id,\n            'value': self.value,\n            'timestamp': self.timestamp,\n            'exceeded': self.exceeded,\n            #'node': self.node.serialize()\n        
}","sub_path":"backend/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520795890","text":"from tkinter import *\nfrom math import sqrt\nfrom random import shuffle\n\nHEIGHT = 768\nWIDTH = 1366\nwindow = Tk()\ncolors = [\"darkred\", \"green\", \"blue\", \"purple\", \"pink\", \"lime\"]\n\nwindow.title(\"Bubble Game\")\n\nc = Canvas(window, width=WIDTH, height=HEIGHT, bg=\"skyblue\")\nc.pack()\nship_id = c.create_polygon(5, 5, 5, 25, 30, 15, fill=\"green\")\nship_id2 = c.create_oval(0, 0, 30, 30, outline=\"red\")\nSHIP_R = 15\nMID_X = WIDTH / 2\nMID_Y = HEIGHT / 2\nc.move(ship_id, MID_X, MID_Y)\nc.move(ship_id2, MID_X, MID_Y)\nship_spd = 10\nscore = 0\n\ndef move_ship(event):\n if event.keysym == \"Up\":\n c.move(ship_id, 0, -ship_spd)\n c.move(ship_id2, 0, -ship_spd)\n elif event.keysym == \"Down\":\n c.move(ship_id, 0, ship_spd)\n c.move(ship_id2, 0, ship_spd)\n elif event.keysym == \"Left\":\n c.move(ship_id, -ship_spd, 0)\n c.move(ship_id2, -ship_spd, 0)\n elif event.keysym == \"Right\":\n c.move(ship_id, ship_spd, 0)\n c.move(ship_id2, ship_spd, 0)\n elif event.keysym == \"P\":\n score += 10000\n \nc.bind_all('', move_ship)\n\nfrom random import randint\n\nbub_id = []\nbub_r = []\nbub_speed = []\nbub_id_e = []\nbub_r_e = []\nbub_speed_e = []\nmin_bub_r = 10\nmax_bub_r = 30\nmax_bub_spd = 10\ngap = 100\n\ndef create_bubble():\n x = WIDTH + gap\n y = randint(0, HEIGHT)\n r = randint(min_bub_r, max_bub_r)\n id1 = c.create_oval(x - r, y - r, x + r, y + r, outline=\"white\", fill=\"lightblue\")\n bub_id.append(id1)\n bub_r.append(r)\n bub_speed.append(randint(5, max_bub_spd))\n \ndef create_bubble_e():\n x = WIDTH + gap\n y = randint(0, HEIGHT)\n r = randint(min_bub_r, max_bub_r)\n id1 = c.create_oval(x - r, y - r, x + r, y + r, outline=\"black\", fill=\"red\")\n bub_id_e.append(id1)\n bub_r_e.append(r)\n bub_speed_e.append(randint(6, max_bub_spd))\n \ndef create_bubble_r():\n x = WIDTH + gap\n y = randint(0, HEIGHT)\n r = randint(min_bub_r, max_bub_r)\n id1 = c.create_oval(x - r, y - r, x + r, y + r, outline=\"white\", fill=colors[0])\n bub_id.append(id1)\n bub_r.append(r)\n bub_speed.append(randint(6, max_bub_spd))\n \ndef move_bubbles():\n for i in range(len(bub_id)):\n c.move(bub_id[i], -bub_speed[i], 0)\n for i in range(len(bub_id_e)):\n c.move(bub_id_e[i], -bub_speed_e[i], 0)\n \nfrom time import sleep, time\n\nbub_chance = 30\n\ndef get_coords(id_num):\n pos = c.coords(id_num)\n x = (pos[0] + pos[2]) / 2\n y = (pos[1] + pos[3]) / 2\n return x, y\n\ndef del_bubble(i):\n del bub_r[i]\n del bub_speed[i]\n c.delete(bub_id[i])\n del bub_id[i]\n \ndef clean():\n for i in range(len(bub_id) -1, -1, -1):\n x, y = get_coords(bub_id[i])\n if x < -gap:\n del_bubble(i)\n \ndef distance(id1, id2):\n x1, y1 = get_coords(id1)\n x2, y2 = get_coords(id2)\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\ndef collision():\n points = 0\n for bub in range(len(bub_id) -1, -1, -1):\n if distance(ship_id2, bub_id[bub]) < (SHIP_R + bub_r[bub]):\n points += (bub_r[bub] + bub_speed[bub])\n del_bubble(bub)\n return points\n\ndef cleanAll():\n for i in range(len(bub_id) -1, -1, -1):\n x, y = get_coords(bub_id[i])\n del_bubble(i)\n \ndef collision_e():\n for bub in range(len(bub_id_e) -1, -1, -1):\n if distance(ship_id2, bub_id_e[bub]) < (SHIP_R + bub_r_e[bub]):\n window.destroy()\n print(\"Вы были убиты красным шаром...\")\n print(\"У вас \", score, \" очков!\")\n sleep(100)\n 
\nc.create_text(50, 30, text=\"SCORE\", fill=\"white\")\nst = c.create_text(50, 50, fill=\"white\")\ntt = c.create_text(100, 50, fill='white')\n\ndef show(score):\n c.itemconfig(st, text=str(score))\nevil_bub = 50\n\n#самое главное\n\nwhile True:\n if randint(1, bub_chance) == 1:\n create_bubble()\n if randint(1, evil_bub) == 1:\n create_bubble_e()\n if randint(1, 100) == 1:\n create_bubble_r()\n \n move_bubbles()\n collision_e()\n clean()\n score += collision()\n \n if score >= 400:\n evil_bub = 40\n bub_chance = 25\n if score >= 1000:\n evil_bub = 30\n bub_chance = 20\n \n show(score)\n window.update()\n shuffle(colors)\n sleep(0.01)\n\n\n#игра закончена \n \n \n \n \n\n \n \n \n \n\n\n\n \n \n \n\n","sub_path":"bubble game.py","file_name":"bubble game.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"267188846","text":"turmas = {}\n\ndef adicionarTurma():\n NomeT = str(input(\"Adicione o nome de sua turma:\"))\n Alunos = {}\n turmas[NomeT] = Alunos\n print(\"Turmas:\",turmas)\n\ndef adicionarAlunoNotas():\n TurmaNome = str(input(\"Digite o nome da turma do aluno:\"))\n if(TurmaNome in turmas):\n print(\"Turma:\",turmas[TurmaNome])\n Alunos = {}\n Matricula = int(input(\"Digite a matricula do aluno:\"))\n Notas = []\n add = \"Sim\"\n while(add == \"Sim\"):\n Nota = float(input(\"Digite a nota do aluno:\"))\n Notas.append(Nota)\n add = str(input(\"Se tiver mais de uma nota, digite: Sim, se não, digite: Não:\"))\n turmas[TurmaNome][Matricula] = Notas\n print(\"Turma:\",turmas[TurmaNome])\n else:\n print(\"Turma não existente!\")\n\ndef calcularMedia(Notas):\n Soma = 0\n for a in Notas:\n Soma = Soma + a\n Media = Soma/len(Notas)\n return(Media)\n\ndef mediaDaTurma():\n Soma = 0\n Contador = 0\n TurmaNome = str(input(\"Digite o nome da turma:\"))\n if(TurmaNome in turmas):\n for i in turmas[TurmaNome]:\n Soma = Soma + calcularMedia(turmas[TurmaNome][i])\n Contador = Contador + 1\n Media = Soma/Contador\n print(\"Média da Turma:\",Media)\n else:\n print(\"Turma não existente!\")\n return\n\ndef main():\n Contador = 1\n while(Contador > 0):\n print(\"Digite 1 para adicionar a turma.\")\n print(\"Digite 2 para adicionar o aluno e as notas.\")\n print(\"Digite 3 para calcular a média de um aluno.\")\n print(\"Digite 4 para calcular a média de uma Turma.\")\n OpcaoMenu = int(input(\"\"))\n if(OpcaoMenu == 1):\n adicionarTurma()\n elif(OpcaoMenu == 2):\n adicionarAlunoNotas()\n elif(OpcaoMenu == 3):\n Turma = str(input(\"Digite a turma:\"))\n if(Turma in turmas):\n Matricula = int(input(\"Digite a matricula do aluno:\"))\n if(Matricula in turmas[Turma]):\n print(\"Média do aluno:\",calcularMedia(turmas[Turma][Matricula]))\n else:\n print(\"Matrícula não existente!\")\n else:\n print(\"Turma não existente!\")\n elif(OpcaoMenu == 4):\n mediaDaTurma()\n else:\n print(\"Opção não válida!\")\nif __name__ == \"__main__\":\n main()\n","sub_path":"Questao4.py","file_name":"Questao4.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"87607228","text":"import os\nimport numpy as np\nfrom PIL import Image\nfrom .seg_dataset import SegDataset\nfrom .voc_seg_dataset import VOCMetaInfo\n\n\nclass ADE20KSegDataset(SegDataset):\n \"\"\"\n ADE20K semantic segmentation dataset.\n\n Parameters:\n ----------\n root : str\n Path to a folder with `ADEChallengeData2016` subfolder.\n mode : str, default 'train'\n 'train', 
'val', 'test', or 'demo'.\n transform : callable, optional\n A function that transforms the image.\n \"\"\"\n def __init__(self,\n root,\n mode=\"train\",\n transform=None,\n **kwargs):\n super(ADE20KSegDataset, self).__init__(\n root=root,\n mode=mode,\n transform=transform,\n **kwargs)\n\n base_dir_path = os.path.join(root, \"ADEChallengeData2016\")\n assert os.path.exists(base_dir_path), \"Please prepare dataset\"\n\n image_dir_path = os.path.join(base_dir_path, \"images\")\n mask_dir_path = os.path.join(base_dir_path, \"annotations\")\n\n mode_dir_name = \"training\" if mode == \"train\" else \"validation\"\n image_dir_path = os.path.join(image_dir_path, mode_dir_name)\n mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)\n\n self.images = []\n self.masks = []\n for image_file_name in os.listdir(image_dir_path):\n image_file_stem, _ = os.path.splitext(image_file_name)\n if image_file_name.endswith(\".jpg\"):\n image_file_path = os.path.join(image_dir_path, image_file_name)\n mask_file_name = image_file_stem + \".png\"\n mask_file_path = os.path.join(mask_dir_path, mask_file_name)\n if os.path.isfile(mask_file_path):\n self.images.append(image_file_path)\n self.masks.append(mask_file_path)\n else:\n print(\"Cannot find the mask: {}\".format(mask_file_path))\n\n assert (len(self.images) == len(self.masks))\n if len(self.images) == 0:\n raise RuntimeError(\"Found 0 images in subfolders of: {}\\n\".format(base_dir_path))\n\n def __getitem__(self, index):\n image = Image.open(self.images[index]).convert(\"RGB\")\n if self.mode == \"demo\":\n image = self._img_transform(image)\n if self.transform is not None:\n image = self.transform(image)\n return image, os.path.basename(self.images[index])\n mask = Image.open(self.masks[index])\n\n if self.mode == \"train\":\n image, mask = self._sync_transform(image, mask)\n elif self.mode == \"val\":\n image, mask = self._val_sync_transform(image, mask)\n else:\n assert self.mode == \"test\"\n image, mask = self._img_transform(image), self._mask_transform(mask)\n\n if self.transform is not None:\n image = self.transform(image)\n\n return image, mask\n\n classes = 150\n vague_idx = 150\n use_vague = True\n background_idx = -1\n ignore_bg = False\n\n @staticmethod\n def _mask_transform(mask):\n np_mask = np.array(mask).astype(np.int32)\n np_mask[np_mask == 0] = ADE20KSegDataset.vague_idx + 1\n np_mask -= 1\n return np_mask\n\n def __len__(self):\n return len(self.images)\n\n\nclass ADE20KMetaInfo(VOCMetaInfo):\n def __init__(self):\n super(ADE20KMetaInfo, self).__init__()\n self.label = \"ADE20K\"\n self.short_label = \"voc\"\n self.root_dir_name = \"ade20k\"\n self.dataset_class = ADE20KSegDataset\n self.num_classes = ADE20KSegDataset.classes\n self.test_metric_extra_kwargs = [\n {\"vague_idx\": ADE20KSegDataset.vague_idx,\n \"use_vague\": ADE20KSegDataset.use_vague,\n \"macro_average\": False},\n {\"num_classes\": ADE20KSegDataset.classes,\n \"vague_idx\": ADE20KSegDataset.vague_idx,\n \"use_vague\": ADE20KSegDataset.use_vague,\n \"bg_idx\": ADE20KSegDataset.background_idx,\n \"ignore_bg\": ADE20KSegDataset.ignore_bg,\n \"macro_average\": False}]\n","sub_path":"pytorch/datasets/ade20k_seg_dataset.py","file_name":"ade20k_seg_dataset.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"578606292","text":"\n#finds total # of days in given year\ndef countingDays(year):\n total = 30*4 + 31*7\n if year%100 != 0 and year%4 ==0:\n total += 29\n elif year%100 
==0 and year%400 ==0:\n total +=29\n else:\n total +=28\n return total\n \n\n #finds all sundays between 1901 and 2000\ndayNum = 2\nsundays = []\nfor year in range(1901,2001):\n for day in range(0,countingDays(year)+1 ) :\n if dayNum%7 == 0:\n sundays.append(dayNum-1)\n dayNum+=1\n#print( sundays[0])\n\n#defined number to add to move to next month\nfirstdayinMonth = [0,31,28,31,30,31,30,31,31,30,31,30]\nfirstdayinMonthleap = [0,31,29,31,30,31,30,31,31,30,31,30]\n\n#find number of sundays on the 1st of each month \ntotalDays = 1\nsundayfirst=0\nfor year in range(1901,2001):\n dayinyear = countingDays(year)\n for sunday in sundays:\n #check if leap year\n if (year%100 != 0 and year%4 ==0) or (year%100 ==0 and year%400 ==0):\n #move to next month\n for item in firstdayinMonthleap:\n if sunday in range(dayinyear+totalDays+item, dayinyear+totalDays+item+ 1):\n sundayfirst +=1\n else:\n for item in firstdayinMonth:\n if sunday in range(dayinyear+totalDays+item, dayinyear+totalDays+item+ 1):\n sundayfirst +=1\n\n #move day number to next year\n totalDays += dayinyear \n\nprint(sundayfirst)","sub_path":"counting_sundays.py","file_name":"counting_sundays.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"527308120","text":"# -*- coding: utf-8 -*-\nimport boto3\nimport time\nfrom botocore.config import Config\nimport random\nimport pandas as pd\n\nFILE_TO_READ =\"./Data/query-scan.csv\" # Replace with your data file\n\nRESULT_FILE =\"./Data/result-query-scan.csv\" #Replace where the result needs to be saved\n\nMAX_RANGE_VALUE = 10 #Max iteration value\n\n#Set region config. It will overwrite region setting done as part of aws access key setup.\nREGION_CONFIG = Config(\n region_name = 'ap-southeast-2',\n signature_version = 'v4',\n retries = {\n 'max_attempts': 3\n }\n)\n\ndynamodb_client = boto3.client('dynamodb', config=REGION_CONFIG)\n\ndf1 = pd.DataFrame(columns=['Query'])\ndf2 = pd.DataFrame(columns=['Scan'])\n\n'''\nPerform query calls. Pass in random data from the file as a parameter.\nExecute query calls equal to MAX_RANGE_VALUE.\nThe query call will continue to loop while response has 'LastEvaluatedKey'.\n'''\nfor i in range(0, MAX_RANGE_VALUE):\n\n random_lines = random.choice(open(FILE_TO_READ).readlines())\n \n '''\n Pass parameter values based on your dynamodb table information. Also update the parameter values in while loop too.\n Using ExpressionAttrbiuteName because uuid is a reserved word in DynamoDB. 
And partition key of the demo table used for testing is uuid.\n '''\n operation_parameters = {\n 'TableName': 'workload',\n 'ExpressionAttributeNames':{ \"#dyno_uuid\": \"uuid\" }, \n 'KeyConditionExpression': '#dyno_uuid = :uuid_2',\n 'ExpressionAttributeValues': {\n ':uuid_2': {'S': random_lines.strip()}\n }\n }\n \n start_timer = time.perf_counter()\n response = dynamodb_client.query(**operation_parameters)\n \n while ('LastEvaluatedKey' in response):\n response = dynamodb_client.query(\n TableName= 'workload',\n Select= 'ALL_ATTRIBUTES',\n ExpressionAttributeNames={ \"#dyno_uuid\": \"uuid\" },\n KeyConditionExpression= '#dyno_uuid = :uuid_2',\n ExpressionAttributeValues= {\n ':uuid_2': {'S': random_lines.strip()}\n },\n ExclusiveStartKey=response['LastEvaluatedKey']\n ) \n end_timer = time.perf_counter()\n #print(\"%s-%s-%s\" %(response['Count'],response['ResponseMetadata']['HTTPHeaders']['content-length'],response['Items'][0]['uuid'])) \n df1 = df1.append({'Query': end_timer-start_timer}, ignore_index=True)\n\n\n'''\nPerform scan calls. Pass in random data from the file as a parameter.\nExecute scan calls equal to MAX_RANGE_VALUE.\nThe scan call will continue to scan data until it finds it. \nIt uses 'LastEvaluatedKey' & empty response to check the condition to continue scanning the table. \n'''\nfor i in range(0, MAX_RANGE_VALUE):\n\n random_lines = random.choice(open(FILE_TO_READ).readlines())\n \n '''\n Pass parameter values based on your dynamodb table information. Also update the parameter values in while loop too.\n Using ExpressionAttrbiuteName because uuid is a reserved word in DynamoDB. And partition key of the demo table used for testing is uuid.\n '''\n operation_parameters = {\n 'TableName': 'workload',\n 'Select': 'ALL_ATTRIBUTES',\n 'ExpressionAttributeNames':{ \"#dyno_uuid\": \"uuid\" },\n 'FilterExpression': '#dyno_uuid = :uuid_2',\n 'ExpressionAttributeValues': {\n ':uuid_2': {'S': random_lines.strip()}\n }\n }\n\n start_timer = time.perf_counter()\n response = dynamodb_client.scan(**operation_parameters)\n while ('LastEvaluatedKey' in response and response['Items']==[]):\n response = dynamodb_client.scan(\n TableName= 'workload',\n Select= 'ALL_ATTRIBUTES',\n ExpressionAttributeNames={ \"#dyno_uuid\": \"uuid\" },\n FilterExpression= '#dyno_uuid = :uuid_2',\n ExpressionAttributeValues= {\n ':uuid_2': {'S': random_lines.strip()}\n },\n ExclusiveStartKey=response['LastEvaluatedKey']\n )\n end_timer = time.perf_counter()\n #print(\"%s-%s-%s\" %(response['Count'],response['ResponseMetadata']['HTTPHeaders']['content-length'],response['Items'][0]['uuid'])) \n df2 = df2.append({'Scan': end_timer-start_timer}, ignore_index=True)\n\ndf_col_merged = pd.concat([df1, df2], axis=1)\n\nprint(df_col_merged.describe(percentiles=[0.25,0.5,0.75,0.90,0.95],include='all'))\n\ndf_col_merged.to_csv(RESULT_FILE,index=False)\n","sub_path":"dynamodb_query_scan_comparison.py","file_name":"dynamodb_query_scan_comparison.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"607071907","text":"from threading import Timer\nfrom time import sleep\nimport pifacecad\n\n\nLCD_ROW_LEN = 16\n\n\nclass PIFacePrinter:\n # refresh_speed_sec -- time to clean the screen\n def __init__(self, refresh_speed_sec, cad):\n self._cad = cad\n self._cad.lcd.backlight_on()\n self._timer = None\n self._refresh_time = refresh_speed_sec\n self._permanent_text = \"\"\n self._temporary_text = \"\"\n self._last_button_one = 
None\n        self._ticks_left = 0\n\n    def _shift_right(self):\n        if self._ticks_left == 0:\n            self._write_permanent_text()\n            return\n        self._cad.lcd.move_left()\n        self._ticks_left = self._ticks_left - 1\n        self._timer = Timer(self._refresh_time, self._shift_right)\n        self._timer.start()\n\n    def write_text(self, text):\n        self._clean_timer()\n        self._cad.lcd.clear()\n        self._cad.lcd.write(text)\n\n        exceeding_text_size = len(text) - LCD_ROW_LEN\n        if exceeding_text_size > 0:\n            self._ticks_left = exceeding_text_size\n            self._shift_right()\n        else:\n            sleep(self._refresh_time * 4)\n            self._write_permanent_text()\n\n    def write_permanent(self, text):\n        self._permanent_text = text\n        self._write_permanent_text()\n\n    def _write_permanent_text(self):\n        self._cad.lcd.clear()\n        self._cad.lcd.write(self._permanent_text)\n        self._cad.lcd.left_justify()\n\n    def _clean_timer(self):\n        if self._timer is not None:\n            self._timer.cancel()\n            self._timer = None\n        self._write_permanent_text()\n\n    def _press_one(self, event):\n        self._last_button_one = True\n        print(\"one pressed\")\n\n    def _press_two(self, event):\n        self._last_button_one = False\n        print(\"two pressed\")\n\n    def wait_button_press(self):\n        self._last_button_one = None\n        listener = pifacecad.SwitchEventListener(chip=self._cad)\n        listener.register(0, pifacecad.IODIR_FALLING_EDGE, self._press_one)\n        listener.register(1, pifacecad.IODIR_FALLING_EDGE, self._press_two)\n        listener.activate()\n        while self._last_button_one is None:\n            sleep(0.5)\n        listener.deregister(0, pifacecad.IODIR_FALLING_EDGE)\n        listener.deregister(1, pifacecad.IODIR_FALLING_EDGE)\n        listener.deactivate()\n        return self._last_button_one\n\n    def free(self):\n        self._cad.lcd.backlight_off()\n        self._clean_timer()\n\n    def __del__(self):\n        self.free()\n","sub_path":"Python_software_engineering/Raspberry Pi/raspberry_pi/raspberry_display/piface_printer.py","file_name":"piface_printer.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521503084","text":"# 2. Sort a one-dimensional array of real numbers in ascending order using merge sort;\n# the array is filled with random numbers in\n# the range [0; 50). Print the original and the sorted arrays.\nimport random\nfrom collections import deque\n\n\nARRAY_LIMIT = 50\n\n\ndef merge_sort(array):\n\n    def merge(first_array, second_array):\n        merge_result = []\n\n        while len(first_array) and len(second_array) > 0:\n            if first_array[0] <= second_array[0]:\n                merge_result.append(first_array.popleft())\n            else:\n                merge_result.append(second_array.popleft())\n\n        while len(first_array) > 0:\n            merge_result.append(first_array.popleft())\n\n        while len(second_array) > 0:\n            merge_result.append(second_array.popleft())\n\n        return merge_result\n\n    if len(array) == 1:\n        return array\n    else:\n        m = len(array) // 2\n        left = deque(merge_sort(array[:m]))\n        right = deque(merge_sort(array[m:]))\n        result = merge(left, right)\n        return result\n\n\nmy_array = [random.randint(0, ARRAY_LIMIT) for _ in range(10)]\nprint(f'Исходный массив:\t\t {my_array}')\nprint(f'Отсортированный массив:\t {merge_sort(my_array)}')\n\n","sub_path":"lesson07/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"147515514","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/27 20:43\n# @Author : lemon_huahua\n# @Email : 204893985@qq.com\n# @File : math_sutie.py\n\nimport unittest\nimport HTMLTestRunnerNew\nfrom week_6.class_0105.do_excel import DoExcel#import the do_excel module to get the test data\nfrom week_6.class_0105.test_math_method_new import TestAdd #load the test cases by module\n#suite: a TestSuite collects and stores the loaded test cases\n\nsuite=unittest.TestSuite()#object\n\n#load with a loader\nloader=unittest.TestLoader()\nsuite.addTest(loader.loadTestsFromTestCase(TestAdd))\n\nwith open('py13.html','wb') as file:\n    runner=HTMLTestRunnerNew.HTMLTestRunner(stream=file,\n                                            verbosity=2,\n                                            title='python13的第一份报告',\n                                            description='测试我们的数学类里的加法和减法',\n                                            tester='华华')#the tester's name\n    runner.run(suite)#run the test cases in the suite","sub_path":"Week_8/class_0105/math_suite_new.py","file_name":"math_suite_new.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416026423","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom main.forms import *\nfrom main.models import *\n\n# Create your views here.\n\ndef user_login(request):\n\tform=Loginform()\n\t#users=Login.objects.values()\n\treturn render(request,\"login.html\",{'form':form})\n\ndef home(request):\n\tbrands=Brand.objects.filter()\n\t\n\ttry:\n\t\tid=request.COOKIES.get('login')\n\t\tname=Customer.objects.get(loginid=id)\n\t\tdata={'brand':brands,'name':name}\n\t\treturn render(request,\"main/home.html\",data)\n\texcept:\n\t\tname='no name'\n\t\tdata={'brand':brands,'name':name}\n\t\treturn render(request,\"main/home.html\",data)\n\n\n\n","sub_path":"shopping/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364189667","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/ramusus/workspace/manufacture/env/src/django-m2m-history/m2m_history/migrations/0001_initial.py\n# Compiled at: 2016-02-26 14:17:16\nfrom __future__ import unicode_literals\nfrom django.db import models, migrations\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ('contenttypes', '0001_initial')]\n    operations = [\n        
migrations.CreateModel(name=b'ManyToManyHistoryVersion', fields=[\n (\n b'id', models.AutoField(verbose_name=b'ID', serialize=False, auto_created=True, primary_key=True)),\n (\n b'object_id', models.BigIntegerField(db_index=True)),\n (\n b'field_name', models.CharField(max_length=50, db_index=True)),\n (\n b'time', models.DateTimeField(db_index=True)),\n (\n b'count', models.PositiveIntegerField(default=0)),\n (\n b'added_count', models.PositiveIntegerField(default=0)),\n (\n b'removed_count', models.PositiveIntegerField(default=0)),\n (\n b'content_type', models.ForeignKey(related_name=b'm2m_history_versions', to=b'contenttypes.ContentType'))], options={}, bases=(\n models.Model,)),\n migrations.AlterUniqueTogether(name=b'manytomanyhistoryversion', unique_together=set([('content_type', 'object_id', 'field_name', 'time')]))]","sub_path":"pycfiles/django-m2m-history-0.3.6.tar/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70217582","text":"# -*- coding: utf-8 -*\n\nfrom __future__ import print_function\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nimport codecs\nimport editdistance\nimport argparse\nimport utils\n\ndef gen_validation_data(p, data, seq_len, transliteration, trans_vocab_size, trans_to_index):\n \n x = np.zeros((1,int(seq_len),trans_vocab_size))\n turned = False\n new_p = min(p+seq_len,len(data))\n raw_translit = data[p:new_p]\n \n if new_p != len(data):\n if max([raw_translit.rfind(u' '),raw_translit.rfind(u'\\t'),raw_translit.rfind(u'\\n')]) > 0:\n new_p = max([raw_translit.rfind(u' '),raw_translit.rfind(u'\\t'),raw_translit.rfind(u'\\n')])\n raw_translit = raw_translit[:new_p]\n p += new_p\n else:\n p = new_p\n else:\n p = 0\n turned = True\n (translit,non_valids) = utils.valid(raw_translit, transliteration)\n for ind in range(len(translit)):\n x[0,ind,trans_to_index[translit[ind]]] = 1\n for ind in range(len(translit),int(seq_len)):\n x[0,ind,trans_to_index[u'\\u2001']] = 1\n \n return (x,non_valids,p,turned)\n \n\ndef get_residual_weight_matrix(network,csv_name, index_to_char, index_to_trans):\n W = network.get_params()[0].get_value()[-len(index_to_char):,:]\n fr = ['\" \"'] + ['\"' + index_to_char[i] + '\"' for i in range(len(index_to_char))]\n rows = [[index_to_trans[i]] + [x for x in W[i] ] for i in range(len(index_to_trans))]\n print(rows)\n codecs.open(csv_name,'w',encoding='utf-8').write(','.join(fr) + '\\n' + '\\n'.join(['\"' + row[0] + '\",' + ','.join([ \"%.3f\" %(r) for r in row[1:] ]) for row in rows]))\n\ndef translate_romanized(predict, data, seq_len, transliteration, trans_vocab_size, trans_to_index, index_to_char, long_letter_reverse_mapping):\n p = 0\n turned = False\n sentence_out = \"\\n\"\n while not turned:\n x, non_valids, p, turned = gen_validation_data(p, data, seq_len, transliteration, trans_vocab_size, trans_to_index)\n guess = utils.one_hot_matrix_to_sentence(predict(x),index_to_char).replace(u'\\u2001','').replace(u'\\u2000','')\n for letter in long_letter_reverse_mapping:\n guess = guess.replace(letter,long_letter_reverse_mapping[letter])\n\n final_guess = \"\"\n ind = 0\n for c in guess:\n if c == '#' and ind < len(non_valids):\n final_guess += non_valids[ind]\n ind += 1\n else:\n final_guess += c\n sentence_out += final_guess\n print(str(100.0*p/len(data)) + \"% done \", end='\\r')\n print(sentence_out)\n\ndef test(predict, data, language, model_name, seq_len, 
long_letter_reverse_mapping, transliteration, trans_to_index, char_to_index, index_to_trans, index_to_char):\n \n sentence_in = \"\"\n sentence_real = \"\"\n sentence_out = \"\"\n p = 0\n turned = False\n while not turned: \n x, y, non_valids, p, turned = utils.gen_data(p, seq_len, 1, data, transliteration, trans_to_index, char_to_index, is_train = False)\n sentence_in += utils.one_hot_matrix_to_sentence(x,index_to_trans).replace(u'\\u2001','').replace(u'\\u2000','')\n real_without_signs = utils.one_hot_matrix_to_sentence(y,index_to_char).replace(u'\\u2001','').replace(u'\\u2000','')\n ind = 0\n real = \"\"\n for c in real_without_signs:\n if c == '#' and ind < len(non_valids):\n real += non_valids[ind]\n ind += 1\n else:\n real += c\n sentence_real += real\n guess = utils.one_hot_matrix_to_sentence(predict(x),index_to_char).replace(u'\\u2001','').replace(u'\\u2000','')\n ind = 0\n final_guess = \"\"\n for c in guess:\n if c == '#' and ind < len(non_valids):\n final_guess += non_valids[ind]\n ind += 1\n else:\n final_guess += c\n sentence_out += final_guess\n print(str(100.0*p/len(data)) + \"% done \", end='\\r')\n for letter in long_letter_reverse_mapping:\n sentence_real = sentence_real.replace(letter,long_letter_reverse_mapping[letter])\n sentence_out = sentence_out.replace(letter,long_letter_reverse_mapping[letter])\n print(\"Computing editdistance and writing to -> \" + 'languages/' + language + '/results.' + model_name.split('/')[-1])\n \n fl = codecs.open('languages/' + language + '/results.' + model_name.split('/')[-1],'w',encoding='utf-8')\n fl.write(sentence_in + '\\n' + sentence_real + '\\n' + sentence_out + '\\n')\n fl.write(str(editdistance.eval(sentence_real,sentence_out)) + ' / ' + str(len(sentence_real)))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--hdim', default=512, type=int)\n parser.add_argument('--seq_len', default=40, type=int)\n parser.add_argument('--model', default=None)\n parser.add_argument('--depth', default=1, type=int)\n parser.add_argument('--translit_path', default=None)\n parser.add_argument('--language', default=None)\n \n args = parser.parse_args()\n\n print(\"Loading Files\")\n (char_to_index, index_to_char, vocab_size, trans_to_index, index_to_trans, trans_vocab_size) = utils.load_vocabulary(language = args.language)\n (test_text, trans, long_letter_reverse_mapping) = utils.load_language_data(language = args.language, is_train = False)\n print(\"Building network ...\")\n (output_layer, predict) = utils.define_model(args.hdim, args.depth, trans_vocab_size = trans_vocab_size, vocab_size = vocab_size, is_train = False)\n \n if args.model:\n f = np.load(args.model)\n param_values = [np.float32(f[i]) for i in range(len(f))]\n lasagne.layers.set_all_param_values(output_layer, param_values)\n print(\"Testing ...\")\n \n if args.translit_path:\n data = codecs.open(args.translit_path, 'r', encoding='utf-8').read()\n translate_romanized(predict, data, args.seq_len, trans, trans_vocab_size, trans_to_index, index_to_char, long_letter_reverse_mapping)\n\n else:\n test(predict, test_text, args.language, args.model, args.seq_len, long_letter_reverse_mapping, trans, trans_to_index, char_to_index, index_to_trans, index_to_char)\n \nif __name__ == '__main__':\n main()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"123944469","text":"#!/usr/bin/python\n\"\"\"Unit tests for poco.py.\n\nSTATE: 
testing xml\n\"\"\"\n\n__author__ = ['Ryan Barrett ']\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nimport mox\nfrom webob import exc\n\nimport poco\nimport source\nimport source_test\nimport testutil\n\n\nclass HandlersTest(testutil.HandlerTest):\n\n CONTACTS = [\n {'id': 2, 'displayName': 'me'},\n {'id': 4},\n {'id': 2, 'displayName': 'Ryan'}]\n SELF_CONTACTS = [\n {'id': 2, 'displayName': 'me'},\n {'id': 2, 'displayName': 'Ryan'}]\n\n def setUp(self):\n super(HandlersTest, self).setUp(application=poco.application)\n poco.SOURCE = source_test.FakeSource\n poco.SOURCE.contacts = self.CONTACTS\n poco.SOURCE.user_id = 2\n\n def assert_response(self, url, expected_contacts):\n resp = self.application.get_response(url)\n self.assertEquals(200, resp.status_int)\n self.assert_equals({\n 'startIndex': 0,\n 'itemsPerPage': 3,\n 'totalResults': len(expected_contacts),\n 'entry': expected_contacts,\n 'filtered': False,\n 'sorted': False,\n 'updatedSince': False,\n },\n json.loads(resp.body))\n\n def test_all_no_contacts(self):\n for url in '/poco', '/poco/', '/poco/@me/@all', '/poco/@me/@all/':\n self.setUp()\n poco.SOURCE.contacts = []\n self.assert_response(url, [])\n\n def test_all_get_some_contacts(self):\n self.assert_response('/poco/@me/@all/', self.CONTACTS)\n\n def test_self(self):\n self.assert_response('/poco/@me/@self/', self.SELF_CONTACTS)\n\n def test_user_id(self):\n self.assert_response('/poco/@me/@all/2/', self.SELF_CONTACTS)\n\n def test_json_format(self):\n self.assert_response('/poco/@me/@all/?format=json', self.CONTACTS)\n\n def test_xml_format(self):\n resp = self.application.get_response('/poco/@me/@all/?format=xml')\n self.assertEquals(200, resp.status_int)\n self.assertEqual(\"\"\"\\\n\n\n\nme\n2\n\n\n4\n\n\nRyan\n2\n\n3\nFalse\n0\nFalse\nFalse\n3\n\n\"\"\", resp.body)\n\n def test_pass_through_start_index_and_count(self):\n self.mox.StubOutWithMock(poco.SOURCE, 'get_contacts')\n poco.SOURCE.get_contacts(None, startIndex=2, count=4).AndReturn([])\n self.mox.ReplayAll()\n self.application.get_response('/poco/@me/@all/?startIndex=2&count=4')\n\n def test_bad_start_index(self):\n resp = self.application.get_response('/poco/@me/@all/?startIndex=foo')\n self.assertEquals(400, resp.status_int)\n\n def test_bad_count(self):\n resp = self.application.get_response('/poco/@me/@all/?count=-1')\n self.assertEquals(400, resp.status_int)\n\n def test_unknown_format(self):\n resp = self.application.get_response('/poco/@me/@all/?format=bad')\n self.assertEquals(400, resp.status_int)\n","sub_path":"poco_test.py","file_name":"poco_test.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"34366267","text":"from collections import OrderedDict, defaultdict\nimport numpy as np\nfrom StructureLearning.utils import get_angle_of_vector, get_positive_decimal, get_kabsch, \\\n get_permutation_matrix_hungarian\nfrom copy import deepcopy\nfrom itertools import chain\nimport datetime\n\n\nclass Structure:\n def __init__(self, unit_cell, atom_dict, cart_positions, direct_positions=None,\n name='Structure', scale=1.0, sdy=None):\n self.unit_cell = unit_cell\n self.atom_dict = atom_dict\n self.name = name\n self.scale = scale\n self.atom_list = list(atom_dict.keys())\n self.cart_positions = OrderedDict()\n self.direct_positions = OrderedDict()\n self.set_total_positions(cart_positions, direct_positions)\n self.__initialize()\n self.fit_positions_to_range()\n self.list_for_searcher = 
list()\n self.unique_id = str(id(self)) + str(hash(datetime.datetime.now()))\n self.sdy = OrderedDict()\n if sdy is not None:\n first_idx = 0\n for atom, second_idx in zip(self.atom_list, np.cumsum(np.array(list(self.atom_dict.values())))):\n self.sdy[atom] = sdy[first_idx: second_idx]\n first_idx += second_idx\n else:\n for atom, idx in self.atom_dict.items():\n self.sdy[atom] = [np.array([]) for _ in range(idx)]\n\n def __initialize(self):\n self.a, self.b, self.c = tuple([self.unit_cell[:, i] for i in range(3)])\n self.alpha = get_angle_of_vector(self.b, self.c)\n self.beta = get_angle_of_vector(self.a, self.c)\n self.gamma = get_angle_of_vector(self.a, self.b)\n\n def get_every_atom_to_list(self):\n l = []\n for key, value in self.atom_dict.items():\n for i in range(value):\n l.append(key)\n return l\n\n def get_total_number_of_atom(self):\n return sum(self.atom_dict.values())\n\n def get_total_positions(self, data_type='Cartesian'):\n pos = self.cart_positions if data_type == 'Cartesian' else self.direct_positions\n return np.vstack(tuple(pos.values()))\n\n def set_total_positions(self, positions, positions2=None, first_data_type='Cartesian'):\n if positions2 is None:\n matrix = np.linalg.inv(self.unit_cell) if first_data_type == 'Cartesian' else self.unit_cell\n positions2 = np.array(list(map(lambda x: np.dot(x, matrix), positions)))\n cart, direct = (positions, positions2) if first_data_type == 'Cartesian' else (positions2, positions)\n first_idx = 0\n for atom, second_idx in zip(self.atom_list, np.cumsum(np.array(list(self.atom_dict.values())))):\n self.cart_positions[atom] = cart[first_idx:second_idx]\n self.direct_positions[atom] = direct[first_idx:second_idx]\n first_idx += second_idx\n\n def get_duplicate(self):\n return deepcopy(self)\n\n def fit_positions_to_range(self, synchronized=True):\n if not synchronized:\n self.set_total_positions(self.get_total_positions())\n for atom in self.direct_positions.keys():\n for idx in range(len(self.direct_positions[atom])):\n self.direct_positions[atom][idx] = np.array(list(map(get_positive_decimal,\n self.direct_positions[atom][idx])))\n self.set_total_positions(self.get_total_positions('Direct'), first_data_type='Direct')\n\n def get_total_sdy(self):\n return np.vstack(tuple(self.sdy.values()))\n\n def insert_sdy(self, sdy_type):\n if sdy_type is None:\n insert = np.array([])\n elif sdy_type == 'T':\n insert = np.array(['T', 'T', 'T'])\n else:\n insert = np.array(['F', 'F', 'F'])\n for atom, idx in self.atom_dict.items():\n for i in range(idx):\n self.sdy[atom][i] = insert\n\n def update_atom_list(self):\n self.atom_list = list(self.atom_dict.keys())\n\n # Functions below are only used for calculation\n def get_sorted_index_of_ranged_atoms(self, range_of_atom):\n self.list_for_searcher = list()\n for atom in range_of_atom.keys():\n if range_of_atom[atom] is None:\n for idx in range(len(self.cart_positions[atom])):\n self.list_for_searcher.append([self.cart_positions[atom][idx], idx, atom])\n else:\n for idx in range_of_atom[atom]:\n self.list_for_searcher.append([self.cart_positions[atom][idx], idx, atom])\n self.list_for_searcher.sort(key=lambda x: sum(x[0]**2))\n\n # Function for calculating rmsd between two structure\n def get_position_of_selected_range_of_atoms(self, range_of_atom):\n l_matrix = list()\n for atom in range_of_atom.keys():\n if range_of_atom[atom] is None:\n for idx in range(len(self.cart_positions[atom])):\n l_matrix.append(self.cart_positions[atom][idx])\n else:\n for idx in range_of_atom[atom]:\n 
l_matrix.append(self.cart_positions[atom][idx])\n return np.array(l_matrix)\n\n\ndef get_rmsd_between_two_structure(structure1: Structure, structure2: Structure, range_atom: dict)-> float:\n matrix_p = structure1.get_position_of_selected_range_of_atoms(range_atom)\n matrix_q = structure2.get_position_of_selected_range_of_atoms(range_atom)\n atom_num = len(matrix_p)\n sorted_matrix_p = np.array(sorted(matrix_p, key=lambda x: np.linalg.norm(x)))\n sorted_matrix_q = np.array(sorted(matrix_q, key=lambda x: np.linalg.norm(x)))\n rmsd1 = np.linalg.norm(np.dot(sorted_matrix_p, get_kabsch(sorted_matrix_p, sorted_matrix_q)) - sorted_matrix_q)\n kabsch_1 = np.dot(matrix_q, get_kabsch(matrix_q, matrix_p))\n rmsd2 = np.linalg.norm(matrix_p - np.dot(get_permutation_matrix_hungarian(matrix_p, kabsch_1), kabsch_1))\n hungarian_1 = np.dot(get_permutation_matrix_hungarian(matrix_p, matrix_q), matrix_q)\n rmsd3 = np.linalg.norm(matrix_p - np.dot(hungarian_1, get_kabsch(hungarian_1, matrix_p)))\n return min(rmsd1, rmsd2, rmsd3) / (atom_num ** 0.5)\n\n\n\n","sub_path":"StructureLearning/Structure.py","file_name":"Structure.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"312260467","text":"# -----------------------------------------------------------------------------\n#\n# P A G E B O T E X A M P L E S\n#\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens\n# www.pagebot.io\n# Licensed under MIT conditions\n#\n# Supporting DrawBot, www.drawbot.com\n# Supporting Flat, xxyxyz.org/flat\n# -----------------------------------------------------------------------------\n#\n# E05_ImageClipping.py\n#\n# Draw images with clipping paths and rotation.\n#\nfrom random import random\n#from pagebot.contexts.flat.flatcontext import FlatContext\nfrom pagebot import getContext\n\nfrom pagebot.filepaths import getResourcesPath\nfrom pagebot.document import Document\nfrom pagebot.toolbox.color import color, noColor\nfrom pagebot.toolbox.units import em, p, pt, inch, degrees\nfrom pagebot.conditions import * # Import all conditions for convenience.\nfrom pagebot.constants import *\nfrom pagebot.elements import *\n\n#context = FlatContext()\ncontext = getContext('DrawBot')\n\n# Example image that has nice areas to put text as example.\nimagePath = getResourcesPath() + '/images/peppertom_lowres_398x530.png'\nEXPORT_PATH = '_export/05_ImageClipping.pdf'\n\nW = pt(400) # Document size\nH = pt(400)\nPADDING = pt(24) # Page padding on all sides\nBLEED = pt(6)\n\n# Create a new document with 1 page. 
Set overall size and padding.\ndoc = Document(w=W, h=H, title=EXPORT_PATH, padding=PADDING, context=context)\n# Get the default page view of the document and set viewing parameters\nview = doc.view\nview.padding = pt(30)\nview.showFrame = True\nview.showPadding = True\nview.showColorBars = False\nview.showCropMarks = True\nview.showRegistrationMarks = True\nview.showNameInfo = True # Showing page info and title on top of the page.\n\n# Get the page\npage = doc[1]\n# Make image box as child element of the page and set its layout conditions.\n# The image is portrait, so fitting vertical makes the image fit in the\n# padding box of the page.\nconditions = [Fit2Height(), Center2Center()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\n\npage = page.next\n# Fitting the image by width, it does not fit vertically anymore.\nconditions = [Fit2Width(), Middle2Middle()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\n\npage = page.next\n# Fitting the image by width, it does not fit vertically anymore.\n# Adding a mask as sibling, we can clip the image on the page padding.\nconditions = [Fit2Width(), Middle2Middle()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\nconditions =[Fit()]\nmask = newMask(parent=page, conditions=conditions, # Fit page padding\n\tshowOrigin=True)\n\npage = page.next\n# Fitting the image by width, it does not fit vertically anymore.\n# The Mask can be any size and position on the page.\nconditions = [Fit2Width(), Middle2Middle()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\nconditions = [Right2Right(), Top2Top()]\nmask = newMask(parent=page, conditions=conditions,\n\tw=page.pw/2, h=page.ph/2, showOrigin=True)\n\npage = page.next\npage.bleed = BLEED # Set all bleed sides to the same value\n# Fitting the image by width, it does not fit vertically anymore.\n# Making the image bleed on page width.\nconditions = [Left2BleedLeft(), Fit2BleedRight(), Middle2Middle()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\nconditions = [Right2Right(), Top2Top()]\n\npage = page.next\npage.bleed = BLEED # Set all bleed sides to the same value\n# Fitting the image by width, it does not fit vertically anymore.\n# Making the image bleed on page width.\n# Now the mask needs to follow the bleed fit too.\nconditions = [Left2BleedLeft(), Fit2BleedRight(), Middle2Middle()]\nim = newImage(imagePath, parent=page, conditions=conditions, \n\tshowOrigin=True)\n# Fit the mask on top half of the page, including bleed\nconditions = [Left2BleedLeft(), Fit2BleedRight(), Top2BleedTop()]\nmask = newMask(parent=page, conditions=conditions,\n\tw=page.w/2+2*BLEED, h=page.h/2+BLEED, showOrigin=True)\n\ndoc.solve()\n\n# Export the document to this PDF file.\ndoc.export(EXPORT_PATH)\n\n","sub_path":"Basics/E05_Images/E05_ImageClipping.py","file_name":"E05_ImageClipping.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"267450983","text":"import socket\r\n\r\nclass Socket:\r\n\r\n def __init__(self):\r\n self.HOST=\"localhost\"\r\n self.PORT=8000\r\n self.s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.s.connect((self.HOST,self.PORT))\r\n\r\n\r\n def toServer(self,select):\r\n if select==1:\r\n self.s.sendall(self.Dangnhap().encode())\r\n if select==2:\r\n self.s.sendall(self.Dangky().encode())\r\n if select==3:\r\n 
self.s.sendall(self.Dangxuat().encode())\r\n        if select==4:\r\n            self.s.sendall(self.Ketban().encode())\r\n        if select==5:\r\n            self.s.sendall(self.Block().encode())\r\n        if select==6:\r\n            self.s.sendall(self.Guitinnhan().encode())\r\n        if select==7:\r\n            self.s.sendall(self.Xemtinnhan().encode())\r\n        if select==8:\r\n            self.s.sendall(self.Danhsachbanbe().encode())\r\n    def fromServer(self):\r\n        data=self.s.recv(1024)\r\n        return data.decode()\r\n\r\n\r\n\r\n    def Dangnhap(self):\r\n        print(\"Account:\")\r\n        taikhoan=input()\r\n        print(\"Password:\")\r\n        matkhau=input()\r\n        data=\"\"\r\n        data=data+\"dangnhap\"+\",\"+taikhoan+\",\"+matkhau\r\n        return data\r\n\r\n\r\n\r\n    def Dangky(self):\r\n        print(\"Account:\")\r\n        taikhoan=input()\r\n        print(\"Password:\")\r\n        matkhau=input()\r\n        print(\"Full name:\")\r\n        hoten=input()\r\n        print(\"City:\")\r\n        thanhpho=input()\r\n        data=\"\"\r\n        data=data+\"dangky\"+\",\"+taikhoan+\",\"+matkhau+\",\"+hoten+\",\"+thanhpho\r\n        return data\r\n\r\n\r\n\r\n    def Dangxuat(self):\r\n        data=\"dangxuat\"\r\n        return data\r\n\r\n\r\n    def Ketban(self):\r\n        print(\"Send a friend request to account:\")\r\n        taikhoan=input()\r\n        data=\"\"\r\n        data=data+\"ketban\"+\",\"+taikhoan\r\n        return data\r\n\r\n\r\n\r\n    def Block(self):\r\n        print(\"Account to block:\")\r\n        taikhoan=input()\r\n        data=\"\"\r\n        data=data+\"block\"+\",\"+taikhoan\r\n        return data\r\n\r\n\r\n\r\n    def Guitinnhan(self):\r\n        print(\"Send a message to account:\")\r\n        taikhoan=input()\r\n        print(\"Message content:\")\r\n        noidung=input()\r\n        data=\"\"\r\n        data=data+\"guitinnhan\"+\",\"+taikhoan+\",\"+noidung\r\n        return data\r\n\r\n\r\n\r\n    def Xemtinnhan(self):\r\n        result=\"xemtinnhan\"\r\n        return result\r\n\r\n\r\n    def Danhsachbanbe(self):\r\n        result=\"danhsachbanbe\"\r\n        return result","sub_path":"Client/SocketClient.py","file_name":"SocketClient.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"157186637","text":"#! 
-*- coding:utf-8 -*-\n\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom flask.ext.script import Manager, Server\nfrom luffy import app\ntry:\n from luffy.config.online_config import *\n app.debug = False\nexcept:\n from luffy.config.local_config import *\n app.debug = True\n\n\nmanager = Manager(app)\n\nmanager.add_command(\"runserver\", Server(\n use_debugger = True,\n use_reloader = True,\n host = \"0.0.0.0\",\n port = portnum)\n)\n\nif __name__ == \"__main__\":\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"31444925","text":"import requests\r\nimport json\r\nBSAE_URL='http://127.0.0.1:8000/'\r\nENDPOINT='apiempall/'\r\n# def get_resource():\r\n# resp=requests.get(BSAE_URL+ENDPOINT)\r\n# print(resp.content)\r\n# print(resp.status_code)\r\n# print(resp.json())\r\n# get_resource()\r\ndef get_all():\r\n resp=requests.get(BSAE_URL+ENDPOINT)\r\n print(resp.json())\r\nget_all()\r\n\r\ndef create_resource():\r\n new_emp={\r\n 'eno':'109',\r\n 'ename':'test',\r\n 'esal':111,\r\n 'eadd':'chhattisgarh',\r\n }\r\n resp=requests.post(BSAE_URL+ENDPOINT,data=json.dumps(new_emp))\r\n print(resp.status_code)\r\n print(resp.json())\r\n \r\ncreate_resource()\r\n\r\n\r\n ","sub_path":"withoutrest/apitest.py","file_name":"apitest.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"38835055","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program is a modification of the original provided by Fran. It creates a simple lexicon with a one to one grapheme to phoneme mapping, besides some consonant clusters that are known to map to a single phone.\n\nIf provided with a JSON dictionary of normalised to Dieth forms, it is expected to produced a lexicon for normalised transcriptions.\n\"\"\"\n\nimport sys\nimport re\nimport argparse\nimport unicodedata\nimport json\nfrom collections import Counter\n\n# Signs to exclude from the transcriptions (when --add-signs is not specified)\nEXCLUDE_SET = set([\"'\", '-', '.'])\n\n\ndef get_args():\n \"\"\"\n Returns the command line arguments\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description='Generates pronunciations for an input vocabulary by mapping clusters of graphemes to phonetic symbols')\n\n parser.add_argument('--vocabulary', '-v', help='Input vocabulary',\n required=True)\n\n parser.add_argument('--cluster-file', '-c',\n help='File with the consonant clusters', required=False)\n\n parser.add_argument('--n2d', '-d', required=False,\n help='If provided, lexicon is created with a mapping from normalised to Dieth transcription forms. JSON file is expected')\n\n parser.add_argument('--map-diacritic', '-m', required=False,\n help='Map compound diacritics to alternative character. If null, just recombines', default=None)\n\n parser.add_argument('--output-file', '-o', help='Output lexicon',\n required=True)\n\n parser.add_argument('--verbose', required=False,\n action='store_true', help='Print progress to stdout.')\n\n args = parser.parse_args()\n\n if not args.map_diacritic:\n args.map_diacritic = None\n\n return args\n\n\ndef process_unicode_compounds(data, map_diacritic=None):\n \"\"\"\n Correctly re-combines compound unicode characters.\n\n Args:\n * data (str|list) Input unicode data. 
String or list.\n    * map_diacritic (None|unicode) Unicode string to map all combining characters to. Default to original character.\n    Returns:\n        list of unicode characters, where all compounds have been recombined, either using the original, or the map_diacritic value.\n    \"\"\"\n\n    # print(data)\n    for char in data:\n        if not isinstance(char, str):\n            raise TypeError('All chars in data must be valid unicode instances!')\n\n    if map_diacritic is not None and not isinstance(map_diacritic, str):\n        raise TypeError('map_diacritic MUST be None or a valid unicode string.')\n\n    # Split into individual characters (not graphemes!)\n    # it is necessary to recombine once, just in case the user provided a list\n    chars = [char for char in ''.join(data)]\n\n    # Recombine unicode compounds.\n    # NOTE: unicodedata.normalize does NOT cover all examples in the data, so we have to\n    # do this manually.\n    # The compound diacritics always follow the letter they combine with.\n    chars.reverse()\n    chunk = []\n    tmp_chars = []\n    for char in chars:\n        # check if the character has a combining diacritic\n        if unicodedata.combining(char):\n            if map_diacritic:\n                chunk.append(map_diacritic)\n                # print(chars)\n            else:\n                # remove additional diacritics (they are used inconsistently anyway!)\n                pass\n                # chunk.append(char)\n        else:\n            chunk.append(char)\n            chunk.reverse()\n            tmp_chars.append(''.join(chunk))\n            chunk = []\n\n    # After successful recombination we finally have a list of actual graphemes\n    chars = [char for char in tmp_chars]\n    chars.reverse()\n\n    return chars\n\n\ndef get_max_length(clusters_dict):\n    \"\"\"\n    Calculates the maximum cluster length given the input 'clusters_dict'\n    \"\"\"\n    max_length_cluster = 0\n    for clust in clusters_dict:\n        if len(clust) > max_length_cluster:\n            max_length_cluster = len(clust)\n    return max_length_cluster\n\n\ndef read_clusters(clusters_file, verbose=False):\n    \"\"\"\n    Reads the file with the clusters\n    input:\n        * clusters_file (str) name of the input file, with the consonant clusters and their mappings to some phoneme name (\"cluster\" \"phone\")\n    returns:\n        * a dictionary with the clusters as keys, and the corresponding phones as values\n    \"\"\"\n\n    output = {}\n\n    with open(clusters_file, 'r', encoding='utf8') as inf:\n\n        if verbose:\n            print('In read_clusters:')\n\n        for line in inf:\n\n            if verbose:\n                print('\\tLine = ' + line)\n\n            fields = line.rstrip().split('\\t')\n\n            if len(fields) != 2:\n                sys.stderr.write(\n                    'Error: the file {0} must have exactly two columns separated by tabs. 
Check {1}\\n'.format(clusters_file, line))\n                sys.exit(1)\n\n            output[fields[0]] = fields[1].split()\n\n            if verbose:\n                print('\\t{}'.format('-'.join(fields[1].split())))\n\n    return output\n\n\ndef transcribe_simple(word, clusters, max_length_cluster, map_diacritic=None, verbose=False):\n    \"\"\"\n    Transcribes a word mapping each grapheme to itself, besides some special clusters\n    input:\n        * word (str): Input word\n        * cluster (dict): Dictionary mapping clusters of graphemes to single\n          phones\n        * max_length_cluster (int): maximum length of all the consonant\n          clusters\n    returns:\n        * a string with a pseudo phonetic transcription of the input word\n    \"\"\"\n\n    word = process_unicode_compounds(word, map_diacritic)\n    word_length = len(word)\n\n    output = ['']\n\n    graph_index = 0\n\n    while graph_index < word_length:\n\n        if word[graph_index] in EXCLUDE_SET:\n            if verbose:\n                print('skipping \"{0}\" ...'.format(word[graph_index]))\n            graph_index += 1\n            continue\n\n        transcribed = 0\n\n        for index in range(graph_index + max_length_cluster - 1,\n                           graph_index, -1):\n\n            if verbose:\n                print('\\tIndex = {0}. Graph = {1}. Length = {2}'.format(\n                    index, graph_index, word_length))\n\n            if index >= word_length:\n                continue\n\n            current_clust = ''.join(word[graph_index:index+1])\n\n            if verbose:\n                print('\\tLooking for cluster {}'.format(current_clust))\n\n            if current_clust in clusters:\n\n                if verbose:\n                    print('\\tFound \"{}\" ...'.format(current_clust))\n\n                interm_output = []\n                for trans in clusters[current_clust]:\n                    for multi in output:\n                        interm_output.append(multi+' '+trans)\n\n                output = interm_output\n\n                graph_index += len(current_clust)\n                transcribed = 1\n                break\n\n        if transcribed == 0:\n            interm_output = []\n            for multi in output:\n                interm_output.append(multi + ' ' + word[graph_index])\n            output = interm_output\n            # output = output + ' ' + word[graph_index]\n\n            graph_index += 1\n\n            if verbose:\n                for multi in output:\n                    print('\\tNo cluster: {}'.format(multi))\n\n    if verbose:\n        print('Output: {}'.format(','.join(output)))\n\n    return [multi.strip() for multi in output]\n\n\ndef main():\n    \"\"\"\n    Main function of the program\n    \"\"\"\n\n    # Get the command line arguments:\n    args = get_args()\n\n    # clusters = read_clusters(args.cluster_file)\n    # max_length_cluster = get_max_length(clusters)\n\n    try:\n        input_f = open(args.vocabulary, 'r', encoding='utf8')\n    except IOError as err:\n        sys.stderr.write(\n            'Error opening {0} ({1})\\n'.format(args.vocabulary, err))\n        sys.exit(1)\n\n    try:\n        output_f = open(args.output_file, 'w', encoding='utf8')\n    except IOError as err:\n        sys.stderr.write(\n            'Error creating {0} ({1})\\n'.format(args.output_file, err))\n        sys.exit(1)\n\n    for w in input_f:\n        w = w.strip()\n        if w:\n            output_f.write('{} {}\\n'.format(w.strip(), w.strip()))\n\n    output_f.close()\n    input_f.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"archimob_char/create_char_lexicon.py","file_name":"create_char_lexicon.py","file_ext":"py","file_size_in_byte":8346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"84754085","text":"import numpy\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\ndef get_accuracy(predicted, expected):\n    count = 0.0\n    total = len(expected)\n    for i in range(total):\n        if predicted[i] != expected[i]:\n            count += 1\n\n    return (total - count) / total\n\n\nclass HardCodedModel:\n    def __init__(self):\n        pass\n\n    def predict(self, data):\n        targets = numpy.zeros(len(data))\n        return targets\n\n\nclass 
HardCodedClassifier:\n def __init__(self):\n pass\n\n def fit(self, data, targets):\n return HardCodedModel()\n\n\nclass KNNModel:\n def __init__(self, data, targets):\n self.k = 3\n self.data = data\n self.targets = targets\n\n def predict(self, test_data):\n # Predict each element in dataset\n targets = []\n for element in test_data:\n predicted_target = self.predict_one(element)\n targets.append(predicted_target)\n\n return targets\n\n def predict_one(self, test_element):\n test_element_size = len(test_element)\n training_data_size = len(self.data)\n distance_list = []\n for i in range(training_data_size):\n distance = 0.0\n for j in range(test_element_size):\n distance += (test_element[j] - self.data[i][j]) ** 2\n distance_list.append((distance, self.targets[i]))\n\n # Sort distance list\n # Source: https://stackoverflow.com/questions/10695139/sort-a-list-of-tuples-by-2nd-item-integer-value\n sorted_distance_list = sorted(distance_list, key=lambda x: x[0])\n\n # Get k nearest neighbors\n nearest_neighbors = sorted_distance_list[:self.k]\n\n # Find most common neighbor type\n types_of_nearest_neighbors = []\n for neighbor in nearest_neighbors:\n types_of_nearest_neighbors.append(neighbor[1])\n\n # Source: https://stackoverflow.com/questions/10797819/finding-the-mode-of-a-list\n predicted_type = max(types_of_nearest_neighbors, key=types_of_nearest_neighbors.count)\n return predicted_type\n\n\nclass KNNClassifier:\n def __init__(self):\n pass\n\n def fit(self, data, targets):\n return KNNModel(data, targets)\n\n\ndef main():\n # Load the datasets\n iris = datasets.load_iris()\n data = iris.data\n target = iris.target\n data_train, data_test, targets_train, targets_test = train_test_split(\n data,\n target,\n train_size=0.7,\n test_size=0.3,\n shuffle=True\n )\n\n classifier = KNNClassifier()\n model = classifier.fit(data_train, targets_train)\n targets_predicted = model.predict(data_test)\n print(get_accuracy(targets_predicted, targets_test))\n\nmain()","sub_path":"week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"408880681","text":"\"\"\"Strongly connected components for directed graph\"\"\"\nfrom collections import deque\n\nclass DirectedGraphNode:\n\n def __init__(self, id):\n self.id = id\n self.neighbors = []\n\nclass Graph:\n\n def __init__(self):\n self.nodes = {}\n\n def __getitem__(self, id):\n return self.nodes[id]\n\n def __setitem__(self, id, value):\n self.nodes[id] = value\n\ndef scc(graph):\n reversed_graph = graph_reverse(graph)\n seq = dfs_generate_seq(reversed_graph)\n scc = dfs_compute_scc(graph, seq)\n return scc\n\ndef graph_reverse(graph):\n new_graph = Graph()\n for id in graph.nodes:\n new_graph[id] = DirectedGraphNode(id)\n for id, node in graph.nodes.items():\n for neighbor in node.neighbors:\n new_graph[neighbor.id].neighbors.append(new_graph[id])\n return new_graph\n\ndef dfs_generate_seq(graph):\n visited = set()\n seq = []\n for id, node in graph.nodes.items():\n if id not in visited:\n visited.add(id)\n dfs_generate_seq_helper(node, visited, seq)\n return seq\n\ndef dfs_generate_seq_helper(node, visited, seq):\n for neighbor in node.neighbors:\n if neighbor.id not in visited:\n visited.add(neighbor.id)\n dfs_generate_seq_helper(neighbor, visited, seq)\n seq.append(node.id)\n\ndef dfs_compute_scc(graph, seq):\n visited = set()\n scc = []\n for id in reversed(seq):\n node = graph[id]\n if id not in visited:\n visited.add(id)\n 
cluster = [id]\n dfs_compute_scc_helper(node, visited, cluster)\n scc.append(cluster)\n return scc\n\ndef dfs_compute_scc_helper(node, visited, cluster):\n for neighbor in node.neighbors:\n if neighbor.id not in visited:\n visited.add(neighbor.id)\n cluster.append(neighbor.id)\n dfs_compute_scc_helper(neighbor, visited, cluster)\n","sub_path":"data_structures/tree/graph/strong_connected_components_directed_graph.py","file_name":"strong_connected_components_directed_graph.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43307468","text":"#! /usr/bin/env python\nimport sys\nimport rospy\nimport tf\nimport moveit_commander\nimport copy\nfrom math import pi\nimport geometry_msgs\n\ndef open_gripper(group, w):\n group.set_named_target(\"open\")\n group.go(wait=w)\n group.stop()\n\ndef close_gripper(group, w):\n group.set_named_target(\"close\")\n group.go(wait=w)\n group.stop()\n\ndef go_home(group, w):\n group.set_named_target(\"HOME\")\n group.go(wait=w)\n group.stop()\n\ndef go_to_joint_state(group, joint_goal, w):\n group.go(joint_goal, wait=w)\n group.stop()\n\ndef go_to_pose_goal(group, pose_goal, w):\n group.set_pose_target(pose_goal)\n group.go(wait=w)\n group.stop()\n group.clear_pose_targets()\n\ndef plan_cartesian_path(group, waypoints):\n # We want the Cartesian path to be interpolated at a resolution of 1 cm\n # which is why we will specify 0.01 as the eef_step in Cartesian\n # translation. We will disable the jump threshold by setting it to 0.0 disabling:\n (plan, fraction) = group.compute_cartesian_path(\n waypoints, # waypoints to follow\n 0.01, # eef_step\n 0.0) # jump_threshold\n # print(plan)\n # Note: We are just planning, not asking move_group to actually move the robot yet:\n return plan, fraction\n\ndef execute_plan(group, plan, w):\n ## Executing a Plan\n ## ^^^^^^^^^^^^^^^^\n ## Use execute if you would like the robot to follow\n ## the plan that has already been computed:\n group.execute(plan, wait=w)\n\n\ndef main():\n ## First initialize `moveit_commander`_ and a `rospy`_ node:\n moveit_commander.roscpp_initializer.roscpp_initialize(sys.argv)\n rospy.init_node('move_group_simul_motion', anonymous=True)\n\n ## Instantiate a `RobotCommander`_ object. 
This object is the outer-level interface to\n ## the robot:\n l_robot = moveit_commander.robot.RobotCommander(\"/left_manipulator/robot_description\", ns=\"left_manipulator\")\n r_robot = moveit_commander.robot.RobotCommander(\"/right_manipulator/robot_description\", ns=\"right_manipulator\")\n\n ## Instantiate a `MoveGroupCommander`_ object.\n left_arm = moveit_commander.MoveGroupCommander(\"manipulator\", \"/left_manipulator/robot_description\", ns=\"left_manipulator\")\n right_arm = moveit_commander.MoveGroupCommander(\"manipulator\", \"/right_manipulator/robot_description\", ns=\"right_manipulator\")\n left_hand = moveit_commander.MoveGroupCommander(\"gripper\", \"/left_manipulator/robot_description\", ns=\"left_manipulator\")\n right_hand = moveit_commander.MoveGroupCommander(\"gripper\", \"/right_manipulator/robot_description\", ns=\"right_manipulator\")\n\n rospy.sleep(5)\n \n joint_goal = left_arm.get_current_joint_values()\n joint_goal[4] = -pi/2\n go_to_joint_state(left_arm, joint_goal, True)\n\n rospy.sleep(5)\n\n joint_goal = left_arm.get_current_joint_values()\n joint_goal[4] = pi/2\n go_to_joint_state(left_arm, joint_goal, True)\n\n rospy.sleep(5)\n\n # Go home\n go_home(left_arm, True)\n \n rospy.sleep(1)\n\n moveit_commander.roscpp_initializer.roscpp_shutdown()\n \nif __name__ == '__main__':\n main()","sub_path":"src/itri_control/doc/nj130_test/scripts/ft_test.py","file_name":"ft_test.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"159391254","text":"import imageio\nimport os\nimport sys\n\ndef create_gif(source,name,duration):\n\tframes=[]\n\tfor image in source:\n\t\tframes.append(imageio.imread(image))\n\timageio.mimsave(name,frames,'GIF',duration=duration)\n\tprint(\"Finish\")\n\ndef main(path):\n\tpic_list=os.listdir(path)\n\ttemp_list=[]\n\tfor file in pic_list:\n\t\ttemp_list.append(os.path.join(path,file))\n\tpic_list=temp_list\n\tgif_name=\"result.gif\"\n\tduration_time=0.0\n\tcreate_gif(pic_list,gif_name,duration_time)\n\nif __name__=='__main__':\n\tparam_list=sys.argv\n\tif len(param_list)!=2:\n\t\tprint(\"Input the folder!\")\n\telse:\n\t\tmain(param_list[1])\n","sub_path":"Homeworks/7_SimulationTaichi/report/demo/Gengif.py","file_name":"Gengif.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"627745285","text":"import numpy as np\nimport torch.utils.data\n\nfrom data.mmhand_dataset import MMHandDataset\nfrom data.rhd_dataset import RHDdataset\nfrom data.stb_dataset import STBdataset\n\n\nclass MMHandDatasetDataLoader():\n def __init__(self, opt):\n self.opt = opt\n if opt.dataset == 'stb':\n self.dataset = STBdataset(opt)\n elif opt.dataset == 'rhd':\n self.dataset = RHDdataset(opt)\n else:\n self.dataset = MMHandDataset(opt)\n\n def _init_fn(w_id):\n np.random.seed(opt.seed)\n self.distributed_sampler = None\n if self.opt.distributed:\n self.distributed_sampler = torch.utils.data.distributed.DistributedSampler(\n self.dataset)\n self.func = _init_fn\n if self.opt.local_rank == 0:\n print(\"dataset [%s] was created\" % type(self.dataset).__name__)\n else:\n print(\"dataset [%s] was created\" % type(self.dataset).__name__)\n self.func = None\n\n self.dataloader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=opt.batchSize,\n shuffle=False,\n pin_memory=True,\n sampler=self.distributed_sampler,\n worker_init_fn=self.func,\n 
num_workers=int(opt.nThreads))\n\n def __len__(self):\n return min(len(self.dataset), self.opt.max_dataset_size)\n\n def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i >= self.opt.max_dataset_size:\n break\n yield data\n","sub_path":"data/mmhand_dataset_data_loader.py","file_name":"mmhand_dataset_data_loader.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"124435464","text":"import io\nimport xlsxwriter\nimport zipfile\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.views.generic.detail import DetailView\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .viewsAlexis import *\nfrom django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom carga_horaria.models import Periodo, Colegio, Plan\nfrom carga_horaria.formsDani import PeriodoForm, ColegioForm, PlanForm\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom guardian.shortcuts import get_objects_for_user\nfrom guardian.shortcuts import assign_perm\nfrom guardian.shortcuts import remove_perm\nfrom wkhtmltopdf.views import PDFTemplateResponse, PDFTemplateView\nfrom .models import Nivel\nfrom .models import Profesor\nfrom .models import Asistente\nfrom .models import Periodo\nfrom .models import Asignacion\nfrom .models import AsignacionExtra\nfrom .models import AsignacionNoAula\nfrom .models import Colegio\nfrom .forms import AsignacionForm\nfrom .forms import AsignacionUpdateForm\nfrom .forms import AsignacionFUAForm\nfrom .forms import AsignacionNoAulaFUAForm\nfrom .forms import AsignacionFUAUpdateForm\nfrom .forms import AsignacionNoAulaFUAUpdateForm\nfrom .forms import AsignacionExtraForm\nfrom .forms import AsignacionExtraUpdateForm\nfrom .forms import AsignacionNoAulaForm\nfrom .forms import AsignacionNoAulaUpdateForm\nfrom .models import AsignacionAsistente\nfrom .forms import AsignacionAsistenteForm\nfrom .forms import AssignPermForm\nfrom .formsDani import PlantillaPlanForm\n\n\n@login_required\ndef assign(request):\n if not request.user.is_superuser:\n raise Http404\n\n year = request.session.get('periodo', 2020)\n if request.method == 'POST':\n form = AssignPermForm(request.POST, year=year)\n if form.is_valid():\n user = form.cleaned_data['usuario']\n\n # clear perms first\n remove_perm('carga_horaria.change_colegio', user, get_objects_for_user(user, 'carga_horaria.change_colegio').filter(periode=year))\n\n for c in form.cleaned_data['colegios']:\n assign_perm('change_colegio', user, c)\n \n form = AssignPermForm(year=year)\n return render(request, 'carga_horaria/assign.html', {'form': form})\n\n\n\n@login_required\ndef switch_periodo(request, year=2021):\n request.session['periodo'] = year\n try:\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n except KeyError:\n pass\n return redirect('carga-horaria:home')\n\n@login_required\ndef switch(request, pk=None):\n if pk:\n colegio = get_object_or_404(Colegio, pk=pk)\n request.session['colegio__pk'] = colegio.pk\n request.session['colegio__nombre'] = colegio.nombre\n return redirect('carga-horaria:home')\n colegios = get_objects_for_user(request.user, \"carga_horaria.change_colegio\", 
Colegio.objects.filter(periode=request.session.get('periodo', 2020)))\n return render(request, 'carga_horaria/switch.html', {'colegios': colegios})\n\n@login_required\ndef clear(request):\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n return redirect('carga-horaria:home')\n\n@login_required\ndef home(request):\n return render(request, 'carga_horaria/home.html')\n\n\n\n@login_required\ndef anexo(request, pk):\n p = get_object_or_404(Profesor, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/profesor/anexo_profesor.html',\n filename='anexo1.pdf',\n context={'profesor': p,\n 'colegio': colegio,\n 'periodo': request.session.get('periodo', 2020)},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef anexos(request):\n profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n mem_zip = io.BytesIO()\n with zipfile.ZipFile(mem_zip, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as zf:\n for pp in profesores:\n zf.writestr(*pp.generar_anexo_1())\n\n response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')\n response['Content-Disposition'] = 'attachment; filename=\"anexos1.zip\"'\n return response\n\n\n@login_required\ndef anexo_asistente(request, pk):\n p = get_object_or_404(Asistente, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/asistente/anexo_asistente.html',\n filename='anexo1.pdf',\n context={'profesor': p,\n 'colegio': colegio,\n 'periodo': request.session.get('periodo', 2020)},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef anexos_asistentes(request):\n profesores = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n mem_zip = io.BytesIO()\n with zipfile.ZipFile(mem_zip, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as zf:\n for pp in profesores:\n zf.writestr(*pp.generar_anexo_1())\n\n response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')\n response['Content-Disposition'] = 'attachment; filename=\"anexos1.zip\"'\n return response\n\n\n@login_required\ndef profesores_pdf(request):\n profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/profesor/listado_profesor_pdf.html',\n filename='listado_profesores.pdf',\n context={'profesores': profesores},\n show_content_in_browser=settings.DEBUG)\n return response\n\n\n@login_required\ndef asistentes_pdf(request):\n asistentes = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/asistente/listado_asistente_pdf.html',\n filename='listado_asistentes.pdf',\n context={'asistentes': asistentes},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef periodo_pdf(request, pk):\n periodo = get_object_or_404(Periodo, pk=pk)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/periodo/periodo_pdf.html',\n filename='carga_horaria.pdf',\n context={'object': periodo},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef plan_refresh(request, pk):\n plan = get_object_or_404(Plan, pk=pk)\n plan.refresh_asignaturas()\n messages.success(request, \"Se han actualizado los cursos 
asociados al plan ID: {}\".format(plan.pk))\n return redirect('carga-horaria:planes')\n\n# class AnexoView(PDFTemplateView):\n# template_name = 'carga_horaria/profesor/anexo_profesor.html'\n# filename = 'anexo1.pdf'\n\n# def get(self, request, *args, **kwargs):\n# pk = kwargs.pop('pk')\n# self.p = get_object_or_404(Profesor, pk=pk)\n# self.ax = [{'descripcion': 'Planificación', 'curso': '', 'horas': self.p.horas_planificacion},\n# {'descripcion': 'Recreo', 'curso': '', 'horas': self.p.horas_recreo}] + list(self.p.asignacionextra_set.all())\n# return super(AnexoView, self).get(request, *args, **kwargs)\n\n# def get_context_data(self, *args, **kwargs):\n# ctx = super(AnexoView, self).get_context_data(*args, **kwargs)\n# ctx.update({'asignaciones': self.p.asignacion_set.all(),\n# 'asignaciones_extra': self.ax,\n# 'profesor': self.p})\n\n# anexo = AnexoView.as_view()\n\n\n\"\"\"\n Comienzo Crud Periodos\n\"\"\"\nclass PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Periodo\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/periodo/listado_periodos.html'\n search_fields = ['nombre', 'colegio']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)\n ox = ctx['object_list']\n ordering = {str(value): index for index, value in enumerate(Nivel)}\n ctx['object_list'] = sorted(ox, key=lambda x: ordering[\"Nivel.\"+x.plan.nivel])\n # added for convenience, pasted from AsignaturaBaseListView\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n\n return qs\n\n\n\n\nclass PeriodoDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Periodo\n \"\"\"\n model = Periodo\n template_name = 'carga_horaria/periodo/detalle_periodo.html'\n\n\nclass PeriodoCreateView(LoginRequiredMixin, CreateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/nuevo_periodo.html'\n success_url = reverse_lazy('carga-horaria:periodos')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/editar_periodo.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:periodo',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\"\"\"\n Fin Crud Periodos\n\"\"\"\n\n\"\"\"\n Comienzo Crud Colegios\n\"\"\"\nclass ColegioListView(LoginRequiredMixin, 
GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin, DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n# success_message = u\"Nuevo periodo %(nombre)s creado satisfactoriamente.\"\n# error_message = \"Revise que todos los campos del formulario hayan sido validados correctamente.\"\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:colegio',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n \n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n\"\"\"\n Fin Crud Colegios\n\"\"\"\n\n\"\"\"\n Comienzo Crud Planes\n\"\"\"\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n# success_message = u\"Nuevo periodo %(nombre)s creado satisfactoriamente.\"\n# error_message = \"Revise que todos los campos del formulario hayan sido validados correctamente.\"\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n\n@login_required\ndef crear_desde_plantilla(request):\n if request.method == 'POST':\n form = PlantillaPlanForm(request.POST)\n if form.is_valid():\n plantilla = form.cleaned_data['plantilla']\n nivel = form.cleaned_data['nivel']\n\n colegio_pk = request.session.get('colegio__pk', None)\n if colegio_pk:\n colegio = Colegio.objects.get(pk=colegio_pk)\n nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)\n else:\n nuevo = Plan.objects.create(nivel=nivel)\n for ab in plantilla.asignaturabase_set.all():\n AsignaturaBase.objects.create(nombre=ab.nombre,\n plan=nuevo,\n horas_jec=ab.horas_jec,\n horas_nec=ab.horas_nec)\n return redirect('carga-horaria:planes')\n else:\n form = PlantillaPlanForm()\n return render(request, 'carga_horaria/plantilla.html', {'form': 
form})\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:plan',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n \n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\"\"\"\n Fin Crud Planes\n\"\"\"\n\n@login_required\ndef asignatura_limpiar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n aa.asignacion_set.all().delete()\n return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk}))\n\n\n@login_required\ndef asignatura_dif(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n\n if request.method == 'POST':\n # check first if there are any candidates for merging\n nombre = request.POST['asignatura']\n colegio_pk = request.session.get('colegio__pk', None)\n can_confirm = request.POST.get('can_confirm', False)\n if colegio_pk and Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre) and not can_confirm:\n ax = Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre).distinct()\n return render(request, 'carga_horaria/asignatura/asignatura_dif_confirm.html', {'object': pp,\n 'candidatas': ax})\n else:\n aa = Asignatura.objects.create(nombre=request.POST['asignatura'],\n diferenciada=True,\n horas=6)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pp.pk)\n return render(request, 'carga_horaria/asignatura/asignatura_dif.html', {'object': pp})\n\n\n@login_required\ndef asignatura_merge(request, pk, asignatura_pk):\n pp = get_object_or_404(Periodo, pk=pk)\n aa = get_object_or_404(Asignatura, pk=asignatura_pk)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pk)\n\n\n@login_required\ndef asignatura_maybe(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio, combinable=True).exclude(periodos__pk__in=[pk]).distinct()\n if candidatas:\n return render(request, 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp, 'candidatas': candidatas})\n else:\n return redirect('carga-horaria:asignatura__nuevo', pk)\n\n\n@login_required\ndef asignar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionForm(request.POST, asignatura=aa, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.asignatura = aa\n asignacion.save()\n return redirect('carga-horaria:periodo', periodo_pk)\n else:\n form = AsignacionForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar.html', {'object': aa,\n 'form': form})\n\n\n@login_required\ndef asignar_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]\n\n if request.method == 'POST':\n form = AsignacionFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n asignacion.save()\n 
return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,\n 'tipo': tipo_display,\n 'form': form})\n\n@login_required\ndef asignar_no_aula_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]\n\n if request.method == 'POST':\n form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula_fua.html', {'profesor': pp,\n 'tipo': tipo_display,\n 'form': form})\n\n\n\n@login_required\ndef asignar_extra(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionExtraForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_lectivas_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionExtraForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_extra.html', {'profesor': pp,\n 'form': form})\n\n\n@login_required\ndef asignar_no_aula(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula.html', {'profesor': pp,\n 'form': form})\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs['profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = 
super(AsignacionExtraUpdateView, self).get_context_data(*args, **kwargs)\n        ctx['profesor'] = self.object.profesor\n        return ctx\n\n    def get_form_kwargs(self, *args, **kwargs):\n        pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n\n        kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*args, **kwargs)\n        kwargs.update({'profesor': pp,\n                       'user': self.request.user,\n                       'colegio': self.request.session.get('colegio__pk', None)})\n        return kwargs\n\n    def form_valid(self, form):\n        asignacion = form.save(commit=False)\n        if asignacion.horas == 0:\n            # look up the previous hours on the AsignacionExtra model itself\n            asignacion_old = AsignacionExtra.objects.get(pk=asignacion.pk)\n            asignacion.horas = asignacion.profesor.horas_no_lectivas_disponibles + float(asignacion_old.horas)\n        asignacion.save()\n        return redirect(self.get_success_url())\n\n    def get_success_url(self):\n        return reverse(\n            'carga-horaria:profesor',\n            kwargs={\n                'pk': self.object.profesor.pk,\n            }\n        )\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n    model = AsignacionExtra\n    template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n    def get(self, request, *args, **kwargs):\n        return self.post(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse(\n            'carga-horaria:profesor',\n            kwargs={\n                'pk': self.object.profesor.pk,\n            }\n        )\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n    model = AsignacionNoAula\n    form_class = AsignacionNoAulaUpdateForm\n    template_name = 'carga_horaria/asignar_no_aula.html'\n\n    def form_valid(self, form):\n        asignacion = form.save(commit=False)\n        if asignacion.horas == 0:\n            asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n            asignacion.horas = asignacion.profesor.horas_no_aula_disponibles + asignacion_old.horas\n        asignacion.save()\n        return redirect(self.get_success_url())\n\n    def get_context_data(self, *args, **kwargs):\n        ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*args, **kwargs)\n        ctx['profesor'] = self.object.profesor\n        return ctx\n\n    def get_form_kwargs(self, *args, **kwargs):\n        pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n\n        kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*args, **kwargs)\n        kwargs.update({'profesor': pp,\n                       'user': self.request.user,\n                       'colegio': self.request.session.get('colegio__pk', None)})\n        return kwargs\n\n    def get_success_url(self):\n        return reverse(\n            'carga-horaria:profesor',\n            kwargs={\n                'pk': self.object.profesor.pk,\n            }\n        )\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n    model = AsignacionNoAula\n    template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n    def get(self, request, *args, **kwargs):\n        return self.post(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse(\n            'carga-horaria:profesor',\n            kwargs={\n                'pk': self.object.profesor.pk,\n            }\n        )\n\n\n@login_required\ndef asignar_asistente(request, pk, tipo):\n    pp = get_object_or_404(Asistente, pk=pk)\n    tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]\n\n    if request.method == 'POST':\n        form = AsignacionAsistenteForm(request.POST, asistente=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n        if form.is_valid():\n            asignacion = form.save(commit=False)\n            asignacion.asistente = pp\n            asignacion.tipo = tipo\n            # if asignacion.horas == 0:\n            #     asignacion.horas = pp.horas_no_lectivas_disponibles\n            asignacion.save()\n            return redirect('carga-horaria:asistente', pp.pk)\n    else:\n        form = 
AsignacionAsistenteForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_asistente.html', {'asistente': pp,\n 'form': form})\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:asistente',\n kwargs={\n 'pk': self.object.asistente.pk,\n }\n )\n\n@login_required\ndef profesores_info(request):\n output = io.BytesIO()\n\n # Create a workbook and add a worksheet.\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Profesores')\n \n # Some data we want to write to the worksheet.\n qs = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n\n # Start from the first cell. Rows and columns are zero indexed.\n row = 0\n col = 0\n\n # Iterate over the data and write it out row by row.\n worksheet.write(0, 0, 'RUT')\n worksheet.write(0, 1, 'Nombre Docente')\n worksheet.write(0, 2, 'Dirección Docente')\n worksheet.write(0, 3, 'Comuna')\n worksheet.write(0, 4, 'Nacionalidad')\n worksheet.write(0, 5, 'Teléfono')\n worksheet.write(0, 6, 'Email personal')\n worksheet.write(0, 7, 'Email institucional')\n worksheet.write(0, 8, 'Estado civil')\n worksheet.write(0, 9, 'Discapacidad')\n worksheet.write(0, 10, 'Recibe pensión')\n worksheet.write(0, 11, 'Adventista')\n worksheet.write(0, 12, 'Fecha de Nacimiento')\n worksheet.write(0, 13, 'Tipo de Contrato')\n worksheet.write(0, 14, 'Cargo')\n worksheet.write(0, 15, 'Fecha de Inicio Contrato')\n worksheet.write(0, 16, 'Horas Contrato Propuestas')\n worksheet.write(0, 17, 'Horas SBVG')\n worksheet.write(0, 18, 'Horas SEP')\n worksheet.write(0, 19, 'Horas PIE')\n worksheet.write(0, 20, 'Horas Indefinidas Actual')\n worksheet.write(0, 21, 'Horas Plazo Fijo Actual')\n worksheet.write(0, 22, 'Horas Jornada Semanal')\n worksheet.write(0, 23, 'Asignaciones Aula Plan')\n worksheet.write(0, 24, 'Horas Aula PIE')\n worksheet.write(0, 25, 'Horas Aula SEP')\n worksheet.write(0, 26, 'Horas Aula Sostenedor')\n worksheet.write(0, 27, 'Horas disponibles')\n worksheet.write(0, 28, 'Asignación No Lectiva')\n worksheet.write(0, 29, 'Horas no lectivas disponibles')\n worksheet.write(0, 30, 'Asignación No Aula Normal')\n worksheet.write(0, 31, 'Asignación No Aula PIE')\n worksheet.write(0, 32, 'Asignación No Aula SEP')\n worksheet.write(0, 33, 'Especialidad')\n worksheet.write(0, 34, 'Profesor Jefe')\n worksheet.write(0, 35, 'Fundación que lo contrata')\n worksheet.write(0, 36, 'Colegio')\n \n\n\n \n row = 1\n for pp in qs:\n worksheet.write(row, 0, pp.rut)\n worksheet.write(row, 1, pp.nombre)\n worksheet.write(row, 2, pp.direccion)\n worksheet.write(row, 3, pp.persona.comuna)\n worksheet.write(row, 4, pp.persona.nacionalidad)\n worksheet.write(row, 5, pp.persona.telefono)\n worksheet.write(row, 6, pp.persona.email_personal)\n worksheet.write(row, 7, pp.persona.email_institucional)\n worksheet.write(row, 8, pp.persona.get_estado_civil_display())\n worksheet.write(row, 9, 'Sí' if pp.persona.discapacidad else 'No')\n worksheet.write(row, 10, 'Sí' if pp.persona.recibe_pension else 'No')\n worksheet.write(row, 11, 'Sí' if pp.persona.adventista else 'No')\n worksheet.write(row, 12, pp.persona.fecha_nacimiento)\n worksheet.write(row, 13, pp.get_tipo_display())\n worksheet.write(row, 14, 
pp.get_cargo_display())\n worksheet.write(row, 15, pp.fecha_inicio)\n worksheet.write(row, 16, pp.horas_semanales_total)\n worksheet.write(row, 17, pp.horas_sbvg_total)\n worksheet.write(row, 18, pp.total_sep)\n worksheet.write(row, 19, pp.total_pie)\n worksheet.write(row, 20, pp.horas_indefinidas)\n worksheet.write(row, 21, pp.horas_plazo_fijo)\n worksheet.write(row, 22, pp.horas_semanales)\n worksheet.write(row, 23, pp.horas_asignadas_plan)\n worksheet.write(row, 24, pp.horas_asignadas_pie)\n worksheet.write(row, 25, pp.horas_asignadas_sep)\n worksheet.write(row, 26, pp.horas_asignadas_sostenedor)\n worksheet.write(row, 27, pp.horas_disponibles)\n worksheet.write(row, 28, pp.horas_no_lectivas_asignadas_anexo)\n worksheet.write(row, 29, pp.horas_no_lectivas_disponibles)\n worksheet.write(row, 30, pp.horas_no_aula_asignadas_ordinaria)\n worksheet.write(row, 31, pp.horas_no_aula_asignadas_pie)\n worksheet.write(row, 32, pp.horas_no_aula_asignadas_sep)\n worksheet.write(row, 33, str(pp.especialidad))\n worksheet.write(row, 34, pp.jefatura if pp.es_profesor_jefe else 'No')\n worksheet.write(row, 35, str(pp.fundacion))\n worksheet.write(row, 36, str(pp.colegio))\n\n row += 1\n\n workbook.close()\n output.seek(0)\n\n # Set up the Http response.\n filename = 'profesores-info.xlsx'\n response = HttpResponse(\n output,\n content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n )\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n return response\n\n\n@login_required\ndef asistentes_info(request):\n output = io.BytesIO()\n\n # Create a workbook and add a worksheet.\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Asistentes')\n \n # Some data we want to write to the worksheet.\n qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n\n # Start from the first cell. 
Rows and columns are zero indexed.\n    row = 0\n    col = 0\n\n    # Iterate over the data and write it out row by row.\n    worksheet.write(0, 0, 'RUT')\n    worksheet.write(0, 1, 'Nombre Asistente')\n    worksheet.write(0, 2, 'Fecha de Nacimiento')\n    worksheet.write(0, 3, 'Nacionalidad')\n    worksheet.write(0, 4, 'Dirección')\n    worksheet.write(0, 5, 'Comuna')\n    worksheet.write(0, 6, 'Teléfono')\n    worksheet.write(0, 7, 'Email personal')\n    worksheet.write(0, 8, 'Email institucional')\n    worksheet.write(0, 9, 'Estado civil')\n    worksheet.write(0, 10, 'Adventista')\n    worksheet.write(0, 11, 'Discapacidad')\n    worksheet.write(0, 12, 'Recibe pensión')\n    worksheet.write(0, 13, 'Fecha de Inicio Contrato')\n    worksheet.write(0, 14, 'Horas Contrato')\n    worksheet.write(0, 15, 'Función')\n    worksheet.write(0, 16, 'SEP')\n    worksheet.write(0, 17, 'PIE')\n    worksheet.write(0, 18, 'Sostenedor')\n    worksheet.write(0, 19, 'Fundación que lo contrata')\n    worksheet.write(0, 20, 'Colegio')\n\n    \n    row = 1\n    for pp in qs:\n        worksheet.write(row, 0, pp.rut)\n        worksheet.write(row, 1, pp.nombre)\n        worksheet.write(row, 2, pp.persona.fecha_nacimiento)\n        worksheet.write(row, 3, pp.persona.nacionalidad)\n        worksheet.write(row, 4, pp.persona.direccion)\n        worksheet.write(row, 5, pp.persona.comuna)\n        worksheet.write(row, 6, pp.persona.telefono)\n        worksheet.write(row, 7, pp.persona.email_personal)\n        worksheet.write(row, 8, pp.persona.email_institucional)\n        worksheet.write(row, 9, pp.persona.get_estado_civil_display())\n        worksheet.write(row, 10, 'Sí' if pp.persona.adventista else 'No')\n        worksheet.write(row, 11, 'Sí' if pp.persona.discapacidad else 'No')\n        worksheet.write(row, 12, 'Sí' if pp.persona.recibe_pension else 'No')\n        worksheet.write(row, 13, pp.fecha_inicio)\n        worksheet.write(row, 14, pp.horas)\n        worksheet.write(row, 15, pp.funcion)\n        worksheet.write(row, 16, pp.horas_sep)\n        worksheet.write(row, 17, pp.horas_pie)\n        worksheet.write(row, 18, pp.horas_sostenedor)\n        worksheet.write(row, 19, str(pp.fundacion))\n        worksheet.write(row, 20, str(pp.colegio))\n        row += 1\n\n    workbook.close()\n    output.seek(0)\n\n    # Set up the Http response.\n    filename = 'asistentes-info.xlsx'\n    response = HttpResponse(\n        output,\n        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n    )\n    response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n    return response\n","sub_path":"carga_horaria/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":36377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504444624","text":"import cv2\n\n# load the Haar cascade face detection model\nface_engine = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')\ncamera=cv2.VideoCapture(0)\n# 1. set up a named window\ncv2.namedWindow('shuoshuo')\n# 2. while frames are read correctly:\nwhile True:\n    # 2.1 read a frame from the camera\n    # 2.2 detect faces and draw a rectangle around each one\n    # 2.3 show the frame and pause briefly for a key press\n    # 3. release the capture device\n    # 4. close all windows\n    ret,frame=camera.read()\n    faces = face_engine.detectMultiScale(frame,scaleFactor=1.3, minNeighbors=5)\n    for (x,y,w,b) in faces:\n        cv2.rectangle(frame, (x, y), (x + w, y + b), (255, 255, 0), 2)\n    cv2.imshow('shuoshuo',frame)\n    # mask waitKey to a byte and compare with a key code; the original\n    # condition `& i==2` could never become true, so the loop never ended\n    if cv2.waitKey(5) & 0xFF == ord('q'):\n        break\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"371561451","text":"class Atomtype:\n\tdef __init__(self, inputType, inputMass):\n\t\tself.type = inputType\n\t\tself.mass = inputMass\n\nglobal atomtypes\natomtypes = 
{}\n\natomtypes[\"HYDROGEN\"] = Atomtype(\"H\", 1.0079)\natomtypes[\"CARBON\"] = Atomtype(\"C\", 12.0107)\natomtypes[\"NITROGEN\"] = Atomtype(\"N\", 14.0067)\natomtypes[\"OXYGEN\"] = Atomtype(\"O\", 15.999)\natomtypes[\"FLUORINE\"] = Atomtype(\"F\", 18.9984)\natomtypes[\"SILICON\"] = Atomtype(\"Si\", 28.0855)\natomtypes[\"PHOSPHORUS\"] = Atomtype(\"P\", 30.9738)\natomtypes[\"SULFUR\"] = Atomtype(\"S\", 32.065)\natomtypes[\"CHLORINE\"] = Atomtype(\"Cl\", 35.453)\natomtypes[\"SELENIUM\"] = Atomtype(\"Se\", 78.96)","sub_path":"Atomtypes.py","file_name":"Atomtypes.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"454061952","text":"from typing import List\nfrom collections import deque\n\n\nclass Node:\n    def __init__(self, val, children):\n        self.val = val\n        self.children = children\n\n\nclass Solution:\n    def levelOrder(self, root: Node) -> List[List[int]]:\n        if not root:\n            return []  # an empty tree has no levels\n\n        visited, queue = [], deque([root])\n        level = 0\n\n        while queue:\n            visited.append([])\n            length = len(queue)\n            for _ in range(length):\n                node = queue.popleft()\n                visited[level].append(node.val)\n                queue.extend(node.children)\n            level += 1\n\n        return visited\n","sub_path":"Week_03/id_34/leetcode-429.py","file_name":"leetcode-429.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"140932562","text":"import tensorflow.keras as keras\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport gensim\n\n###################################\n# Load models and preprocess data #\n###################################\n\nprint(\">>> Loading Doc2Vec and Node2Vec models...\")\n\n# loading node2vec model\nn2v_model = gensim.models.word2vec.Word2Vec.load('models/node2vec_model_64_15_100')\n# loading doc2vec model\nd2v_model = gensim.models.doc2vec.Doc2Vec.load('models/doc2vec_model_20_3_40')\n\nprint(\">>> Creating embedded dataset...\")\n# compute training set\nds_train = pd.read_csv(\"training.txt\", header=None, delimiter=' ').values\n\n# creates an embedding of each node as a 64-coordinate-vector given by the node2vec model\ndef create_XY(dataset):\n    X = []\n    Y = []\n    for ii in range(len(dataset)):\n        X.append(np.array(\n            [np.array(n2v_model.wv.get_vector(str(dataset[ii][0]))),\n             np.array(n2v_model.wv.get_vector(str(dataset[ii][1])))\n            ]))\n        Y.append(dataset[ii][2])\n    return np.array(X), np.array(Y)\n\n# creates an embedding of each node as a (64+20)-coordinate-vector given by the node2vec and the doc2vec models\ndef create_with_textdata_XY(dataset):\n    X = []\n    Y = []\n    for ii in range(len(dataset)):\n        X.append(np.array(\n            [np.concatenate((n2v_model.wv.get_vector(str(dataset[ii][0])), d2v_model.docvecs[dataset[ii][0]])),\n             np.concatenate((n2v_model.wv.get_vector(\n                 str(dataset[ii][1])), d2v_model.docvecs[dataset[ii][1]]))\n            ]))\n        Y.append(dataset[ii][2])\n    return np.array(X), np.array(Y)\n\n\nX, Y = create_with_textdata_XY(ds_train)\n\n###################################\n# Create and train neural network #\n###################################\n\nprint(\">>> Creating and training neural network...\")\n\nlayers = [\n    keras.layers.Flatten(input_shape=(2, 84)),\n    keras.layers.Dense(32, activation=\"relu\"),\n    keras.layers.Dense(10, activation=\"softmax\"),\n    keras.layers.Dense(1, activation=\"sigmoid\") # to get a 0-1 probability as a prediction\n]\n\nnn_model = keras.Sequential(layers)\n\nnn_model.compile(optimizer='sgd',\n                 
loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# callback to stop when val_loss increases, prevent overfitting\nes_callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)\n\nhistory = nn_model.fit(X, Y, batch_size=64, epochs=40, validation_split=0.2, callbacks=[es_callback])\n\nprint(\"\")\nprint(\">>> Saving model...\")\nnn_model.save(\"models/nn_model_32_20_1.h5\")","sub_path":"code/create_nn_model.py","file_name":"create_nn_model.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81582354","text":"# -*- coding: utf-8 -*-\nfrom calendar import monthrange\nfrom datetime import date\nfrom decimal import Decimal\n\nfrom dateutil.rrule import rrule, MONTHLY\nfrom extended_choices import Choices\nimport numpy as np\n\nfrom .date_tools import get_workdays\n\n\nRATE_TYPES = Choices(\n ('DAY', 1, 'Daily rate'),\n ('MONTH', 2, 'Monthly salary'),\n ('YEAR', 3, 'Yearly salary'),\n)\n\n\ndef today():\n return date.today() # pragma: no cover\n\n\ndef dec_workdays(start_date, end_date):\n \"\"\"\n Returns workdays in Decimal\n :param start_date: date object\n :param end_date: date object\n :return: Decimal object - number of workdays between\n \"\"\"\n return Decimal(get_workdays(start_date, end_date))\n\n\ndef last_date_in_month(d):\n \"\"\"\n Returns the date of the last day in the month\n :param d: date object\n :return: date object = last day of month\n \"\"\"\n day, number = monthrange(d.year, d.month)\n return date(d.year, d.month, number)\n\n\ndef month_segments(converter, start_date, end_date):\n \"\"\"\n Split in to segments of months between start_date and end_date\n :param converter: RateConverter object\n :param start_date: date object - start date of segments\n :param end_date: date object - end date of segments\n :return: list of tuples with (start, end, rate) for each month\n \"\"\"\n def rate(d):\n return converter.rate / dec_workdays(*converter.get_date_range(d))\n\n months = [(dt.date(), last_date_in_month(dt), rate(dt.date())) for dt in\n rrule(MONTHLY, dtstart=start_date, until=end_date)]\n # include last month\n months.append(\n (date(end_date.year, end_date.month, 1), end_date, rate(end_date))\n )\n return months\n\n\ndef average_rate_from_segments(segments, total_workdays):\n \"\"\"\n Returns average rate for a list of date segments\n :param segments: list of tuples (start (date object), end (date object),\n rate (Decimal object))\n :param total_workdays: total number of workdays to average over\n :return: Decimal object - average day rate over segments\n \"\"\"\n if total_workdays:\n weights = []\n rates = []\n\n for start, end, rate in segments:\n # numpy won't average decimals\n # this seems close enough though\n weight = dec_workdays(start, end) / total_workdays\n if weight:\n weights.append(float(weight))\n rates.append(float(rate))\n\n if rates and weights:\n return round(Decimal(np.average(rates, weights=weights)), 2)\n\n\nclass RateConverter():\n \"\"\"\n RateConverter converts yearly salary and monthly salary to day rate\n \"\"\"\n def __init__(self, rate, rate_type=RATE_TYPES.DAY):\n \"\"\"\n param: rate: Decimal object\n param: rate_type: int object - one of RATE_TYPES\n \"\"\"\n self.rate = rate\n self.rate_type = rate_type\n\n def _year_date_range(self, on):\n \"\"\"\n Returns dated for beginning and end of year\n :param on: date object\n :return: tuple object - beginning and end of year\n \"\"\"\n year = on.year\n return date(on.year, 1, 1), 
date(year, 12, 31)\n\n def _month_date_range(self, on):\n \"\"\"\n Returns dated for beginning and end of month\n :param on: date object\n :return: tuple object - beginning and end of month\n \"\"\"\n year = on.year\n month = on.month\n return date(year, month, 1), last_date_in_month(on)\n\n @property\n def range_method(self):\n return '_{}_date_range'.format(\n RATE_TYPES.for_value(self.rate_type).constant.lower())\n\n @property\n def get_date_range(self):\n return getattr(self, self.range_method)\n\n def rate_on(self, on=None):\n \"\"\"\n Returns an average day rate over a time period\n\n param: start_date: date object - beginning of time period for average\n param: end_date: date - object end of time period for average\n return: Decimal object - average day rate\n \"\"\"\n if self.rate_type == RATE_TYPES.DAY:\n return self.rate\n\n if not on:\n on = today()\n\n start_date, end_date = self.get_date_range(on)\n # No point continuing although would be same result\n return round(self.rate / dec_workdays(start_date, end_date), 2)\n\n def rate_between(self, start_date, end_date):\n \"\"\"\n Returns an average day rate over a time period\n\n param: start_date: date object - beginning of time period for average\n param: end_date: date - object end of time period for average\n return: Decimal object - average day rate\n \"\"\"\n if self.rate_type == RATE_TYPES.DAY:\n return self.rate\n\n total_workdays = dec_workdays(start_date, end_date)\n segments = month_segments(self, start_date, end_date)\n\n return average_rate_from_segments(segments, total_workdays)\n","sub_path":"dashboard/libs/rate_converter.py","file_name":"rate_converter.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525339988","text":"\"\"\"\nRuchi Jain B54\nKatrina Lee B58\nDealOrNoDealGUI.py\n\nANALYSIS:\nThis program writes a GUI that simulates Deal Or No Deal.\n1. Output to GUI labels:\n self.__offer\n self.__winnings\n2. Input to GUI\n3. Import modules\n tkinter\n Player\n Briefcase\n Teller\n4. Uses classes:\n - Player\n - Briefcase\n - Teller\n5. 
Contains classes:\n DealOrNoDealGUI\n\"\"\"\n\nfrom random import *\nfrom tkinter import *\nimport tkinter.messagebox as tkMessageBox\nfrom Player import Player\nfrom Briefcase import Briefcase\nfrom Teller import Teller\n\nclass DealOrNoDealGUI:\n\n#-- Class Variables -------------------------------------------------------\n\n BRIEFCASE_VALUES = [1,5,10,15,25,50,75,85,90,100]\n \"\"\"used to create briefcases with random values\"\"\"\n\n LOWEST_CASE = 1\n \"\"\"lowest number of case\"\"\"\n\n LIMIT = 11\n \"\"\"upper limit of case numbers\"\"\"\n \n \"\"\"\n Model: self.__teller (Teller)\n self.__player (Player)\n View widgets:\n self.__header (Label)\n self.__entryLabel (Label)\n self.__cases (Label)\n self.__tellerStr (StringVar)\n self.__playerStr (StringVar)\n self.__caseText1 - self.__caseText10 (StringVar)\n self.__winnings (StringVar)\n Control widgets:\n self.__entry (Entry)\n self.__dealButton (Button)\n self.__noDealButton (Button)\n self.__button1 - self.__button10 (Button)\n self.__closeWindowButton (Button)\n Organizational widgets:\n self.__mainWindow (Tk)\n Other instance variables\n self.__case1 - self.__case10 (Briefcase)\n self.__allCases (Dict) - set of all cases, their integer numbers as keys\n - set of all case buttons, number-stringed as keys\n \"\"\"\n#-- Constructor -----------------------------------------------------------\n\n def __init__(self):\n \"\"\"Creates DealOrNoDealGUI object to model interface of the game\n creates: Teller object\n Player object\n Briefcase objects\n Tk object\n Entry object\n Button object\n Label object\n StringVar object\n invokes: bind (Entry)\n set (StringVar)\n grid (Entry, Button, Label)\n mainloop() (Tkinter)\n randint\n pop (List)\"\"\"\n \n # Create an instance of the Teller class (Teller)\n self.__teller = Teller()\n \n # Create an instance of the Player class (Player)\n self.__player = Player()\n\n # Create empty dictionary\n self.__allCases = {}\n\n # Create briefcases with random values\n # Add briefcases to dictionary using their numbers as keys\n self.__case1 = Briefcase(1,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,9)))\n self.__allCases[1] = self.__case1\n self.__case2 = Briefcase(2,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,8)))\n self.__allCases[2] = self.__case2\n self.__case3 = Briefcase(3,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,7)))\n self.__allCases[3] = self.__case3\n self.__case4 = Briefcase(4,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,6)))\n self.__allCases[4] = self.__case4\n self.__case5 = Briefcase(5,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,5)))\n self.__allCases[5] = self.__case5\n self.__case6 = Briefcase(6,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,4)))\n self.__allCases[6] = self.__case6\n self.__case7 = Briefcase(7,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,3)))\n self.__allCases[7] = self.__case7\n self.__case8 = Briefcase(8,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,2)))\n self.__allCases[8] = self.__case8\n self.__case9 = Briefcase(9,DealOrNoDealGUI.BRIEFCASE_VALUES.pop(randint(0,1)))\n self.__allCases[9] = self.__case9\n self.__case10 = Briefcase(10,DealOrNoDealGUI.BRIEFCASE_VALUES[0])\n self.__allCases[10] = self.__case10\n\n # Create a main window\n self.__mainWindow = Tk()\n\n # Create header\n self.__header = Label(self.__mainWindow, text='DEAL OR NO DEAL (FOR KIDS)',\\\n fg = \"#c1af17\")\n self.__header.grid(row=0,columnspan=3)\n\n # create entry label and box for player to get a briefcase\n self.__entryLabel = Label(self.__mainWindow, \\\n text = 'Enter 
number of case: ')\n        self.__entryLabel.grid(row=1,column=0)\n        self.__entry = Entry(self.__mainWindow)\n        self.__entry.grid(row=1,column=1,sticky=W)\n        # pressing Enter in the entry box submits the chosen case number\n        self.__entry.bind('<Return>', self.assignCase)\n\n        # create header for cases - make it gold\n        self.__casesLabel = Label(self.__mainWindow,\\\n                            text='CASES', fg = \"#84780e\")\n        self.__casesLabel.grid(row=2,columnspan=3)\n\n        # create briefcase buttons and labels for each briefcase\n        # ten times - comments modeled once\n\n        # create image for button\n        caseImage1 = PhotoImage(file = '1.gif')\n        self.__button1 = Button(self.__mainWindow, image = caseImage1,\\\n                           command = self.openCase1)\n        self.__button1.grid(row=3,column=0)\n        # add button to dictionary using the string of its number for a key\n        self.__allCases['1'] = self.__button1\n        # create a string variable for the case\n        self.__caseText1 = StringVar()\n        # set string variable to string of briefcase\n        self.__caseText1.set(str(self.__case1))\n        # create label for briefcase\n        self.__caseLabel1 = Label(self.__mainWindow,textvariable = self.__caseText1)\n        self.__caseLabel1.grid(row=3,column=1,columnspan=2,sticky=W)\n\n        caseImage2 = PhotoImage(file = '2.gif')\n        self.__button2 = Button(self.__mainWindow, image = caseImage2,\\\n                           command = self.openCase2)\n        self.__button2.grid(row=4,column=0)\n        self.__allCases['2'] = self.__button2\n        self.__caseText2 = StringVar()\n        self.__caseText2.set(str(self.__case2))\n        self.__caseLabel2 = Label(self.__mainWindow,textvariable = self.__caseText2)\n        self.__caseLabel2.grid(row=4,column=1,columnspan=2,sticky=W)\n        \n        caseImage3 = PhotoImage(file = '3.gif')\n        self.__button3 = Button(self.__mainWindow, image = caseImage3,\\\n                           command = self.openCase3)\n        self.__button3.grid(row=5,column=0)\n        self.__allCases['3'] = self.__button3\n        self.__caseText3 = StringVar()\n        self.__caseText3.set(str(self.__case3))\n        self.__caseLabel3 = Label(self.__mainWindow,textvariable = self.__caseText3)\n        self.__caseLabel3.grid(row=5,column=1,columnspan=2,sticky=W)\n\n        caseImage4 = PhotoImage(file = '4.gif')\n        self.__button4 = Button(self.__mainWindow, image = caseImage4,\\\n                           command = self.openCase4)\n        self.__allCases['4'] = self.__button4\n        self.__button4.grid(row=6,column=0)\n        self.__caseText4 = StringVar()\n        self.__caseText4.set(str(self.__case4))\n        self.__caseLabel4 = Label(self.__mainWindow,textvariable = self.__caseText4)\n        self.__caseLabel4.grid(row=6,column=1,columnspan=2,sticky=W)\n\n        caseImage5 = PhotoImage(file = '5.gif')\n        self.__button5 = Button(self.__mainWindow, image = caseImage5,\\\n                           command = self.openCase5)\n        self.__allCases['5'] = self.__button5\n        self.__button5.grid(row=7,column=0)\n        self.__caseText5 = StringVar()\n        self.__caseText5.set(str(self.__case5))\n        self.__caseLabel5 = Label(self.__mainWindow,textvariable = self.__caseText5)\n        self.__caseLabel5.grid(row=7,column=1,columnspan=2,sticky=W)\n\n        caseImage6 = PhotoImage(file = '6.gif')\n        self.__button6 = Button(self.__mainWindow, image = caseImage6,\\\n                           command = self.openCase6)\n        self.__allCases['6'] = self.__button6\n        self.__button6.grid(row=8,column=0)\n        self.__caseText6 = StringVar()\n        self.__caseText6.set(str(self.__case6))\n        self.__caseLabel6 = Label(self.__mainWindow,textvariable = self.__caseText6)\n        self.__caseLabel6.grid(row=8,column=1,columnspan=2,sticky=W)\n\n        caseImage7 = PhotoImage(file = '7.gif')\n        self.__button7 = Button(self.__mainWindow, image = caseImage7,\\\n                           command = self.openCase7)\n        self.__allCases['7'] = self.__button7\n        self.__button7.grid(row=9,column=0)\n        self.__caseText7 = StringVar()\n        
self.__caseText7.set(str(self.__case7))\n self.__caseLabel7 = Label(self.__mainWindow,textvariable = self.__caseText7)\n self.__caseLabel7.grid(row=9,column=1,columnspan=2,sticky=W)\n\n caseImage8 = PhotoImage(file = '8.gif')\n self.__button8 = Button(self.__mainWindow, image = caseImage8,\\\n command = self.openCase8)\n self.__allCases['8'] = self.__button8\n self.__button8.grid(row=10,column=0)\n self.__caseText8 = StringVar()\n self.__caseText8.set(str(self.__case8))\n self.__caseLabel8 = Label(self.__mainWindow,textvariable = self.__caseText8)\n self.__caseLabel8.grid(row=10,column=1,columnspan=2,sticky=W)\n\n caseImage9 = PhotoImage(file = '9.gif')\n self.__button9 = Button(self.__mainWindow, image = caseImage9,\\\n command = self.openCase9)\n self.__allCases['9'] = self.__button9\n self.__button9.grid(row=11,column=0)\n self.__caseText9 = StringVar()\n self.__caseText9.set(str(self.__case9))\n self.__caseLabel9 = Label(self.__mainWindow,textvariable = self.__caseText9)\n self.__caseLabel9.grid(row=11,column=1,columnspan=2,sticky=W)\n\n caseImage10 = PhotoImage(file = '10.gif')\n self.__button10 = Button(self.__mainWindow, image = caseImage10,\\\n command = self.openCase10)\n self.__allCases['10'] = self.__button10\n self.__button10.grid(row=12,column=0)\n self.__caseText10 = StringVar()\n self.__caseText10.set(str(self.__case10))\n self.__caseLabel10 = Label(self.__mainWindow,textvariable=self.__caseText10)\n self.__caseLabel10.grid(row=12,column=1,columnspan=2,sticky=W)\n\n self.disableAll()\n \n self.__playerStr = StringVar()\n self.__playerStr.set(str(self.__player))\n self.__playerLabel = Label(self.__mainWindow,textvariable=self.__playerStr)\n self.__playerLabel.grid(row=13,columnspan=3)\n\n self.__tellerStr = StringVar()\n self.__tellerStr.set(str(self.__teller))\n self.__tellerLabel = Label(self.__mainWindow,textvariable=self.__tellerStr)\n self.__tellerLabel.grid(row=14,columnspan=3)\n\n self.__previousOffers = StringVar()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.__previousOffersLabel = Label(self.__mainWindow,\\\n textvariable=self.__previousOffers)\n self.__previousOffersLabel.grid(row= 15,columnspan=3)\n\n #Create DEAL button\n self.__dealButton = Button(self.__mainWindow,text='DEAL',\\\n background = \"#c1af17\", command=self.deal)\n self.__dealButton.configure(state='disabled')\n self.__dealButton.grid(row=16,column=0)\n\n #Create NO DEAL button\n self.__noDealButton = Button(self.__mainWindow,text='NO DEAL',\\\n background = \"#c1af17\", command=self.noDeal)\n self.__noDealButton.configure(state='disabled')\n self.__noDealButton.grid(row=16,column=1)\n\n #Create Quit button\n self.__closeWindowButton = Button(self.__mainWindow,text='Quit',\\\n background = \"white\", command=self.__mainWindow.destroy)\n self.__closeWindowButton.grid(row=16,column=2)\n mainloop()\n\n#-- Predicates -----------------------------------------------------------\n\n # determines if the case being opened is the players case\n # params: case (Briefcase)\n # invokes getBriefcase (player)\n # returns True or False (bool)\n def thisIsPlayersCase(self,case):\n \"\"\"determines if this is the player's case\"\"\"\n return self.__player.getBriefcase() == case\n\n#-- Mutators -------------------------------------------------------------\n\n # Enable all unopened cases after player selects their case\n # invokes: isOpen, isReserved (Briefcase)\n def enableAllUnopened(self):\n for x in range(DealOrNoDealGUI.LOWEST_CASE,DealOrNoDealGUI.LIMIT):\n if not 
(self.__allCases.get(x).isOpen() or \\\n self.__allCases.get(x).isReserved()):\n self.__allCases.get(str(x)).configure(state='normal')\n\n #Disables all buttons for cases\n def disableAll(self):\n for x in range(DealOrNoDealGUI.LOWEST_CASE,DealOrNoDealGUI.LIMIT):\n self.__allCases.get(str(x)).configure(state='disabled')\n\n # Activate DEAL and NO DEAL buttons when an offer is made\n # invokes: isEnding() (Teller)\n def enableBusiness(self):\n if not self.__teller.isEnding():\n self.__dealButton.configure(state='normal')\n self.__noDealButton.configure(state='normal')\n\n #Disable DEAL and NO DEAL buttons\n def disableBusiness(self):\n self.__dealButton.configure(state='disabled')\n self.__noDealButton.configure(state='disabled')\n\n # calls teller to make offers to player\n # invokes offerPlayer (teller)\n def tellerActions(self):\n self.__teller.offerPlayer(self.__player)\n self.__tellerStr.set(str(self.__teller))\n\n # sets status of player depending upon where in the game it is at\n # invokes: isAccepted() (Player), isEnding() Teller\n def setPlayerStatus(self):\n if not self.__player.isAccepted():\n if self.__teller.isEnding():\n self.__playerStr.set('Your case had $%.2f'%self.__player.getCaseValue())\n else:\n self.__playerStr.set(str(self.__player))\n else:\n self.__playerStr.set('You win ' + self.__player.getWinnings() + '!')\n\n # Display amount in player's case when game is over\n # Disables all buttons\n # invokes disableAll(), disableBusiness() \n def endGame(self):\n self.disableAll()\n self.disableBusiness()\n self.__playerStr.set('Your case had $%.2f'%self.__player.getCaseValue()+\\\n '\\nYou win ' + self.__player.getWinnings() + '!')\n \n#-- Event Handlers ---------------------------------------------------------\n\n # Invoke error trap when invalid case number is entered\n def assignCase(self,event):\n # try to get briefcase based on users entry and assign it to player\n try:\n self.__player.addBriefcase(self.__allCases.get(int(self.__entry.get())))\n # open messagebox displaying error\n except:\n tkMessageBox.showerror('Error',\\\n 'Error: Enter a valid case number.')\n else:\n self.__entry.configure(state='disabled')\n self.enableAllUnopened()\n self.__playerStr.set(str(self.__player))\n\n # Activate all unopened cases when a deal has been made\n # Inactivate any deals\n def deal(self):\n self.disableBusiness()\n self.enableAllUnopened()\n self.__player.acceptOffer()\n self.__allCases.get(self.__entry.get()).configure(state = 'normal')\n self.setPlayerStatus()\n\n # Activate all unopened cases when a deal has been made\n # inactivate all business\n def noDeal(self):\n self.disableBusiness()\n self.enableAllUnopened()\n self.setPlayerStatus()\n\n#-- Open Case Functions ---------------------------------------------------\n \n # if the case does not belong to the player already\n # opens a case, prompts teller to make an offer to player\n # deactivates all case openings\n # activates deal or no deal buttons\n # otherwise prompts the termination of the game\n def openCase1(self):\n if not self.thisIsPlayersCase(self.__case1):\n self.__case1.openCase(self.__teller)\n self.__caseText1.set(str(self.__case1))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase2(self):\n if not self.thisIsPlayersCase(self.__case2):\n self.__case2.openCase(self.__teller)\n self.tellerActions()\n self.__caseText2.set(str(self.__case2))\n 
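# disable every case button while the banker's offer is pending; DEAL/NO DEAL are enabled next\n            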
self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase3(self):\n if not self.thisIsPlayersCase(self.__case3):\n self.__case3.openCase(self.__teller)\n self.tellerActions()\n self.__caseText3.set(str(self.__case3))\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase4(self):\n if not self.thisIsPlayersCase(self.__case4):\n self.__case4.openCase(self.__teller)\n self.tellerActions()\n self.__caseText4.set(str(self.__case4))\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase5(self):\n if not self.thisIsPlayersCase(self.__case5):\n self.__case5.openCase(self.__teller)\n self.__caseText5.set(str(self.__case5))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase6(self):\n if not self.thisIsPlayersCase(self.__case6):\n self.__case6.openCase(self.__teller)\n self.__caseText6.set(str(self.__case6))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase7(self):\n if not self.thisIsPlayersCase(self.__case7):\n self.__case7.openCase(self.__teller)\n self.tellerActions()\n self.__caseText7.set(str(self.__case7))\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase8(self):\n if not self.thisIsPlayersCase(self.__case8):\n self.__case8.openCase(self.__teller)\n self.__caseText8.set(str(self.__case8))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase9(self):\n if not self.thisIsPlayersCase(self.__case9):\n self.__case9.openCase(self.__teller)\n self.__caseText9.set(str(self.__case9))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n \n def openCase10(self):\n if not self.thisIsPlayersCase(self.__case10):\n self.__case10.openCase(self.__teller)\n self.__caseText10.set(str(self.__case10))\n self.tellerActions()\n self.disableAll()\n self.enableBusiness()\n self.__previousOffers.set(self.__player.getBankOffersListStr())\n self.setPlayerStatus()\n else:\n self.endGame()\n\n# create instance of GUI class \nDealNoDeal = DealOrNoDealGUI()\n","sub_path":"DOrNoDGUI.py","file_name":"DOrNoDGUI.py","file_ext":"py","file_size_in_byte":18854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247817895","text":"from rl.environment import Stonks\nfrom rl.ppo import PPO\nfrom rl.utils.callbacks import TestOnEpochEndCallback, SaveLoggerCallback\nfrom rl.utils.epochs import Epochs\nfrom rl.utils.logger import Logger\n\nimport sys\n\nLEARN_STEPS = 20\nBATCH_SIZE = 5\nTRAIN_EPOCHS = 4\nLR = 0.0001\n\nWINDOW_SIZE = 100\nHIDDEN_DIMS = 24\nUSE_SENTIMENT = False\n\n\nif __name__ == '__main__':\n\n path = 
\"../data/trade/{}.csv\"\n log_path = \"../data/trade/{}_log.csv\"\n crypto = \"bitcoin\"\n date_types = [\"_ta_sa\",\n # \"_ta_sa_metrics\",\n \"_sa\", \"_ta\", \"_p\"]\n run_type = f\"{crypto}{date_types[0]}\"\n if len(sys.argv) > 1:\n run_type = f\"{crypto}{date_types[int(sys.argv[1])]}\"\n\n path = path.format(run_type)\n log_path = log_path.format(run_type)\n print(path)\n\n env = Stonks(\n currency=\"BTC\",\n # use_sentiment=USE_SENTIMENT,\n window_size=WINDOW_SIZE,\n training_dataset_filepath=path\n )\n\n testing_env = Stonks(\n currency=\"BTC\",\n # use_sentiment=USE_SENTIMENT,\n window_size=WINDOW_SIZE,\n testing=True,\n training_dataset_filepath=path\n )\n\n # print(f\"{'No ' if not USE_SENTIMENT else ''}Sentiment\")\n\n agent = PPO(\n input_dims=env.observation_shape,\n n_actions=env.actions,\n batch_size=BATCH_SIZE,\n alpha=LR,\n n_epochs=TRAIN_EPOCHS,\n dims=HIDDEN_DIMS\n )\n\n logger = Logger(plot=f\" Trading with {run_type} ↗\")\n testing_callback = TestOnEpochEndCallback(testing_env, agent, render=False, action=lambda a: a[0])\n saving_callback = SaveLoggerCallback(logger, log_path)\n\n for epoch, episode in Epochs(50, 25, callbacks=[testing_callback, saving_callback]):\n observation = env.reset()\n done = False\n while not done:\n action, prob, val = agent.choose_action(observation)\n\n observation_, reward, done, info = env.step(action)\n\n agent.remember(observation, action, prob, val, reward, done)\n logger.log(epoch, episode, reward, info[\"assets\"], action, done)\n observation = observation_\n\n","sub_path":"src/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"56524315","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Face Quality Assessment train.\"\"\"\nimport os\nimport time\nimport datetime\nimport numpy as np\nimport random\nimport argparse\nimport warnings\n\nimport mindspore\nfrom mindspore import context\nfrom mindspore import Tensor\nfrom mindspore.context import ParallelMode\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom mindspore.train.callback import ModelCheckpoint, RunContext, _InternalCallbackParam, CheckpointConfig\nfrom mindspore.nn import TrainOneStepCell\nfrom mindspore.nn.optim import Momentum\nfrom mindspore.communication.management import get_group_size, init, get_rank\n\nfrom src.loss import CriterionsFaceQA\nfrom src.config import faceqa_1p_cfg, faceqa_8p_cfg\nfrom src.face_qa import FaceQABackbone, BuildTrainNetwork\nfrom src.lr_generator import warmup_step\nfrom src.dataset import faceqa_dataset\nfrom src.log import get_logger, AverageMeter\n\nwarnings.filterwarnings('ignore')\ndevid = int(os.getenv('DEVICE_ID'))\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", save_graphs=True, 
device_id=devid)\nrandom.seed(1)\nnp.random.seed(1)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Face Quality Assessment')\n parser.add_argument('--is_distributed', type=int, default=0, help='if multi device')\n parser.add_argument('--train_label_file', type=str, default='', help='image label list file, e.g. /home/label.txt')\n parser.add_argument('--pretrained', type=str, default='', help='pretrained model to load')\n\n args = parser.parse_args()\n\n if args.is_distributed == 0:\n cfg = faceqa_1p_cfg\n else:\n cfg = faceqa_8p_cfg\n\n cfg.data_lst = args.train_label_file\n cfg.pretrained = args.pretrained\n\n # Init distributed\n if args.is_distributed:\n init()\n cfg.local_rank = get_rank()\n cfg.world_size = get_group_size()\n parallel_mode = ParallelMode.DATA_PARALLEL\n else:\n parallel_mode = ParallelMode.STAND_ALONE\n\n # parallel_mode 'STAND_ALONE' do not support parameter_broadcast and mirror_mean\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=cfg.world_size,\n gradients_mean=True)\n\n mindspore.common.set_seed(1)\n\n # logger\n cfg.outputs_dir = os.path.join(cfg.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n cfg.logger = get_logger(cfg.outputs_dir, cfg.local_rank)\n loss_meter = AverageMeter('loss')\n\n # Dataloader\n cfg.logger.info('start create dataloader')\n de_dataset = faceqa_dataset(imlist=cfg.data_lst, local_rank=cfg.local_rank, world_size=cfg.world_size,\n per_batch_size=cfg.per_batch_size)\n cfg.steps_per_epoch = de_dataset.get_dataset_size()\n de_dataset = de_dataset.repeat(cfg.max_epoch)\n de_dataloader = de_dataset.create_tuple_iterator(output_numpy=True)\n # Show cfg\n cfg.logger.save_args(cfg)\n cfg.logger.info('end create dataloader')\n\n # backbone and loss\n cfg.logger.important_info('start create network')\n create_network_start = time.time()\n\n network = FaceQABackbone()\n criterion = CriterionsFaceQA()\n \n # load pretrain model\n if os.path.isfile(cfg.pretrained):\n param_dict = load_checkpoint(cfg.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('network.'):\n param_dict_new[key[8:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n cfg.logger.info('load model {} success'.format(cfg.pretrained))\n\n # optimizer and lr scheduler\n lr = warmup_step(cfg, gamma=0.9)\n opt = Momentum(params=network.trainable_params(), \n learning_rate=lr, \n momentum=cfg.momentum, \n weight_decay=cfg.weight_decay, \n loss_scale=cfg.loss_scale)\n\n # package training process, adjust lr + forward + backward + optimizer\n train_net = BuildTrainNetwork(network, criterion)\n train_net = TrainOneStepCell(train_net, opt, sens=cfg.loss_scale,) \n\n # checkpoint save\n if cfg.local_rank == 0:\n ckpt_max_num = cfg.max_epoch * cfg.steps_per_epoch // cfg.ckpt_interval\n train_config = CheckpointConfig(save_checkpoint_steps=cfg.ckpt_interval, keep_checkpoint_max=ckpt_max_num)\n ckpt_cb = ModelCheckpoint(config=train_config, directory=cfg.outputs_dir, prefix='{}'.format(cfg.local_rank))\n cb_params = _InternalCallbackParam()\n cb_params.train_network = train_net\n cb_params.epoch_num = ckpt_max_num\n cb_params.cur_epoch_num = 1\n run_context = RunContext(cb_params)\n ckpt_cb.begin(run_context)\n\n train_net.set_train()\n t_end = time.time()\n t_epoch = time.time()\n old_progress = -1\n\n cfg.logger.important_info('====start train====')\n for i, (data, gt) in 
enumerate(de_dataloader):\n # clean grad + adjust lr + put data into device + forward + backward + optimizer, return loss\n data = data.astype(np.float32)\n gt = gt.astype(np.float32)\n data = Tensor(data)\n gt = Tensor(gt)\n\n loss = train_net(data, gt)\n loss_meter.update(loss.asnumpy())\n\n # ckpt\n if cfg.local_rank == 0:\n cb_params.cur_step_num = i + 1 # current step number\n cb_params.batch_num = i + 2\n ckpt_cb.step_end(run_context)\n\n # logging loss, fps, ...\n if i == 0:\n time_for_graph_compile = time.time() - create_network_start\n cfg.logger.important_info('{}, graph compile time={:.2f}s'.format(cfg.task, time_for_graph_compile))\n\n if i % cfg.log_interval == 0 and cfg.local_rank == 0:\n time_used = time.time() - t_end\n epoch = int(i / cfg.steps_per_epoch)\n fps = cfg.per_batch_size * (i - old_progress) * cfg.world_size / time_used\n cfg.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(epoch, i, loss_meter, fps))\n t_end = time.time()\n loss_meter.reset()\n old_progress = i\n\n if i % cfg.steps_per_epoch == 0 and cfg.local_rank == 0:\n epoch_time_used = time.time() - t_epoch\n epoch = int(i / cfg.steps_per_epoch)\n fps = cfg.per_batch_size * cfg.world_size * cfg.steps_per_epoch / epoch_time_used\n cfg.logger.info('=================================================')\n cfg.logger.info('epoch time: epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i, fps))\n cfg.logger.info('=================================================')\n t_epoch = time.time()\n\n cfg.logger.important_info('====train end====')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"built-in/MindSpore/Research/cv/image_classification/FaceQualityAssessment_for_MindSpore/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"982359","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\n\nfrom models import FNN2d\n\nfrom tqdm import tqdm\nfrom timeit import default_timer\nfrom utils import count_params\nfrom data_utils import DataConstructor\nfrom losses import LpLoss, PINO_loss\n\ntry:\n import wandb\nexcept ImportError:\n wandb = None\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nntrain = 1000\nntest = 200\n\nsub = 8 # subsampling rate\nh = 2**10 // sub\ns = h\nsub_t = 1\nT = 100 // sub_t\n\nbatch_size = 100\nlearning_rate = 0.001\n\nepochs = 2500\nstep_size = 100\ngamma = 0.5\n\nmodes = 20\nwidth = 64\n\ndatapath = '/mnt/md1/zongyi/burgers_v100_t100_r1024_N2048.mat'\nlog = True\n\nif wandb and log:\n wandb.init(project='PINO-burgers',\n group='FDM',\n config={'lr': learning_rate,\n 'schedule_step': step_size,\n 'batch_size': batch_size,\n 'modes': modes,\n 'width': width})\n\nconstructor = DataConstructor(datapath, sub=sub, sub_t=sub_t)\ntrain_loader = constructor.make_loader(n_sample=ntrain, batch_size=batch_size, train=True)\ntest_loader = constructor.make_loader(n_sample=ntest, batch_size=batch_size, train=False)\n\nif not os.path.exists('checkpoints'):\n os.makedirs('checkpoints')\nif not os.path.exists('figs'):\n os.makedirs('figs')\n\npath = 'PINO_FDM_burgers_N' + \\\n str(ntrain)+'_ep' + str(epochs) + '_m' + str(modes) + '_w' + str(width)\npath_model = 'checkpoints/' + path + '.pt'\n\n\nlayers = [width * (2+i) // 4 for i in range(5)]\nmodes = [modes * (4-i) // 4 for i in range(4)]\n\nmodel = FNN2d(modes1=modes, modes2=modes, width=width, 
layers=layers).to(device)\nnum_param = count_params(model)\nprint('Number of model parameters', num_param)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=epochs//5, gamma=gamma/2)\n# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)\n\nmyloss = LpLoss(size_average=True)\npbar = tqdm(range(epochs), dynamic_ncols=True, smoothing=0.01)\n\nfor ep in pbar:\n    model.train()\n    t1 = default_timer()\n    train_pino = 0.0\n    train_l2 = 0.0\n    for x, y in train_loader:\n        x, y = x.to(device), y.to(device)\n\n        optimizer.zero_grad()\n\n        out = model(x)\n        # out = y_normalizer.decode(out)\n        # y = y_normalizer.decode(y)\n\n        loss = myloss(out.view(batch_size, -1), y.view(batch_size, -1))\n        loss_u, loss_f = PINO_loss(out, x[:, 0, :, 0])\n        total_loss = loss_u * 10 + loss_f\n        total_loss.backward()\n\n        optimizer.step()\n        train_l2 += loss.item()\n        train_pino += loss_f.item()\n\n    scheduler.step()\n\n    model.eval()\n    test_l2 = 0.0\n    test_pino = 0.0\n    with torch.no_grad():\n        for x, y in test_loader:\n            x, y = x.to(device), y.to(device)\n\n            out = model(x)\n            # out = y_normalizer.decode(out)\n\n            test_l2 += myloss(out.view(batch_size, -1),\n                              y.view(batch_size, -1)).item()\n            test_u, test_f = PINO_loss(out, x[:, 0, :, 0])\n            test_pino += test_f.item()  # accumulate over batches before averaging\n\n            if ep % step_size == 0:\n                plt.imsave('figs/y_%d.png' % ep, y[0, :, :].cpu().numpy())\n                plt.imsave('figs/out_%d.png' % ep, out[0, :, :, 0].cpu().numpy())\n\n    train_l2 /= ntrain\n    test_l2 /= ntest\n    train_pino /= len(train_loader)\n    test_pino /= len(test_loader)\n\n    t2 = default_timer()\n    pbar.set_description(\n        (\n            f'Time cost: {t2 - t1:.2f}; Train f error: {train_pino:.5f}; Train l2 error: {train_l2:.5f}. '\n            f'Test f error: {test_pino:.5f}; Test l2 error: {test_l2:.5f}'\n        )\n    )\n    if wandb and log:\n        wandb.log(\n            {\n                'Train f error': train_pino,\n                'Train L2 error': train_l2,\n                'Test f error': test_pino,\n                'Test L2 error': test_l2,\n                'Time cost': t2 - t1\n            }\n        )\n\ntorch.save(model, path_model)\n","sub_path":"pino_burger_fdm.py","file_name":"pino_burger_fdm.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"407146122","text":"\"\"\"\n============================================================\nTA Indicator Extraction\nAuthor: Zhao Rui, Quant Research, Harveston\n===========================================================\n\"\"\"\nfrom ._ta_factors import TAFactor\nimport pandas as pd\nimport datetime\nimport numpy as np\n\"\"\"\nSingle Factor Create\nBasically Two Classes:\n1 without parameters:\nbop = fg.TAFactor(\"BOP\")\n2 with parameters:\ncci = fg.TAFactor(\"CCI\", kwparams={'timeperiod': [5, 10, 30, 50]})\nultosc = fg.TAFactor(\"ULTOSC\", kwparams={\n    'timeperiod1': [30, 50],\n    'timeperiod2': [60, 100],\n    'timeperiod3': [120, 200]})\n\"\"\"\ntplines = [5, 30, 50, 70]\n\n\"\"\"\nVolume Indicator\n\"\"\"\n# On Balance Volume *\nobv = TAFactor(\"OBV\")\n# Chaikin A/D Oscillator *\nadosc = TAFactor(\"ADOSC\", kwparams={\n    'fastperiod': [5, 30],\n    'slowperiod': [20, 60]})\n######Volume Indicator End######\n\n\"\"\"\nMomentum Indicator\n\"\"\"\n# Absolute Price Oscillator * difference between fast and slow\napo = TAFactor(\"APO\", kwparams={\n    'fastperiod': [5, 30, 50],\n    'slowperiod': [10, 60, 100]})\n# Ultimate Oscillator range from 0 to 100\nultosc = TAFactor(\"ULTOSC\", kwparams={\n    'timeperiod1': [10, 30],\n    'timeperiod2': [30, 60],\n    'timeperiod3': [60, 100]})\n\n# 
Aroon * returns two lines (aroon down and aroon up), both ranging from 0 to 100\naroon = TAFactor(\"AROON\", kwparams={'timeperiod': tplines})\n# Chande Momentum Oscillator, ranges from -100 to 100\ncmo = TAFactor(\"CMO\", kwparams={'timeperiod': tplines})\n# Directional Movement Index, ranges from 0 to 100\ndx = TAFactor(\"DX\", kwparams={'timeperiod': tplines})\n# Money Flow Index, ranges from 0 to 100\nmfi = TAFactor(\"MFI\", kwparams={'timeperiod': tplines})\n# Minus Directional Indicator, ranges from 0 to 100\ndi_minus = TAFactor(\"MINUS_DI\", kwparams={\n    'timeperiod': tplines})\n# Plus Directional Indicator, ranges from 0 to 100\ndi_plus = TAFactor(\"PLUS_DI\", kwparams={\n    'timeperiod': tplines})\n# Rate of Change, ranges from -1 to 1\nroc = TAFactor(\"ROC\", kwparams={'timeperiod': tplines})\n# 1-day rate-of-change of a triple-smoothed EMA, ranges from -1 to 1\ntrix = TAFactor(\"TRIX\", kwparams={'timeperiod': tplines})\n# Commodity Channel Index * usually oscillates within -100 to 100, but is not bounded\ncci = TAFactor(\"CCI\", kwparams={\n    'timeperiod': tplines})\n# Relative Strength Index\nrsi = TAFactor(\"RSI\", kwparams={\n    'timeperiod': tplines})\n# Average Directional Movement Index Rating\nadxr = TAFactor(\"ADXR\", kwparams={\n    'timeperiod': tplines})\n\n# Williams' %R\nwillr = TAFactor(\"WILLR\", kwparams={\n    'timeperiod': tplines})\n# Momentum\nmom = TAFactor(\"MOM\", kwparams={\n    'timeperiod': tplines})\n# Balance of Power\nbop = TAFactor(\"BOP\")\n######Momentum Indicator End######\n\n\n\"\"\"\nStat Indicator\n\"\"\"\n# Pearson's Correlation Coefficient\ncorrel = TAFactor(\"CORREL\", kwparams={\n    'timeperiod': tplines})\n# Linear Regression Slope\nlinear_slop = TAFactor(\"LINEARREG_SLOPE\", kwparams={\n    'timeperiod': tplines})\n# Time Series Forecast\ntsf = TAFactor(\"TSF\", kwparams={\n    'timeperiod': tplines})\n\nlinearreg = TAFactor(\"LINEARREG\", kwparams={\n    'timeperiod': tplines})\nlinear_intercept = TAFactor(\"LINEARREG_INTERCEPT\", kwparams={\n    'timeperiod': tplines})\n\nstddev = TAFactor(\"STDDEV\", kwparams={\n    'timeperiod': tplines})\nzscore = TAFactor(\"ZSCORE\", kwparams={\n    'timeperiod': tplines})\n######Stat Indicator End######\n\n\"\"\"\nVolatility Indicator\n\"\"\"\n# Normalized Average True Range\nnatr = TAFactor(\"NATR\", kwparams={\n    'timeperiod': tplines})\n# Average True Range\natr = TAFactor(\"ATR\", kwparams={\n    'timeperiod': tplines})\n# True Range\ntrange = TAFactor(\"TRANGE\")\n######Vol Indicator End######\n\"\"\"\nOverlap Studies\n\"\"\"\n# Double Exponential Moving Average\ndema = TAFactor(\"DEMA\", kwparams={\n    'timeperiod': tplines})\n\n# Exponential Moving Average\nema = TAFactor(\"EMA\", kwparams={\n    'timeperiod': tplines})\n# Hilbert Transform - Instantaneous Trendline\nht_trendline = TAFactor(\"HT_TRENDLINE\")\n# Kaufman Adaptive Moving Average\nkama = TAFactor(\"KAMA\", kwparams={\n    'timeperiod': tplines})\n# Simple Moving Average\nsma = TAFactor(\"SMA\", kwparams={\n    'timeperiod': tplines})\n# Triple Exponential Moving Average (T3)\nt3 = TAFactor(\"T3\", kwparams={\n    'timeperiod': tplines})\n# Weighted Moving Average\nwma = TAFactor(\"WMA\", kwparams={\n    'timeperiod': tplines})\n######Overlap Indicator End######\n\n\nDEFAULT_COLS_paras = [cci, natr, tsf, roc, cmo, rsi, correl, adxr, trix,\n                      linear_slop, ultosc, zscore, dema, ema, kama, sma,\n                      t3, wma, mom, linearreg, linear_intercept, willr, atr]\nDEFAULT_COLS = [trange, obv, bop, ht_trendline]\nDEFAULT_COLS_v1 = [trange, bop, ht_trendline]\n\ndef extract_tafea(df, col_paras=DEFAULT_COLS_paras, cols=DEFAULT_COLS):\n    \"\"\"\n    extract ta features, 
return the dataframe with feature columns\n    inputs:\n    - df: should contain symbol, date, close, high, open, low and volume,\n          sorted by date\n    - col_paras: factors that need hyper-parameters\n    - cols: factors that do not need hyper-parameters\n    return:\n    - df: dataframe containing the feature columns\n    \"\"\"\n    features = col_paras[0].run(df)\n    for idx in range(1, len(col_paras)):\n        features.extend(col_paras[idx].run(df))\n    for col in cols:\n        features.append(col(df))\n    return pd.concat(features, axis=1)\n\ndef extract_tafea_withp(df, col_paras=DEFAULT_COLS_paras, cols=DEFAULT_COLS_v1):\n    \"\"\"\n    extract ta features, return the dataframe with feature columns;\n    it will not include volume info\n    inputs:\n    - df: should contain symbol, date, close, high, open, low and volume,\n          sorted by date\n    - col_paras: factors that need hyper-parameters\n    - cols: factors that do not need hyper-parameters\n    return:\n    - df: dataframe containing the feature columns\n    \"\"\"\n    features = col_paras[0].run(df)\n    for idx in range(1, len(col_paras)):\n        features.extend(col_paras[idx].run(df))\n    for col in cols:\n        features.append(col(df))\n    return pd.concat(features, axis=1)\n\nif __name__ == '__main__':\n    symbol = ['1'] * 50 + ['2'] * 50\n    date1 = '2017-10-13'\n    start = datetime.datetime.strptime(date1, '%Y-%m-%d')\n    step = datetime.timedelta(days=1)\n    dates = []\n    for i in range(50):\n        dates.append(start.date())\n        start += step  # advance one day, otherwise every row carries the same date\n    all_dates = dates + dates\n    closes = np.random.rand(100,)\n    opens = np.random.rand(100,)\n    lows = np.random.rand(100,)\n    highs = np.random.rand(100,)\n    volumes = np.random.rand(100,)\n    dict_data = {'symbol': symbol, 'date': all_dates, 'close': closes,\n                 'open': opens, 'low': lows, 'high': highs, 'volume': volumes}\n    df = pd.DataFrame.from_dict(dict_data)\n    # test feature extraction\n    df.sort_values(by=['symbol', 'date'], inplace=True)\n    df.dropna(inplace=True)\n    df_fea = df.groupby('symbol').apply(extract_tafea)\n    print(df_fea.head(2))\n\n","sub_path":"ta_features/demo_feas.py","file_name":"demo_feas.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"12645311","text":"# MIT License\n# Copyright (c) 2017 MassChallenge, Inc.\n\nfrom pytz import utc\nfrom datetime import (\n    datetime,\n    timedelta,\n)\nimport json\nfrom django.urls import reverse\n\nfrom impact.tests.api_test_case import APITestCase\nfrom accelerator.models import (\n    JobPosting,\n    Site,\n    Startup,\n)\n\nfrom impact.tests.factories import (\n    JobPostingFactory,\n    ProgramFactory,\n    ProgramStartupStatusFactory,\n    SiteProgramAuthorizationFactory,\n    StartupFactory,\n    StartupStatusFactory,\n)\n\n\nclass TestJobPostingListView(APITestCase):\n\n    def setUp(self):\n        # Many objects must be created to scaffold this view's data\n        Site.objects.create()\n        _site = Site.objects.first()\n        startup1, startup2 = StartupFactory.create_batch(2)\n        startup1.high_resolution_logo = \"hamsterdance.gif\"\n        startup1.save()\n        JobPostingFactory(startup=startup1)\n        JobPostingFactory(startup=startup2)\n        self.program = ProgramFactory()\n        _pss = ProgramStartupStatusFactory(program=self.program)\n        StartupStatusFactory(startup=startup1,\n                             program_startup_status=_pss)\n        StartupStatusFactory(startup=startup2,\n                             program_startup_status=_pss)\n        SiteProgramAuthorizationFactory(site=_site, program=self.program)\n        self.url = reverse(\"job_posting_list\")\n\n    def test_all_jobs_returned_when_no_program_key(self):\n        with self.login(email=self.basic_user().email):\n            response = self.client.post(self.url)\n            
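# no filter parameters are sent, so both seeded postings should come back\n            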
response_data = json.loads(response.content)[\"job_postings\"]\n            self.assertEqual(2, len(response_data))\n\n    def test_filter_by_startup(self):\n        startup = Startup.objects.first()\n        with self.login(email=self.basic_user().email):\n            data = {\"StartupKey\": startup.pk}\n            response = self.client.post(self.url, data=data)\n            response_data = json.loads(response.content)[\"job_postings\"][0]\n            self.assertEqual(response_data[\"startup_name\"], startup.name)\n\n    # Default ordering should be by post_date, descending\n    def test_ordering_default(self):\n        j1 = JobPosting.objects.first()\n        j1.postdate = utc.localize(datetime.now())\n        j1.save()\n        j2 = JobPosting.objects.last()\n        j2.postdate = utc.localize(datetime.now() - timedelta(1))\n        j2.save()\n        with self.login(email=self.basic_user().email):\n            response = self.client.post(self.url)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(jobs[0][\"post_date\"] > jobs[1][\"post_date\"])\n\n    def test_ordering_by_type(self):\n        with self.login(email=self.basic_user().email):\n            data = {\"OrderBy\": \"jobtype\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(jobs[0][\"type\"] < jobs[1][\"type\"])\n\n    def test_ordering_by_startup(self):\n        with self.login(email=self.basic_user().email):\n            data = {\"OrderBy\": \"startup\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(jobs[0][\"startup_name\"] < jobs[1][\"startup_name\"])\n\n    def test_valid_programkey(self):\n        with self.login(email=self.basic_user().email):\n            response = self.client.post(\n                self.url, data={\"ProgramKey\": self.program.pk})\n            self.assertEqual(200, response.status_code)\n\n    def test_invalid_programkey(self):\n        with self.login(email=self.basic_user().email):\n            # Bogus ProgramKey\n            response = self.client.post(self.url, data={\"ProgramKey\": \"x\"})\n            self.assertEqual(400, response.status_code)\n\n    def test_filter_on_keywords(self):\n        # Tweak a couple of jobs to have the values we want\n        j1 = JobPosting.objects.first()\n        j1.title = \"Strategerist\"\n        j1.save()\n        j2 = JobPosting.objects.last()\n        j2.description = \"enterprisey\"\n        j2.save()\n        with self.login(email=self.basic_user().email):\n            # Match on title\n            data = {\"Keywords\": \"Strategerist\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(len(jobs) == 1)\n            # Match on description\n            data = {\"Keywords\": \"enterprisey\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(len(jobs) == 1)\n\n    def test_filter_on_jobtype(self):\n        # Tweak a couple of jobs to have the values we want\n        j1 = JobPosting.objects.first()\n        j1.type = \"INTERNSHIP\"\n        j1.save()\n        j2 = JobPosting.objects.last()\n        j2.type = \"PART_TIME_PERMANENT\"\n        j2.save()\n        with self.login(email=self.basic_user().email):\n            # Match on the first job type\n            data = {\"JobType\": \"INTERNSHIP\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(len(jobs) == 1)\n            # Match on the second job type\n            data = {\"JobType\": \"PART_TIME_PERMANENT\"}\n            response = self.client.post(self.url, data=data)\n            jobs = json.loads(response.content)[\"job_postings\"]\n            self.assertTrue(len(jobs) == 
1)\n","sub_path":"web/impact/impact/tests/test_job_posting_list_view.py","file_name":"test_job_posting_list_view.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546290043","text":"import logging\nlogging.basicConfig(level=logging.WARN,\n filename=\"log.txt\",\n format=\"%(asctime)s=>%(levelname)s==>%(message)s\")\nlogging.info(\"program started\")\na=input(\"enter a value:\")\nlogging.info(\"a vlaue entered\")\nb=input(\"Enter b value:\")\nlogging.info(\"b value entered.\")\nlogging.debug(f\"before converting: a={a},b={b}\")\ntry:\n a=float(a)\n logging.info(\"a value converted.\")\n b=float(b)\n logging.info(\"b value converted\")\n logging.debug(f\"after converting: a={a},b={b}\")\n res=a/b\n r = f\"result={res}\"\n print(r)\n logging.debug(r)\nexcept ZeroDivisionError as err:\n error = \"ERROR: Not expecting zero for b\"\n print(error)\n logging.error(error)\nexcept ValueError as err:\n error = \"ERROR: Excpeting only digits\"\n print(error)\n logging.error(error)\nexcept Exception as err:\n error = \"ERROR:%s\"%err\n print(error)\n logging.error(error)\nlogging.info(\"program ended\")","sub_path":"a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"110944011","text":"#!/usr/bin/env python\n\n# Copyright (C) 2013 Hiroki Horiuchi \n#\n# Copying and distribution of this file, with or without modification,\n# are permitted in any medium without royalty provided\n# the copyright notice and this notice are preserved.\n# This file is offered as-is, without any warranty.\n\nfrom unittest import TestCase, TestProgram\n\nfrom transitivity import cmp_abc, cmp_abc_expected, transitivity\nimport pythonpath\nfrom versioncore import IntegralVersionAtom as A,\\\n MinimumVersionAtom as Min, MaximumVersionAtom as Max\n\nclass T0Sanity(TestCase):\n def test00(self):\n self.assertRaises(ValueError, A, '3.14')\n\nclass T1Create(TestCase):\n def test00accept_int(self):\n expected = '1', '23'\n got = tuple(A(int(s)).value for s in expected)\n self.assertEquals(expected, got)\n def test01leading_zeroes(self):\n expected = '010'\n got = A('010').value\n self.assertEquals(expected, got)\n\nclass T2StrRepr(TestCase):\n def test00str00normal(self):\n expected = '1', '23', '010'\n got = tuple(str(A(s)) for s in expected)\n self.assertEquals(expected, got)\n def test00str01inf(self):\n expected = '', 'INFINITY'\n got = tuple(str(a) for a in (Min.s, Max.s))\n self.assertEquals(expected, got)\n def test01repr00normal(self):\n expected = (\n \"IntegralVersionAtom('1')\",\n \"IntegralVersionAtom('23')\",\n \"IntegralVersionAtom('010')\",\n )\n got = tuple(repr(A(s)) for s in ('1', '23', '010'))\n self.assertEquals(expected, got)\n def test01repr01inf(self):\n expected = 'MinimumVersionAtom.s', 'MaximumVersionAtom.s'\n got = tuple(repr(a) for a in (Min.s, Max.s))\n self.assertEquals(expected, got)\n\nclass T3Cmp(TestCase):\n def test00(self):\n expected = cmp_abc_expected\n got = cmp_abc(A('01'), A(2), A(10))\n self.assertEquals(expected, got)\n def test01(self):\n data = (\n Min.s,\n A(0),\n A('01'), A('010'), A('02'), A('020'),\n A(1), A(2), A(10), A(20), A(999),\n Max.s,\n )\n self.assertTrue(transitivity(data))\n\nif __name__ == '__main__':\n 
TestProgram()\n","sub_path":"py/versionutil/tests/test00versioncore-atom.py","file_name":"test00versioncore-atom.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92719688","text":"#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import TwistStamped\n\nKp = 0.7\nKd = 0.3\n\ngoal_position = '/goal'\ncurrent_velocity = '/mavros/local_position/velocity_local'\ncurrent_position = '/mavros/local_position/pose'\ntarget_velocity = '/mavros/setpoint_velocity/cmd_vel_unstamped'\n\ndef update_u():\n    # PD controller: proportional term on the position error, derivative term damps the current velocity\n    global goal_pos\n    global cur_pos\n\n    ts.header.stamp = rospy.Time.now()\n\n    u_x = Kp*(goal_pos.pose.position.x-cur_pos.pose.position.x) + Kd*(-cur_vel.twist.linear.x) #* 0.02\n    u_y = Kp*(goal_pos.pose.position.y-cur_pos.pose.position.y) + Kd*(-cur_vel.twist.linear.y) #* 0.02\n    u_z = Kp*(goal_pos.pose.position.z-cur_pos.pose.position.z) + Kd*(-cur_vel.twist.linear.z) #* 0.02\n\n    # clamp horizontal commands to +/-2 m/s and vertical to +/-1.5 m/s\n    if u_x > 2:\n        u_x = 2\n    elif u_x < -2:\n        u_x = -2\n\n    if u_y > 2:\n        u_y = 2\n    elif u_y < -2:\n        u_y = -2\n\n    if u_z > 1.5:\n        u_z = 1.5\n    elif u_z < -1.5:\n        u_z = -1.5\n\n    # angular part\n    g_z_rot = trans_q_to_e(goal_pos)\n    c_z_rot = trans_q_to_e(cur_pos)\n    ts.twist.angular.z = Kp*(g_z_rot - c_z_rot) + Kd*(0.0 - cur_vel.twist.angular.z)\n\n    ts.twist.linear.x = u_x\n    ts.twist.linear.y = u_y\n    ts.twist.linear.z = u_z\n\ndef checking(msg):\n    print(msg)\n\n\ndef main():\n\n    rospy.init_node('talker')\n\n    pub = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10)\n\n    rate = rospy.Rate(50)\n    rospy.Subscriber(goal_position, PoseStamped, update_goal)  # callback defined below\n    rospy.Subscriber(current_velocity, TwistStamped, update_cur_vel)  # callback defined below\n    rospy.Subscriber(current_position, PoseStamped, update_cur_pos)  # callback defined below\n    rospy.Subscriber(target_velocity, TwistStamped, checking)\n\n    global cur_pos\n    cur_pos = PoseStamped()\n\n    global cur_vel\n    cur_vel = TwistStamped()\n\n    global goal_pos\n    goal_pos = PoseStamped()\n\n    global ts\n    ts = TwistStamped()\n    ts.header.frame_id = 'map'\n\n    while not rospy.is_shutdown():\n\n        update_u()\n\n        pub.publish(ts)\n\n        rate.sleep()\n\ndef trans_q_to_e(obj):\n    # extract the yaw (rotation about z) from a quaternion\n    qx = obj.pose.orientation.x\n    qy = obj.pose.orientation.y\n    qz = obj.pose.orientation.z\n    qw = obj.pose.orientation.w\n\n    rotateZa0 = 2.0*(qx*qy + qw*qz)\n    rotateZa1 = qw*qw + qx*qx - qy*qy - qz*qz\n    rotateZ = 0.0\n    if rotateZa0 != 0.0 and rotateZa1 != 0.0:\n        rotateZ = np.arctan2(rotateZa0, rotateZa1)\n    return rotateZ\n\n\ndef update_cur_pos(msg):\n    global cur_pos\n    cur_pos = msg\n\n\ndef update_cur_vel(msg):\n    global cur_vel\n    cur_vel = msg\n\ndef update_goal(msg):\n    global goal_pos\n    goal_pos = msg\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"ground_control/scripts/controlling_drone.py","file_name":"controlling_drone.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225066551","text":"# coding: utf-8\n\nimport re\nimport six\n\n\nclass Multidrm:\n\n    \"\"\"\n    Attributes:\n        openapi_types (dict): The key is attribute name\n            and the value is attribute type.\n        attribute_map (dict): The key is attribute name\n            and the value is json key in definition.\n    \"\"\"\n\n    sensitive_list = []\n\n    openapi_types = {\n        'content_id': 
'str',\n        'streaming_mode': 'str',\n        'encrypt_audio': 'int',\n        'emi': 'int',\n        'drm_list': 'list[str]'\n    }\n\n    attribute_map = {\n        'content_id': 'content_id',\n        'streaming_mode': 'streaming_mode',\n        'encrypt_audio': 'encrypt_audio',\n        'emi': 'emi',\n        'drm_list': 'drm_list'\n    }\n\n    def __init__(self, content_id=None, streaming_mode=None, encrypt_audio=None, emi=None, drm_list=None):\n        \"\"\"Multidrm - a model defined in huaweicloud sdk\"\"\"\n\n        self._content_id = None\n        self._streaming_mode = None\n        self._encrypt_audio = None\n        self._emi = None\n        self._drm_list = None\n        self.discriminator = None\n\n        if content_id is not None:\n            self.content_id = content_id\n        self.streaming_mode = streaming_mode\n        if encrypt_audio is not None:\n            self.encrypt_audio = encrypt_audio\n        if emi is not None:\n            self.emi = emi\n        self.drm_list = drm_list\n\n    @property\n    def content_id(self):\n        \"\"\"Gets the content_id of this Multidrm.\n\n        Unique identifier.\n\n        :return: The content_id of this Multidrm.\n        :rtype: str\n        \"\"\"\n        return self._content_id\n\n    @content_id.setter\n    def content_id(self, content_id):\n        \"\"\"Sets the content_id of this Multidrm.\n\n        Unique identifier.\n\n        :param content_id: The content_id of this Multidrm.\n        :type: str\n        \"\"\"\n        self._content_id = content_id\n\n    @property\n    def streaming_mode(self):\n        \"\"\"Gets the streaming_mode of this Multidrm.\n\n        Defines the streaming type; the value is DASH or HLS.\n\n        :return: The streaming_mode of this Multidrm.\n        :rtype: str\n        \"\"\"\n        return self._streaming_mode\n\n    @streaming_mode.setter\n    def streaming_mode(self, streaming_mode):\n        \"\"\"Sets the streaming_mode of this Multidrm.\n\n        Defines the streaming type; the value is DASH or HLS.\n\n        :param streaming_mode: The streaming_mode of this Multidrm.\n        :type: str\n        \"\"\"\n        self._streaming_mode = streaming_mode\n\n    @property\n    def encrypt_audio(self):\n        \"\"\"Gets the encrypt_audio of this Multidrm.\n\n        Audio encryption switch. Values: - 0: audio is not encrypted. - 1: audio is encrypted. Default: 0. This parameter only takes effect for DASH.\n\n        :return: The encrypt_audio of this Multidrm.\n        :rtype: int\n        \"\"\"\n        return self._encrypt_audio\n\n    @encrypt_audio.setter\n    def encrypt_audio(self, encrypt_audio):\n        \"\"\"Sets the encrypt_audio of this Multidrm.\n\n        Audio encryption switch. Values: - 0: audio is not encrypted. - 1: audio is encrypted. Default: 0. This parameter only takes effect for DASH.\n\n        :param encrypt_audio: The encrypt_audio of this Multidrm.\n        :type: int\n        \"\"\"\n        self._encrypt_audio = encrypt_audio\n\n    @property\n    def emi(self):\n        \"\"\"Gets the emi of this Multidrm.\n\n        Defines the encryption mode. Values: - 16418 (AES-128, CBC) - 16420 (AES-128, CTR) - 16422 (SM4, CBC)\n\n        :return: The emi of this Multidrm.\n        :rtype: int\n        \"\"\"\n        return self._emi\n\n    @emi.setter\n    def emi(self, emi):\n        \"\"\"Sets the emi of this Multidrm.\n\n        Defines the encryption mode. Values: - 16418 (AES-128, CBC) - 16420 (AES-128, CTR) - 16422 (SM4, CBC)\n\n        :param emi: The emi of this Multidrm.\n        :type: int\n        \"\"\"\n        self._emi = emi\n\n    @property\n    def drm_list(self):\n        \"\"\"Gets the drm_list of this Multidrm.\n\n        HLS video encryption control parameter. Values: - PLAYREADY - CHINA_DRM - WIDEVINE\n\n        :return: The drm_list of this Multidrm.\n        :rtype: list[str]\n        \"\"\"\n        return self._drm_list\n\n    @drm_list.setter\n    def drm_list(self, drm_list):\n        \"\"\"Sets the drm_list of this Multidrm.\n\n        HLS video encryption control parameter. Values: - PLAYREADY - CHINA_DRM - WIDEVINE\n\n        :param drm_list: The drm_list of this Multidrm.\n        :type: list[str]\n        \"\"\"\n        self._drm_list = drm_list\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.openapi_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, 
\"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n import simplejson as json\n return json.dumps(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Multidrm):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/multidrm.py","file_name":"multidrm.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653391604","text":"from pymongo import MongoClient\nimport pymongo\nimport pprint\nimport json\nimport requests\nimport auth as d2lauth\nfrom auth import *\nimport csv\n\nwith open('cred/cred-dev.json') as cred_file:\n\tcred = json.load(cred_file)\n\napp_id = cred[\"appID\"]\napp_key = cred[\"appKey\"]\nredirect_url_attributes = cred[\"redirectUrlAttributes\"]\nbase_url = cred[\"url\"]\napp_creds = { 'app_id': app_id, 'app_key': app_key }\nredirect_url_base = \"https://\" + base_url + \"/d2l/home\"\nac = d2lauth.fashion_app_context(app_id=app_creds['app_id'], app_key=app_creds['app_key'])\nauth_url = ac.create_url_for_authentication(base_url, 'http://127.0.0.1:8000')\nredirect_url = redirect_url_base + redirect_url_attributes\nuc = ac.create_user_context(redirect_url, base_url, True)\n\n\nclient = MongoClient(\"mongodb://s172-21-81-h202.paws.uga.edu:27017\")\ndb = client.valence\n\nitems = db.discuss.find({})\n\nfor item in items:\n\ttry:\n\t\tprint(item['messageNoReply'])\n\texcept:\n\t\tpass","sub_path":"Data Analysis Scripts/discussions_query.py","file_name":"discussions_query.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102895887","text":"# https://codejam.withgoogle.com/2018/challenges/00000000000000cb/dashboard/0000000000007a30\n\nimport sys\nimport math\nimport random\nimport logging\n\nlogger = logging.getLogger('go_gopher')\nlogger.setLevel(logging.DEBUG)\nfh = logging.FileHandler('log', mode='w')\nfh.setLevel(logging.DEBUG)\nlogger.addHandler(fh)\nlogger.debug('===== GO GOPHER STARTED ====')\n\ndef log_pool(logger, pool):\n for row in pool:\n logger.debug(row)\n\ndef deploy(a):\n pool = [[0] * 3 for _ in range(3)]\n log_pool(logger, pool)\n num_undeployed = 9\n j_offset = 0\n i_desired, j_desired = 2, 2 + j_offset\n print(i_desired, j_desired, flush=True)\n for num_exchange in range(0, 1000 + 1):\n i_deployed, j_deployed = map(int, input().strip().split())\n logger.debug('Exchange {0}: {1} {2}'.format(num_exchange, i_deployed, j_deployed))\n if i_deployed == -1 and j_deployed == -1:\n return False\n if i_deployed == 0 and j_deployed == 0:\n return True\n i_pool, j_pool = i_deployed - 1, j_deployed - j_offset - 1\n if not pool[i_pool][j_pool]:\n logger.debug(' Deploy at {0} {1}'.format(i_deployed, j_deployed))\n pool[i_pool][j_pool] = 1\n num_undeployed -= 1\n a -= 1\n logger.debug(' Remain in pool: 
{0}'.format(num_undeployed))\n            logger.debug('    Remain: {0}'.format(a))\n            if not num_undeployed:\n                new = min(int((a - 1) / 3 + 1), 3)\n                pool_new = [row[new:] + [0] * new for row in pool]\n                pool, num_undeployed = pool_new, 3 * new\n                j_offset += new\n                j_desired += new\n                log_pool(logger, pool)\n        print(i_desired, j_desired, flush=True)\n\nif __name__ == '__main__':\n    t = int(input())\n    for i in range(t):\n        a = int(input())\n        if not deploy(a):\n            exit(1)\n","sub_path":"google_codejam/2018/qual/go_gopher.py","file_name":"go_gopher.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"475299676","text":"\"\"\"Blocks for TensorFlow models.\"\"\"\nimport tensorflow as tf\nfrom coldnet.tensor_flow import util as tf_util\n\n\ndef adam_with_grad_clip(learning_rate, loss, parameters, grad_clip_norm):\n    optimizer = tf.train.AdamOptimizer(learning_rate)\n    # items in grads_and_vars: gv[0] = gradient; gv[1] = variable.\n    grads_and_vars = optimizer.compute_gradients(\n        loss,\n        parameters)\n    if grad_clip_norm > 0.0:\n        grads_and_vars = tf_util.clip_gradients(\n            grads_and_vars=grads_and_vars,\n            norm=grad_clip_norm)\n    # return the training op so callers can actually run it in a session\n    return optimizer.apply_gradients(grads_and_vars)\n\n\ndef cross_entropy_with_l2(labels, logits, _lambda, parameters):\n    cross_entropy = tf.reduce_mean(\n        tf.nn.sparse_softmax_cross_entropy_with_logits(\n            labels=labels,\n            logits=logits,\n            name='sparse_softmax_cross_entropy'))\n    penalty_term = tf.multiply(\n        tf.cast(_lambda, tf.float32),\n        sum([tf.nn.l2_loss(p) for p in parameters]),\n        name='penalty_term')\n    return tf.add(cross_entropy, penalty_term, name='loss')\n","sub_path":"coldnet/tensor_flow/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280241312","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# file_name: log_record\r\n# description: \r\n# author: libo\r\n# History:\r\n# \tfirst created: 2015/11/9\r\n\r\n__author__ = 'libo'\r\n\r\nimport ctypes\r\nimport logging\r\n# from logging.handlers import TimedRotatingFileHandler\r\n\r\ndef initlog(logger_name, log_file_path, log_level=logging.DEBUG, save_in_file=True):\r\n    logger = logging.Logger(logger_name)\r\n    #\r\n    # for handler in logger.handlers:\r\n    #     logger.removeHandler(handler)\r\n\r\n    datefmt = \"%Y-%m-%d %H:%M:%S\"\r\n    format_str = \"[%(asctime)s]: %(levelname)s - %(name)s - %(message)s\"\r\n    formatter = logging.Formatter(format_str, datefmt)\r\n\r\n    # file handler\r\n    if save_in_file:\r\n        file_handler = logging.FileHandler(log_file_path)\r\n        file_handler.setFormatter(formatter)\r\n        file_handler.setLevel(log_level)\r\n        logger.addHandler(file_handler)\r\n\r\n    try:\r\n        stream_handler = ColorizingStreamHandler()\r\n    except Exception:\r\n        # ctypes.windll is Windows-only; fall back to a plain handler elsewhere\r\n        stream_handler = logging.StreamHandler()\r\n    stream_handler.setFormatter(formatter)\r\n    stream_handler.setLevel(log_level)\r\n    # stream_handler.emit = decorate_emit(stream_handler.emit)\r\n\r\n    logger.addHandler(stream_handler)\r\n    logger.setLevel(log_level)\r\n\r\n    return logger\r\n\r\nclass ColorizingStreamHandler(logging.StreamHandler):\r\n\r\n    FOREGROUND_RED = 0x0c\r\n    FOREGROUND_GREEN = 0x02\r\n    FOREGROUND_WHITE = 0x0f\r\n    FOREGROUND_YELLOW = 0x0e\r\n    FOREGROUND_BLUE = 0x09\r\n\r\n    FOREGROUND_DARKWHITE = 0x07\r\n    FOREGROUND_DARKRED = 0x04\r\n    FOREGROUND_DARKSKYBLUE = 0x03\r\n    FOREGROUND_DARKGREEN = 0x02\r\n    FOREGROUND_DARKGRAY = 0x08\r\n\r\n\r\n    STD_INPUT_HANDLE = -10\r\n    
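# -11 and -12 are the IDs kernel32.GetStdHandle expects for stdout and stderr\r\n    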
STD_OUTPUT_HANDLE = -11\r\n    STD_ERROR_HANDLE = -12\r\n\r\n    def __init__(self, *args, **kwargs):\r\n        self._colors = {logging.DEBUG: self.FOREGROUND_WHITE,\r\n                        logging.INFO: self.FOREGROUND_GREEN,\r\n                        logging.WARNING: self.FOREGROUND_YELLOW,\r\n                        logging.ERROR: self.FOREGROUND_RED,\r\n                        }\r\n        super(ColorizingStreamHandler, self).__init__(*args, **kwargs)\r\n        self.std_out_handler = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)\r\n        self.std_err_handler = ctypes.windll.kernel32.GetStdHandle(self.STD_ERROR_HANDLE)\r\n\r\n    @property\r\n    def is_tty(self):\r\n        isatty = getattr(self.stream, 'isatty', None)\r\n        return isatty and isatty()\r\n\r\n    def emit(self, record):\r\n        try:\r\n            message = self.format(record)\r\n            stream = self.stream\r\n            if not self.is_tty:\r\n                stream.write(message)\r\n            else:\r\n                self.__set_text_color(self._colors[record.levelno])\r\n                # message = self._colors[record.levelno] + message + self.RESET\r\n                stream.write(message)\r\n                self.__reset_text_color()\r\n            stream.write(getattr(self, 'terminator', '\\n'))\r\n            self.flush()\r\n        except (KeyboardInterrupt, SystemExit):\r\n            raise\r\n        except Exception:\r\n            self.handleError(record)\r\n\r\n    def setLevelColor(self, logging_level, escaped_ansi_code):\r\n        self._colors[logging_level] = escaped_ansi_code\r\n\r\n    def __set_text_color(self, color):\r\n        ctypes.windll.kernel32.SetConsoleTextAttribute(self.std_out_handler, color)\r\n        ctypes.windll.kernel32.SetConsoleTextAttribute(self.std_err_handler, color)\r\n\r\n    def __reset_text_color(self):\r\n        self.__set_text_color(self.FOREGROUND_DARKWHITE)\r\n\r\ndef decorate_emit(fn):\r\n    # wrap a handler's emit so each record gets an ANSI-colored \"***\" prefix\r\n    def new(*args):\r\n        levelno = args[0].levelno\r\n        if(levelno >= logging.CRITICAL):\r\n            color = '\\x1b[31;1m'\r\n        elif(levelno >= logging.ERROR):\r\n            color = '\\x1b[31;1m'\r\n        elif(levelno >= logging.WARNING):\r\n            color = '\\x1b[33;1m'\r\n        elif(levelno >= logging.INFO):\r\n            color = '\\x1b[32;1m'\r\n        elif(levelno >= logging.DEBUG):\r\n            color = '\\x1b[35;1m'\r\n        else:\r\n            color = '\\x1b[0m'\r\n        # add a colored *** at the beginning of the message\r\n        args[0].msg = \"{0}***\\x1b[0m {1}\".format(color, args[0].msg)\r\n\r\n        # embolden each of the message's args\r\n        args[0].args = tuple('\\x1b[1m' + arg + '\\x1b[0m' for arg in args[0].args)\r\n        return fn(*args)\r\n    return new","sub_path":"build/lib/PyLogger/log_record.py","file_name":"log_record.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
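# A minimal usage sketch for the initlog() helper in the record above. The import
# path "log_record" is assumed from the record's file_name and is not confirmed by
# the source; the colorized console output additionally assumes a Windows console,
# since ColorizingStreamHandler drives ctypes.windll directly (on other platforms
# initlog() falls back to a plain logging.StreamHandler through its try/except).
import logging
from log_record import initlog  # assumed module name

logger = initlog("demo", "demo.log", log_level=logging.DEBUG, save_in_file=True)
logger.debug("written to demo.log and echoed in white")  # DEBUG -> FOREGROUND_WHITE
logger.info("echoed in green")                           # INFO -> FOREGROUND_GREEN
logger.warning("echoed in yellow")                       # WARNING -> FOREGROUND_YELLOW
logger.error("echoed in red")                            # ERROR -> FOREGROUND_RED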